/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/devicestat.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <sys/sbuf.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <machine/stdarg.h>

struct class_list_head g_classes = LIST_HEAD_INITIALIZER(g_classes);
static struct g_tailq_head geoms = TAILQ_HEAD_INITIALIZER(geoms);
char *g_wait_event, *g_wait_up, *g_wait_down, *g_wait_sim;

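/*
 * Argument block for the class load/unload events: g_modevent() fills in
 * the class pointer, and g_unload_class() reports its result back to
 * g_modevent() through the error field.
 */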
struct g_hh00 {
	struct g_class *mp;
	int error;
};

/*
 * This event offers a new class a chance to taste all preexisting providers.
 */
static void
g_load_class(void *arg, int flag)
{
	struct g_hh00 *hh;
	struct g_class *mp2, *mp;
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_assert();
	if (flag == EV_CANCEL)	/* XXX: can't happen ? */
		return;
	if (g_shutdown)
		return;

	hh = arg;
	mp = hh->mp;
	g_free(hh);
	g_trace(G_T_TOPOLOGY, "g_load_class(%s)", mp->name);
	LIST_FOREACH(mp2, &g_classes, class) {
		KASSERT(mp2 != mp,
		    ("The GEOM class %s is already loaded", mp2->name));
		KASSERT(strcmp(mp2->name, mp->name) != 0,
		    ("A GEOM class named %s is already loaded", mp2->name));
	}

	if (mp->init != NULL)
		mp->init(mp);
	LIST_INIT(&mp->geom);
	LIST_INSERT_HEAD(&g_classes, mp, class);
	if (mp->taste == NULL)
		return;
	LIST_FOREACH(mp2, &g_classes, class) {
		if (mp == mp2)
			continue;
		LIST_FOREACH(gp, &mp2->geom, geom) {
			LIST_FOREACH(pp, &gp->provider, provider) {
				mp->taste(mp, pp, 0);
				g_topology_assert();
			}
		}
	}
}

static void
g_unload_class(void *arg, int flag)
{
	struct g_hh00 *hh;
	struct g_class *mp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp;
	int error;

	g_topology_assert();
	hh = arg;
	mp = hh->mp;
	g_trace(G_T_TOPOLOGY, "g_unload_class(%s)", mp->name);
	if (mp->destroy_geom == NULL) {
		hh->error = EOPNOTSUPP;
		return;
	}

	/* We refuse to unload if anything is open */
	LIST_FOREACH(gp, &mp->geom, geom) {
		LIST_FOREACH(pp, &gp->provider, provider)
			if (pp->acr || pp->acw || pp->ace) {
				hh->error = EBUSY;
				return;
			}
		LIST_FOREACH(cp, &gp->consumer, consumer)
			if (cp->acr || cp->acw || cp->ace) {
				hh->error = EBUSY;
				return;
			}
	}

	/* Bar new entries */
	mp->taste = NULL;
	mp->config = NULL;

	error = 0;
	LIST_FOREACH(gp, &mp->geom, geom) {
		error = mp->destroy_geom(NULL, mp, gp);
		if (error != 0)
			break;
	}
	if (error == 0) {
		LIST_REMOVE(mp, class);
		if (mp->fini != NULL)
			mp->fini(mp);
	}
	hh->error = error;
	return;
}

int
g_modevent(module_t mod, int type, void *data)
{
	struct g_hh00 *hh;
	int error;
	static int g_ignition;

	if (!g_ignition) {
		g_ignition++;
		g_init();
	}
	hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO);
	hh->mp = data;
	error = EOPNOTSUPP;
	switch (type) {
	case MOD_LOAD:
		g_trace(G_T_TOPOLOGY, "g_modevent(%s, LOAD)", hh->mp->name);
		g_post_event(g_load_class, hh, M_WAITOK, NULL);
		error = 0;
		break;
	case MOD_UNLOAD:
		g_trace(G_T_TOPOLOGY, "g_modevent(%s, UNLOAD)", hh->mp->name);
		error = g_waitfor_event(g_unload_class, hh, M_WAITOK, NULL);
		if (error == 0)
			error = hh->error;
		if (error == 0) {
			g_waitidle();
			KASSERT(LIST_EMPTY(&hh->mp->geom),
			    ("Unloaded class (%s) still has geom",
			    hh->mp->name));
		}
		g_free(hh);
		break;
	}
	return (error);
}

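/*
 * Create a new geom in class "mp"; the printf-style arguments become the
 * geom's name.  The new geom starts out with rank 1 and with empty consumer
 * and provider lists.
 */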
struct g_geom *
g_new_geomf(struct g_class *mp, const char *fmt, ...)
{
	struct g_geom *gp;
	va_list ap;
	struct sbuf *sb;

	g_topology_assert();
	sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
	va_start(ap, fmt);
	sbuf_vprintf(sb, fmt, ap);
	va_end(ap);
	sbuf_finish(sb);
	gp = g_malloc(sizeof *gp, M_WAITOK | M_ZERO);
	gp->name = g_malloc(sbuf_len(sb) + 1, M_WAITOK | M_ZERO);
	gp->class = mp;
	gp->rank = 1;
	LIST_INIT(&gp->consumer);
	LIST_INIT(&gp->provider);
	LIST_INSERT_HEAD(&mp->geom, gp, geom);
	TAILQ_INSERT_HEAD(&geoms, gp, geoms);
	strcpy(gp->name, sbuf_data(sb));
	sbuf_delete(sb);
	return (gp);
}

void
g_destroy_geom(struct g_geom *gp)
{

	g_trace(G_T_TOPOLOGY, "g_destroy_geom(%p(%s))", gp, gp->name);
	g_topology_assert();
	KASSERT(LIST_EMPTY(&gp->consumer),
	    ("g_destroy_geom(%s) with consumer(s) [%p]",
	    gp->name, LIST_FIRST(&gp->consumer)));
	KASSERT(LIST_EMPTY(&gp->provider),
	    ("g_destroy_geom(%s) with provider(s) [%p]",
	    gp->name, LIST_FIRST(&gp->provider)));
	g_cancel_event(gp);
	LIST_REMOVE(gp, geom);
	TAILQ_REMOVE(&geoms, gp, geoms);
	g_free(gp->name);
	g_free(gp);
}

/*
 * This function is called (repeatedly) until the geom has withered away.
 */
void
g_wither_geom(struct g_geom *gp, int error)
{
	struct g_provider *pp, *pp2;
	struct g_consumer *cp, *cp2;
	static int once_is_enough;

	if (once_is_enough)
		return;
	once_is_enough = 1;
	g_trace(G_T_TOPOLOGY, "g_wither_geom(%p(%s))", gp, gp->name);
	g_topology_assert();
	if (!(gp->flags & G_GEOM_WITHER)) {
		gp->flags |= G_GEOM_WITHER;
		LIST_FOREACH(pp, &gp->provider, provider)
			g_orphan_provider(pp, error);
	}
	for (pp = LIST_FIRST(&gp->provider); pp != NULL; pp = pp2) {
		pp2 = LIST_NEXT(pp, provider);
		if (!LIST_EMPTY(&pp->consumers))
			continue;
		g_destroy_provider(pp);
	}
	for (cp = LIST_FIRST(&gp->consumer); cp != NULL; cp = cp2) {
		cp2 = LIST_NEXT(cp, consumer);
		if (cp->acr || cp->acw || cp->ace)
			continue;
		g_detach(cp);
		g_destroy_consumer(cp);
	}
	if (LIST_EMPTY(&gp->provider) && LIST_EMPTY(&gp->consumer))
		g_destroy_geom(gp);
	once_is_enough = 0;
}

struct g_consumer *
g_new_consumer(struct g_geom *gp)
{
	struct g_consumer *cp;

	g_topology_assert();
	KASSERT(gp->orphan != NULL,
	    ("g_new_consumer on geom(%s) (class %s) without orphan",
	    gp->name, gp->class->name));

	cp = g_malloc(sizeof *cp, M_WAITOK | M_ZERO);
	cp->geom = gp;
	cp->stat = devstat_new_entry(cp, -1, 0, DEVSTAT_ALL_SUPPORTED,
	    DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
	LIST_INSERT_HEAD(&gp->consumer, cp, consumer);
	return (cp);
}

void
g_destroy_consumer(struct g_consumer *cp)
{
	struct g_geom *gp;

	g_trace(G_T_TOPOLOGY, "g_destroy_consumer(%p)", cp);
	g_topology_assert();
	KASSERT(cp->provider == NULL, ("g_destroy_consumer but attached"));
	KASSERT(cp->acr == 0, ("g_destroy_consumer with acr"));
	KASSERT(cp->acw == 0, ("g_destroy_consumer with acw"));
	KASSERT(cp->ace == 0, ("g_destroy_consumer with ace"));
	g_cancel_event(cp);
	gp = cp->geom;
	LIST_REMOVE(cp, consumer);
	devstat_remove_entry(cp->stat);
	g_free(cp);
	if (gp->flags & G_GEOM_WITHER)
		g_wither_geom(gp, 0);
}

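/*
 * Offer a new provider to the taste method of every loaded class, skipping
 * classes which already have a consumer attached to this provider.  Posted
 * from g_new_providerf() and again from g_access_rel() when the last writer
 * closes.
 */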
static void
g_new_provider_event(void *arg, int flag)
{
	struct g_class *mp;
	struct g_provider *pp;
	struct g_consumer *cp;
	int i;

	g_topology_assert();
	if (flag == EV_CANCEL)
		return;
	if (g_shutdown)
		return;
	pp = arg;
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp->taste == NULL)
			continue;
		i = 1;
		LIST_FOREACH(cp, &pp->consumers, consumers)
			if (cp->geom->class == mp)
				i = 0;
		if (!i)
			continue;
		mp->taste(mp, pp, 0);
		g_topology_assert();
	}
}

struct g_provider *
g_new_providerf(struct g_geom *gp, const char *fmt, ...)
{
	struct g_provider *pp;
	struct sbuf *sb;
	va_list ap;

	g_topology_assert();
	sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
	va_start(ap, fmt);
	sbuf_vprintf(sb, fmt, ap);
	va_end(ap);
	sbuf_finish(sb);
	pp = g_malloc(sizeof *pp + sbuf_len(sb) + 1, M_WAITOK | M_ZERO);
	pp->name = (char *)(pp + 1);
	strcpy(pp->name, sbuf_data(sb));
	sbuf_delete(sb);
	LIST_INIT(&pp->consumers);
	pp->error = ENXIO;
	pp->geom = gp;
	pp->stat = devstat_new_entry(pp, -1, 0, DEVSTAT_ALL_SUPPORTED,
	    DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
	LIST_INSERT_HEAD(&gp->provider, pp, provider);
	g_post_event(g_new_provider_event, pp, M_WAITOK, pp, NULL);
	return (pp);
}

void
g_error_provider(struct g_provider *pp, int error)
{

	pp->error = error;
}

struct g_provider *
g_provider_by_name(char const *arg)
{
	struct g_class *cp;
	struct g_geom *gp;
	struct g_provider *pp;

	LIST_FOREACH(cp, &g_classes, class) {
		LIST_FOREACH(gp, &cp->geom, geom) {
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (!strcmp(arg, pp->name))
					return (pp);
			}
		}
	}
	return (NULL);
}

void
g_destroy_provider(struct g_provider *pp)
{
	struct g_geom *gp;

	g_topology_assert();
	KASSERT(LIST_EMPTY(&pp->consumers),
	    ("g_destroy_provider but attached"));
	KASSERT(pp->acr == 0, ("g_destroy_provider with acr"));
	KASSERT(pp->acw == 0, ("g_destroy_provider with acw"));
	KASSERT(pp->ace == 0, ("g_destroy_provider with ace"));
	g_cancel_event(pp);
	LIST_REMOVE(pp, provider);
	gp = pp->geom;
	devstat_remove_entry(pp->stat);
	g_free(pp);
	if ((gp->flags & G_GEOM_WITHER))
		g_wither_geom(gp, 0);
}

/*
 * We keep the "geoms" list sorted by topological order (== increasing
 * numerical rank) at all times.
 * When an attach is done, the attaching geom's rank is invalidated
 * and it is moved to the tail of the list.
 * All geoms later in the sequence then have their ranks reevaluated in
 * sequence.  If we cannot assign a rank to a geom because its
 * prerequisites do not have rank, we move that element to the tail
 * of the sequence with invalid rank as well.
 * At some point we encounter our original geom again, and if we still fail
 * to assign it a rank, there must be a loop and we fail back to
 * g_attach(), which detaches again and calls redo_rank once more
 * to fix up the damage.
 * It would be much simpler code-wise to do it recursively, but we
 * can't risk that on the kernel stack.
 */
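/*
 * Illustrative example (the device names are made up): a disk geom "da0"
 * has rank 1, a slicer attached to one of its providers is re-ranked to 2,
 * and a geom consuming one of the slicer's providers to 3.  The original
 * geom can only remain rankless by the time we come back around to it if
 * some provider below it leads back to the geom itself, i.e. the attach
 * would create a cycle, and redo_rank() then reports ELOOP.
 */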
static int
redo_rank(struct g_geom *gp)
{
	struct g_consumer *cp;
	struct g_geom *gp1, *gp2;
	int n, m;

	g_topology_assert();

	/* Invalidate this geom's rank and move it to the tail */
	gp1 = TAILQ_NEXT(gp, geoms);
	if (gp1 != NULL) {
		gp->rank = 0;
		TAILQ_REMOVE(&geoms, gp, geoms);
		TAILQ_INSERT_TAIL(&geoms, gp, geoms);
	} else {
		gp1 = gp;
	}

	/* re-rank the rest of the sequence */
	for (; gp1 != NULL; gp1 = gp2) {
		gp1->rank = 0;
		m = 1;
		LIST_FOREACH(cp, &gp1->consumer, consumer) {
			if (cp->provider == NULL)
				continue;
			n = cp->provider->geom->rank;
			if (n == 0) {
				m = 0;
				break;
			} else if (n >= m)
				m = n + 1;
		}
		gp1->rank = m;
		gp2 = TAILQ_NEXT(gp1, geoms);

		/* got a rank, moving on */
		if (m != 0)
			continue;

		/* no rank to original geom means loop */
		if (gp == gp1)
			return (ELOOP);

		/* no rank, put it at the end and move on */
		TAILQ_REMOVE(&geoms, gp1, geoms);
		TAILQ_INSERT_TAIL(&geoms, gp1, geoms);
	}
	return (0);
}

int
g_attach(struct g_consumer *cp, struct g_provider *pp)
{
	int error;

	g_topology_assert();
	KASSERT(cp->provider == NULL, ("attach but attached"));
	cp->provider = pp;
	LIST_INSERT_HEAD(&pp->consumers, cp, consumers);
	error = redo_rank(cp->geom);
	if (error) {
		LIST_REMOVE(cp, consumers);
		cp->provider = NULL;
		redo_rank(cp->geom);
	}
	return (error);
}

void
g_detach(struct g_consumer *cp)
{
	struct g_provider *pp;

	g_trace(G_T_TOPOLOGY, "g_detach(%p)", cp);
	KASSERT(cp != (void*)0xd0d0d0d0, ("ARGH!"));
	g_topology_assert();
	KASSERT(cp->provider != NULL, ("detach but not attached"));
	KASSERT(cp->acr == 0, ("detach but nonzero acr"));
	KASSERT(cp->acw == 0, ("detach but nonzero acw"));
	KASSERT(cp->ace == 0, ("detach but nonzero ace"));
	KASSERT(cp->nstart == cp->nend,
	    ("detach with active requests"));
	pp = cp->provider;
	LIST_REMOVE(cp, consumers);
	cp->provider = NULL;
	if (pp->geom->flags & G_GEOM_WITHER)
		g_wither_geom(pp->geom, 0);
	redo_rank(cp->geom);
}

/*
 * g_access_abs()
 *
 * Access-check with absolute new values: Just fall through
 * and use the relative version.
 */
int
g_access_abs(struct g_consumer *cp, int acr, int acw, int ace)
{

	g_topology_assert();
	return (g_access_rel(cp,
	    acr - cp->acr,
	    acw - cp->acw,
	    ace - cp->ace));
}

/*
 * g_access_rel()
 *
 * Access-check with delta values.  The question asked is "can consumer
 * "cp" change the access counters by the relative amounts dc[rwe] ?"
 */
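/*
 * For example (counter values invented): a consumer currently holding
 * r1w0e0 that wants to add write and exclusive access would call
 * g_access_rel(cp, 0, 1, 1), which is equivalent to g_access_abs(cp, 1, 1, 1);
 * dropping everything again is g_access_rel(cp, -1, -1, -1), i.e.
 * g_access_abs(cp, 0, 0, 0).
 */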
int
g_access_rel(struct g_consumer *cp, int dcr, int dcw, int dce)
{
	struct g_provider *pp;
	int pr, pw, pe;
	int error;

	pp = cp->provider;

	g_trace(G_T_ACCESS, "g_access_rel(%p(%s), %d, %d, %d)",
	    cp, pp->name, dcr, dcw, dce);

	g_topology_assert();
	KASSERT(cp->provider != NULL, ("access but not attached"));
	KASSERT(cp->acr + dcr >= 0, ("access resulting in negative acr"));
	KASSERT(cp->acw + dcw >= 0, ("access resulting in negative acw"));
	KASSERT(cp->ace + dce >= 0, ("access resulting in negative ace"));
	KASSERT(pp->geom->access != NULL, ("NULL geom->access"));

	/*
	 * If our class cares about being spoiled, and we have been, we
	 * are probably just ahead of the event telling us that.  Fail
	 * now rather than having to unravel this later.
	 */
	if (cp->geom->spoiled != NULL && cp->spoiled) {
		KASSERT(dcr <= 0, ("spoiled but dcr = %d", dcr));
		KASSERT(dcw <= 0, ("spoiled but dcw = %d", dcw));
		KASSERT(dce <= 0, ("spoiled but dce = %d", dce));
	}

	/*
	 * Figure out what counts the provider would have had, if this
	 * consumer had (r0w0e0) at this time.
	 */
	pr = pp->acr - cp->acr;
	pw = pp->acw - cp->acw;
	pe = pp->ace - cp->ace;

	g_trace(G_T_ACCESS,
    "open delta:[r%dw%de%d] old:[r%dw%de%d] provider:[r%dw%de%d] %p(%s)",
	    dcr, dcw, dce,
	    cp->acr, cp->acw, cp->ace,
	    pp->acr, pp->acw, pp->ace,
	    pp, pp->name);

	/* If foot-shooting is enabled, any open on rank#1 is OK */
	if ((g_debugflags & 16) && pp->geom->rank == 1)
		;
	/* If we try exclusive but already write: fail */
	else if (dce > 0 && pw > 0)
		return (EPERM);
	/* If we try write but already exclusive: fail */
	else if (dcw > 0 && pe > 0)
		return (EPERM);
	/* If we try to open more but provider is error'ed: fail */
	else if ((dcr > 0 || dcw > 0 || dce > 0) && pp->error != 0)
		return (pp->error);

	/* Ok then... */

	error = pp->geom->access(pp, dcr, dcw, dce);
	if (!error) {
		/*
		 * If we open first write, spoil any partner consumers.
		 * If we close last write, trigger re-taste.
		 */
		if (pp->acw == 0 && dcw != 0)
			g_spoil(pp, cp);
		else if (pp->acw != 0 && pp->acw == -dcw &&
		    !(pp->geom->flags & G_GEOM_WITHER))
			g_post_event(g_new_provider_event, pp, M_WAITOK,
			    pp, NULL);

		pp->acr += dcr;
		pp->acw += dcw;
		pp->ace += dce;
		cp->acr += dcr;
		cp->acw += dcw;
		cp->ace += dce;
	}
	return (error);
}

int
g_handleattr_int(struct bio *bp, const char *attribute, int val)
{

	return (g_handleattr(bp, attribute, &val, sizeof val));
}

int
g_handleattr_off_t(struct bio *bp, const char *attribute, off_t val)
{

	return (g_handleattr(bp, attribute, &val, sizeof val));
}

int
g_handleattr(struct bio *bp, const char *attribute, void *val, int len)
{
	int error;

	if (strcmp(bp->bio_attribute, attribute))
		return (0);
	if (bp->bio_length != len) {
		printf("bio_length %jd len %d -> EFAULT\n",
		    (intmax_t)bp->bio_length, len);
		error = EFAULT;
	} else {
		error = 0;
		bcopy(val, bp->bio_data, len);
		bp->bio_completed = len;
	}
	g_io_deliver(bp, error);
	return (1);
}

int
g_std_access(struct g_provider *pp __unused,
    int dr __unused, int dw __unused, int de __unused)
{

	return (0);
}

void
g_std_done(struct bio *bp)
{
	struct bio *bp2;

	bp2 = bp->bio_parent;
	if (bp2->bio_error == 0)
		bp2->bio_error = bp->bio_error;
	bp2->bio_completed += bp->bio_completed;
	g_destroy_bio(bp);
	bp2->bio_inbed++;
	if (bp2->bio_children == bp2->bio_inbed)
		g_io_deliver(bp2, bp2->bio_error);
}

/* XXX: maybe this is only g_slice_spoiled */

void
g_std_spoiled(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_trace(G_T_TOPOLOGY, "g_std_spoiled(%p)", cp);
	g_topology_assert();
	g_detach(cp);
	gp = cp->geom;
	LIST_FOREACH(pp, &gp->provider, provider)
		g_orphan_provider(pp, ENXIO);
	g_destroy_consumer(cp);
	if (LIST_EMPTY(&gp->provider) && LIST_EMPTY(&gp->consumer))
		g_destroy_geom(gp);
	else
		gp->flags |= G_GEOM_WITHER;
}

/*
 * Spoiling happens when a provider is opened for writing, but consumers
 * which are configured by in-band data are attached to it (slicers for
 * instance).  Since the write might potentially change the in-band data,
 * such consumers need to re-evaluate their existence after the writing
 * session closes.  We do this by (offering to) tear them down when the
 * open for write happens, in return for a re-taste when it closes again.
 * Together with the fact that such consumers grab an 'e' bit whenever they
 * are open, regardless of mode, this ends up DTRT.
 */
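/*
 * Concretely (illustrative scenario): when a disk provider is opened for
 * writing, g_spoil() marks every other consumer attached to it as spoiled,
 * and the slicer built from the disk's partition table tears itself down
 * from its spoiled method.  When the last writer closes, g_access_rel()
 * posts g_new_provider_event() and the slicer's class gets to re-taste the
 * possibly rewritten disk.
 */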
static void
g_spoil_event(void *arg, int flag)
{
	struct g_provider *pp;
	struct g_consumer *cp, *cp2;

	g_topology_assert();
	if (flag == EV_CANCEL)
		return;
	pp = arg;
	for (cp = LIST_FIRST(&pp->consumers); cp != NULL; cp = cp2) {
		cp2 = LIST_NEXT(cp, consumers);
		if (!cp->spoiled)
			continue;
		cp->spoiled = 0;
		if (cp->geom->spoiled == NULL)
			continue;
		cp->geom->spoiled(cp);
		g_topology_assert();
	}
}

void
g_spoil(struct g_provider *pp, struct g_consumer *cp)
{
	struct g_consumer *cp2;

	g_topology_assert();

	LIST_FOREACH(cp2, &pp->consumers, consumers) {
		if (cp2 == cp)
			continue;
/*
		KASSERT(cp2->acr == 0, ("spoiling cp->acr = %d", cp2->acr));
		KASSERT(cp2->acw == 0, ("spoiling cp->acw = %d", cp2->acw));
*/
		KASSERT(cp2->ace == 0, ("spoiling cp->ace = %d", cp2->ace));
		cp2->spoiled++;
	}
	g_post_event(g_spoil_event, pp, M_WAITOK, pp, NULL);
}

int
g_getattr__(const char *attr, struct g_consumer *cp, void *var, int len)
{
	int error, i;

	i = len;
	error = g_io_getattr(attr, cp, &i, var);
	if (error)
		return (error);
	if (i != len)
		return (EINVAL);
	return (0);
}

/*
 * Check if the given pointer is a live object
 */
void
g_sanity(void const *ptr)
{
	struct g_class *mp;
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_provider *pp;

	if (!(g_debugflags & 0x8))
		return;
	LIST_FOREACH(mp, &g_classes, class) {
		KASSERT(mp != ptr, ("Ptr is live class"));
		LIST_FOREACH(gp, &mp->geom, geom) {
			KASSERT(gp != ptr, ("Ptr is live geom"));
			KASSERT(gp->name != ptr, ("Ptr is live geom's name"));
			LIST_FOREACH(cp, &gp->consumer, consumer) {
				KASSERT(cp != ptr, ("Ptr is live consumer"));
			}
			LIST_FOREACH(pp, &gp->provider, provider) {
				KASSERT(pp != ptr, ("Ptr is live provider"));
			}
		}
	}
}