1 /*- 2 * Copyright (c) 2002 Poul-Henning Kamp 3 * Copyright (c) 2002 Networks Associates Technology, Inc. 4 * All rights reserved. 5 * 6 * This software was developed for the FreeBSD Project by Poul-Henning Kamp 7 * and NAI Labs, the Security Research Division of Network Associates, Inc. 8 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the 9 * DARPA CHATS research program. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. The names of the authors may not be used to endorse or promote 20 * products derived from this software without specific prior written 21 * permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 */ 35 36 #include <sys/cdefs.h> 37 __FBSDID("$FreeBSD$"); 38 39 #include <sys/param.h> 40 #include <sys/systm.h> 41 #include <sys/devicestat.h> 42 #include <sys/kernel.h> 43 #include <sys/malloc.h> 44 #include <sys/bio.h> 45 #include <sys/sysctl.h> 46 #include <sys/proc.h> 47 #include <sys/kthread.h> 48 #include <sys/lock.h> 49 #include <sys/mutex.h> 50 #include <sys/errno.h> 51 #include <sys/sbuf.h> 52 #include <geom/geom.h> 53 #include <geom/geom_int.h> 54 #include <machine/stdarg.h> 55 56 struct class_list_head g_classes = LIST_HEAD_INITIALIZER(g_classes); 57 static struct g_tailq_head geoms = TAILQ_HEAD_INITIALIZER(geoms); 58 char *g_wait_event, *g_wait_up, *g_wait_down, *g_wait_sim; 59 60 struct g_hh00 { 61 struct g_class *mp; 62 int error; 63 int post; 64 }; 65 66 /* 67 * This event offers a new class a chance to taste all preexisting providers. 68 */ 69 static void 70 g_load_class(void *arg, int flag) 71 { 72 struct g_hh00 *hh; 73 struct g_class *mp2, *mp; 74 struct g_geom *gp; 75 struct g_provider *pp; 76 77 g_topology_assert(); 78 if (flag == EV_CANCEL) /* XXX: can't happen ? */ 79 return; 80 if (g_shutdown) 81 return; 82 83 hh = arg; 84 mp = hh->mp; 85 hh->error = 0; 86 if (hh->post) { 87 g_free(hh); 88 hh = NULL; 89 } 90 g_trace(G_T_TOPOLOGY, "g_load_class(%s)", mp->name); 91 KASSERT(mp->name != NULL && *mp->name != '\0', 92 ("GEOM class has no name")); 93 LIST_FOREACH(mp2, &g_classes, class) { 94 if (mp2 == mp) { 95 printf("The GEOM class %s is already loaded.\n", 96 mp2->name); 97 if (hh != NULL) 98 hh->error = EEXIST; 99 return; 100 } else if (strcmp(mp2->name, mp->name) == 0) { 101 printf("A GEOM class %s is already loaded.\n", 102 mp2->name); 103 if (hh != NULL) 104 hh->error = EEXIST; 105 return; 106 } 107 } 108 109 LIST_INIT(&mp->geom); 110 LIST_INSERT_HEAD(&g_classes, mp, class); 111 if (mp->init != NULL) 112 mp->init(mp); 113 if (mp->taste == NULL) 114 return; 115 LIST_FOREACH(mp2, &g_classes, class) { 116 if (mp == mp2) 117 continue; 118 LIST_FOREACH(gp, &mp2->geom, geom) { 119 LIST_FOREACH(pp, &gp->provider, provider) { 120 mp->taste(mp, pp, 0); 121 g_topology_assert(); 122 } 123 } 124 } 125 } 126 127 static void 128 g_unload_class(void *arg, int flag) 129 { 130 struct g_hh00 *hh; 131 struct g_class *mp; 132 struct g_geom *gp; 133 struct g_provider *pp; 134 struct g_consumer *cp; 135 int error; 136 137 g_topology_assert(); 138 hh = arg; 139 mp = hh->mp; 140 G_VALID_CLASS(mp); 141 g_trace(G_T_TOPOLOGY, "g_unload_class(%s)", mp->name); 142 143 /* 144 * We allow unloading if we have no geoms, or a class 145 * method we can use to get rid of them. 146 */ 147 if (!LIST_EMPTY(&mp->geom) && mp->destroy_geom == NULL) { 148 hh->error = EOPNOTSUPP; 149 return; 150 } 151 152 /* We refuse to unload if anything is open */ 153 LIST_FOREACH(gp, &mp->geom, geom) { 154 LIST_FOREACH(pp, &gp->provider, provider) 155 if (pp->acr || pp->acw || pp->ace) { 156 hh->error = EBUSY; 157 return; 158 } 159 LIST_FOREACH(cp, &gp->consumer, consumer) 160 if (cp->acr || cp->acw || cp->ace) { 161 hh->error = EBUSY; 162 return; 163 } 164 } 165 166 /* Bar new entries */ 167 mp->taste = NULL; 168 mp->config = NULL; 169 170 error = 0; 171 for (;;) { 172 gp = LIST_FIRST(&mp->geom); 173 if (gp == NULL) 174 break; 175 error = mp->destroy_geom(NULL, mp, gp); 176 if (error != 0) 177 break; 178 } 179 if (error == 0) { 180 if (mp->fini != NULL) 181 mp->fini(mp); 182 LIST_REMOVE(mp, class); 183 } 184 hh->error = error; 185 return; 186 } 187 188 int 189 g_modevent(module_t mod, int type, void *data) 190 { 191 struct g_hh00 *hh; 192 int error; 193 static int g_ignition; 194 struct g_class *mp; 195 196 mp = data; 197 if (mp->version != G_VERSION) { 198 printf("GEOM class %s has Wrong version %x\n", 199 mp->name, mp->version); 200 return (EINVAL); 201 } 202 if (!g_ignition) { 203 g_ignition++; 204 g_init(); 205 } 206 hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO); 207 hh->mp = data; 208 error = EOPNOTSUPP; 209 switch (type) { 210 case MOD_LOAD: 211 g_trace(G_T_TOPOLOGY, "g_modevent(%s, LOAD)", hh->mp->name); 212 /* 213 * Once the system is not cold, MOD_LOAD calls will be 214 * from the userland and the g_event thread will be able 215 * to acknowledge their completion. 216 */ 217 if (cold) { 218 hh->post = 1; 219 error = g_post_event(g_load_class, hh, M_WAITOK, NULL); 220 } else { 221 error = g_waitfor_event(g_load_class, hh, M_WAITOK, 222 NULL); 223 if (error == 0) 224 error = hh->error; 225 g_free(hh); 226 } 227 break; 228 case MOD_UNLOAD: 229 g_trace(G_T_TOPOLOGY, "g_modevent(%s, UNLOAD)", hh->mp->name); 230 error = g_waitfor_event(g_unload_class, hh, M_WAITOK, NULL); 231 if (error == 0) 232 error = hh->error; 233 if (error == 0) { 234 KASSERT(LIST_EMPTY(&hh->mp->geom), 235 ("Unloaded class (%s) still has geom", hh->mp->name)); 236 } 237 g_free(hh); 238 break; 239 default: 240 g_free(hh); 241 break; 242 } 243 return (error); 244 } 245 246 struct g_geom * 247 g_new_geomf(struct g_class *mp, const char *fmt, ...) 248 { 249 struct g_geom *gp; 250 va_list ap; 251 struct sbuf *sb; 252 253 g_topology_assert(); 254 G_VALID_CLASS(mp); 255 sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND); 256 va_start(ap, fmt); 257 sbuf_vprintf(sb, fmt, ap); 258 va_end(ap); 259 sbuf_finish(sb); 260 gp = g_malloc(sizeof *gp, M_WAITOK | M_ZERO); 261 gp->name = g_malloc(sbuf_len(sb) + 1, M_WAITOK | M_ZERO); 262 gp->class = mp; 263 gp->rank = 1; 264 LIST_INIT(&gp->consumer); 265 LIST_INIT(&gp->provider); 266 LIST_INSERT_HEAD(&mp->geom, gp, geom); 267 TAILQ_INSERT_HEAD(&geoms, gp, geoms); 268 strcpy(gp->name, sbuf_data(sb)); 269 sbuf_delete(sb); 270 /* Fill in defaults from class */ 271 gp->start = mp->start; 272 gp->spoiled = mp->spoiled; 273 gp->dumpconf = mp->dumpconf; 274 gp->access = mp->access; 275 gp->orphan = mp->orphan; 276 gp->ioctl = mp->ioctl; 277 return (gp); 278 } 279 280 void 281 g_destroy_geom(struct g_geom *gp) 282 { 283 284 g_topology_assert(); 285 G_VALID_GEOM(gp); 286 g_trace(G_T_TOPOLOGY, "g_destroy_geom(%p(%s))", gp, gp->name); 287 KASSERT(LIST_EMPTY(&gp->consumer), 288 ("g_destroy_geom(%s) with consumer(s) [%p]", 289 gp->name, LIST_FIRST(&gp->consumer))); 290 KASSERT(LIST_EMPTY(&gp->provider), 291 ("g_destroy_geom(%s) with provider(s) [%p]", 292 gp->name, LIST_FIRST(&gp->provider))); 293 g_cancel_event(gp); 294 LIST_REMOVE(gp, geom); 295 TAILQ_REMOVE(&geoms, gp, geoms); 296 g_free(gp->name); 297 g_free(gp); 298 } 299 300 /* 301 * This function is called (repeatedly) until the has withered away. 302 */ 303 void 304 g_wither_geom(struct g_geom *gp, int error) 305 { 306 struct g_provider *pp; 307 308 g_topology_assert(); 309 G_VALID_GEOM(gp); 310 g_trace(G_T_TOPOLOGY, "g_wither_geom(%p(%s))", gp, gp->name); 311 if (!(gp->flags & G_GEOM_WITHER)) { 312 gp->flags |= G_GEOM_WITHER; 313 LIST_FOREACH(pp, &gp->provider, provider) 314 if (!(pp->flags & G_PF_ORPHAN)) 315 g_orphan_provider(pp, error); 316 } 317 g_do_wither(); 318 } 319 320 /* 321 * Convenience function to destroy a particular provider. 322 */ 323 void 324 g_wither_provider(struct g_provider *pp, int error) 325 { 326 327 pp->flags |= G_PF_WITHER; 328 if (!(pp->flags & G_PF_ORPHAN)) 329 g_orphan_provider(pp, error); 330 } 331 332 /* 333 * This function is called (repeatedly) until the has withered away. 334 */ 335 void 336 g_wither_geom_close(struct g_geom *gp, int error) 337 { 338 struct g_consumer *cp; 339 340 g_topology_assert(); 341 G_VALID_GEOM(gp); 342 g_trace(G_T_TOPOLOGY, "g_wither_geom_close(%p(%s))", gp, gp->name); 343 LIST_FOREACH(cp, &gp->consumer, consumer) 344 if (cp->acr || cp->acw || cp->ace) 345 g_access(cp, -cp->acr, -cp->acw, -cp->ace); 346 g_wither_geom(gp, error); 347 } 348 349 /* 350 * This function is called (repeatedly) until we cant wash away more 351 * withered bits at present. Return value contains two bits. Bit 0 352 * set means "withering stuff we can't wash now", bit 1 means "call 353 * me again, there may be stuff I didn't get the first time around. 354 */ 355 int 356 g_wither_washer() 357 { 358 struct g_class *mp; 359 struct g_geom *gp, *gp2; 360 struct g_provider *pp, *pp2; 361 struct g_consumer *cp, *cp2; 362 int result; 363 364 result = 0; 365 g_topology_assert(); 366 LIST_FOREACH(mp, &g_classes, class) { 367 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 368 LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) { 369 if (!(pp->flags & G_PF_WITHER)) 370 continue; 371 if (LIST_EMPTY(&pp->consumers)) 372 g_destroy_provider(pp); 373 else 374 result |= 1; 375 } 376 if (!(gp->flags & G_GEOM_WITHER)) 377 continue; 378 LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) { 379 if (LIST_EMPTY(&pp->consumers)) 380 g_destroy_provider(pp); 381 else 382 result |= 1; 383 } 384 LIST_FOREACH_SAFE(cp, &gp->consumer, consumer, cp2) { 385 if (cp->acr || cp->acw || cp->ace) { 386 result |= 1; 387 continue; 388 } 389 if (cp->provider != NULL) 390 g_detach(cp); 391 g_destroy_consumer(cp); 392 result |= 2; 393 } 394 if (LIST_EMPTY(&gp->provider) && 395 LIST_EMPTY(&gp->consumer)) 396 g_destroy_geom(gp); 397 else 398 result |= 1; 399 } 400 } 401 return (result); 402 } 403 404 struct g_consumer * 405 g_new_consumer(struct g_geom *gp) 406 { 407 struct g_consumer *cp; 408 409 g_topology_assert(); 410 G_VALID_GEOM(gp); 411 KASSERT(!(gp->flags & G_GEOM_WITHER), 412 ("g_new_consumer on WITHERing geom(%s) (class %s)", 413 gp->name, gp->class->name)); 414 KASSERT(gp->orphan != NULL, 415 ("g_new_consumer on geom(%s) (class %s) without orphan", 416 gp->name, gp->class->name)); 417 418 cp = g_malloc(sizeof *cp, M_WAITOK | M_ZERO); 419 cp->geom = gp; 420 cp->stat = devstat_new_entry(cp, -1, 0, DEVSTAT_ALL_SUPPORTED, 421 DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); 422 LIST_INSERT_HEAD(&gp->consumer, cp, consumer); 423 return(cp); 424 } 425 426 void 427 g_destroy_consumer(struct g_consumer *cp) 428 { 429 struct g_geom *gp; 430 431 g_topology_assert(); 432 G_VALID_CONSUMER(cp); 433 g_trace(G_T_TOPOLOGY, "g_destroy_consumer(%p)", cp); 434 KASSERT (cp->provider == NULL, ("g_destroy_consumer but attached")); 435 KASSERT (cp->acr == 0, ("g_destroy_consumer with acr")); 436 KASSERT (cp->acw == 0, ("g_destroy_consumer with acw")); 437 KASSERT (cp->ace == 0, ("g_destroy_consumer with ace")); 438 g_cancel_event(cp); 439 gp = cp->geom; 440 LIST_REMOVE(cp, consumer); 441 devstat_remove_entry(cp->stat); 442 g_free(cp); 443 if (gp->flags & G_GEOM_WITHER) 444 g_do_wither(); 445 } 446 447 static void 448 g_new_provider_event(void *arg, int flag) 449 { 450 struct g_class *mp; 451 struct g_provider *pp; 452 struct g_consumer *cp; 453 int i; 454 455 g_topology_assert(); 456 if (flag == EV_CANCEL) 457 return; 458 if (g_shutdown) 459 return; 460 pp = arg; 461 G_VALID_PROVIDER(pp); 462 LIST_FOREACH(mp, &g_classes, class) { 463 if (mp->taste == NULL) 464 continue; 465 i = 1; 466 LIST_FOREACH(cp, &pp->consumers, consumers) 467 if (cp->geom->class == mp) 468 i = 0; 469 if (!i) 470 continue; 471 mp->taste(mp, pp, 0); 472 g_topology_assert(); 473 } 474 } 475 476 477 struct g_provider * 478 g_new_providerf(struct g_geom *gp, const char *fmt, ...) 479 { 480 struct g_provider *pp; 481 struct sbuf *sb; 482 va_list ap; 483 484 g_topology_assert(); 485 G_VALID_GEOM(gp); 486 KASSERT(gp->access != NULL, 487 ("new provider on geom(%s) without ->access (class %s)", 488 gp->name, gp->class->name)); 489 KASSERT(gp->start != NULL, 490 ("new provider on geom(%s) without ->start (class %s)", 491 gp->name, gp->class->name)); 492 KASSERT(!(gp->flags & G_GEOM_WITHER), 493 ("new provider on WITHERing geom(%s) (class %s)", 494 gp->name, gp->class->name)); 495 sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND); 496 va_start(ap, fmt); 497 sbuf_vprintf(sb, fmt, ap); 498 va_end(ap); 499 sbuf_finish(sb); 500 pp = g_malloc(sizeof *pp + sbuf_len(sb) + 1, M_WAITOK | M_ZERO); 501 pp->name = (char *)(pp + 1); 502 strcpy(pp->name, sbuf_data(sb)); 503 sbuf_delete(sb); 504 LIST_INIT(&pp->consumers); 505 pp->error = ENXIO; 506 pp->geom = gp; 507 pp->stat = devstat_new_entry(pp, -1, 0, DEVSTAT_ALL_SUPPORTED, 508 DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); 509 LIST_INSERT_HEAD(&gp->provider, pp, provider); 510 g_post_event(g_new_provider_event, pp, M_WAITOK, pp, gp, NULL); 511 return (pp); 512 } 513 514 void 515 g_error_provider(struct g_provider *pp, int error) 516 { 517 518 /* G_VALID_PROVIDER(pp); We may not have g_topology */ 519 pp->error = error; 520 } 521 522 struct g_provider * 523 g_provider_by_name(char const *arg) 524 { 525 struct g_class *cp; 526 struct g_geom *gp; 527 struct g_provider *pp; 528 529 LIST_FOREACH(cp, &g_classes, class) { 530 LIST_FOREACH(gp, &cp->geom, geom) { 531 LIST_FOREACH(pp, &gp->provider, provider) { 532 if (!strcmp(arg, pp->name)) 533 return (pp); 534 } 535 } 536 } 537 return (NULL); 538 } 539 540 void 541 g_destroy_provider(struct g_provider *pp) 542 { 543 struct g_geom *gp; 544 545 g_topology_assert(); 546 G_VALID_PROVIDER(pp); 547 KASSERT(LIST_EMPTY(&pp->consumers), 548 ("g_destroy_provider but attached")); 549 KASSERT (pp->acr == 0, ("g_destroy_provider with acr")); 550 KASSERT (pp->acw == 0, ("g_destroy_provider with acw")); 551 KASSERT (pp->acw == 0, ("g_destroy_provider with ace")); 552 g_cancel_event(pp); 553 LIST_REMOVE(pp, provider); 554 gp = pp->geom; 555 devstat_remove_entry(pp->stat); 556 g_free(pp); 557 if ((gp->flags & G_GEOM_WITHER)) 558 g_do_wither(); 559 } 560 561 /* 562 * We keep the "geoms" list sorted by topological order (== increasing 563 * numerical rank) at all times. 564 * When an attach is done, the attaching geoms rank is invalidated 565 * and it is moved to the tail of the list. 566 * All geoms later in the sequence has their ranks reevaluated in 567 * sequence. If we cannot assign rank to a geom because it's 568 * prerequisites do not have rank, we move that element to the tail 569 * of the sequence with invalid rank as well. 570 * At some point we encounter our original geom and if we stil fail 571 * to assign it a rank, there must be a loop and we fail back to 572 * g_attach() which detach again and calls redo_rank again 573 * to fix up the damage. 574 * It would be much simpler code wise to do it recursively, but we 575 * can't risk that on the kernel stack. 576 */ 577 578 static int 579 redo_rank(struct g_geom *gp) 580 { 581 struct g_consumer *cp; 582 struct g_geom *gp1, *gp2; 583 int n, m; 584 585 g_topology_assert(); 586 G_VALID_GEOM(gp); 587 588 /* Invalidate this geoms rank and move it to the tail */ 589 gp1 = TAILQ_NEXT(gp, geoms); 590 if (gp1 != NULL) { 591 gp->rank = 0; 592 TAILQ_REMOVE(&geoms, gp, geoms); 593 TAILQ_INSERT_TAIL(&geoms, gp, geoms); 594 } else { 595 gp1 = gp; 596 } 597 598 /* re-rank the rest of the sequence */ 599 for (; gp1 != NULL; gp1 = gp2) { 600 gp1->rank = 0; 601 m = 1; 602 LIST_FOREACH(cp, &gp1->consumer, consumer) { 603 if (cp->provider == NULL) 604 continue; 605 n = cp->provider->geom->rank; 606 if (n == 0) { 607 m = 0; 608 break; 609 } else if (n >= m) 610 m = n + 1; 611 } 612 gp1->rank = m; 613 gp2 = TAILQ_NEXT(gp1, geoms); 614 615 /* got a rank, moving on */ 616 if (m != 0) 617 continue; 618 619 /* no rank to original geom means loop */ 620 if (gp == gp1) 621 return (ELOOP); 622 623 /* no rank, put it at the end move on */ 624 TAILQ_REMOVE(&geoms, gp1, geoms); 625 TAILQ_INSERT_TAIL(&geoms, gp1, geoms); 626 } 627 return (0); 628 } 629 630 int 631 g_attach(struct g_consumer *cp, struct g_provider *pp) 632 { 633 int error; 634 635 g_topology_assert(); 636 G_VALID_CONSUMER(cp); 637 G_VALID_PROVIDER(pp); 638 KASSERT(cp->provider == NULL, ("attach but attached")); 639 cp->provider = pp; 640 LIST_INSERT_HEAD(&pp->consumers, cp, consumers); 641 error = redo_rank(cp->geom); 642 if (error) { 643 LIST_REMOVE(cp, consumers); 644 cp->provider = NULL; 645 redo_rank(cp->geom); 646 } 647 return (error); 648 } 649 650 void 651 g_detach(struct g_consumer *cp) 652 { 653 struct g_provider *pp; 654 655 g_topology_assert(); 656 G_VALID_CONSUMER(cp); 657 g_trace(G_T_TOPOLOGY, "g_detach(%p)", cp); 658 KASSERT(cp->provider != NULL, ("detach but not attached")); 659 KASSERT(cp->acr == 0, ("detach but nonzero acr")); 660 KASSERT(cp->acw == 0, ("detach but nonzero acw")); 661 KASSERT(cp->ace == 0, ("detach but nonzero ace")); 662 KASSERT(cp->nstart == cp->nend, 663 ("detach with active requests")); 664 pp = cp->provider; 665 LIST_REMOVE(cp, consumers); 666 cp->provider = NULL; 667 if (pp->geom->flags & G_GEOM_WITHER) 668 g_do_wither(); 669 else if (pp->flags & G_PF_WITHER) 670 g_do_wither(); 671 redo_rank(cp->geom); 672 } 673 674 /* 675 * g_access() 676 * 677 * Access-check with delta values. The question asked is "can provider 678 * "cp" change the access counters by the relative amounts dc[rwe] ?" 679 */ 680 681 int 682 g_access(struct g_consumer *cp, int dcr, int dcw, int dce) 683 { 684 struct g_provider *pp; 685 int pr,pw,pe; 686 int error; 687 688 g_topology_assert(); 689 G_VALID_CONSUMER(cp); 690 pp = cp->provider; 691 KASSERT(pp != NULL, ("access but not attached")); 692 G_VALID_PROVIDER(pp); 693 694 g_trace(G_T_ACCESS, "g_access(%p(%s), %d, %d, %d)", 695 cp, pp->name, dcr, dcw, dce); 696 697 KASSERT(cp->acr + dcr >= 0, ("access resulting in negative acr")); 698 KASSERT(cp->acw + dcw >= 0, ("access resulting in negative acw")); 699 KASSERT(cp->ace + dce >= 0, ("access resulting in negative ace")); 700 KASSERT(dcr != 0 || dcw != 0 || dce != 0, ("NOP access request")); 701 KASSERT(pp->geom->access != NULL, ("NULL geom->access")); 702 703 /* 704 * If our class cares about being spoiled, and we have been, we 705 * are probably just ahead of the event telling us that. Fail 706 * now rather than having to unravel this later. 707 */ 708 if (cp->geom->spoiled != NULL && cp->spoiled && 709 (dcr > 0 || dcw > 0 || dce > 0)) 710 return (ENXIO); 711 712 /* 713 * Figure out what counts the provider would have had, if this 714 * consumer had (r0w0e0) at this time. 715 */ 716 pr = pp->acr - cp->acr; 717 pw = pp->acw - cp->acw; 718 pe = pp->ace - cp->ace; 719 720 g_trace(G_T_ACCESS, 721 "open delta:[r%dw%de%d] old:[r%dw%de%d] provider:[r%dw%de%d] %p(%s)", 722 dcr, dcw, dce, 723 cp->acr, cp->acw, cp->ace, 724 pp->acr, pp->acw, pp->ace, 725 pp, pp->name); 726 727 /* If foot-shooting is enabled, any open on rank#1 is OK */ 728 if ((g_debugflags & 16) && pp->geom->rank == 1) 729 ; 730 /* If we try exclusive but already write: fail */ 731 else if (dce > 0 && pw > 0) 732 return (EPERM); 733 /* If we try write but already exclusive: fail */ 734 else if (dcw > 0 && pe > 0) 735 return (EPERM); 736 /* If we try to open more but provider is error'ed: fail */ 737 else if ((dcr > 0 || dcw > 0 || dce > 0) && pp->error != 0) 738 return (pp->error); 739 740 /* Ok then... */ 741 742 error = pp->geom->access(pp, dcr, dcw, dce); 743 KASSERT(dcr > 0 || dcw > 0 || dce > 0 || error == 0, 744 ("Geom provider %s::%s failed closing ->access()", 745 pp->geom->class->name, pp->name)); 746 if (!error) { 747 /* 748 * If we open first write, spoil any partner consumers. 749 * If we close last write and provider is not errored, 750 * trigger re-taste. 751 */ 752 if (pp->acw == 0 && dcw != 0) 753 g_spoil(pp, cp); 754 else if (pp->acw != 0 && pp->acw == -dcw && pp->error == 0 && 755 !(pp->geom->flags & G_GEOM_WITHER)) 756 g_post_event(g_new_provider_event, pp, M_WAITOK, 757 pp, NULL); 758 759 pp->acr += dcr; 760 pp->acw += dcw; 761 pp->ace += dce; 762 cp->acr += dcr; 763 cp->acw += dcw; 764 cp->ace += dce; 765 if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) 766 KASSERT(pp->sectorsize > 0, 767 ("Provider %s lacks sectorsize", pp->name)); 768 } 769 return (error); 770 } 771 772 int 773 g_handleattr_int(struct bio *bp, const char *attribute, int val) 774 { 775 776 return (g_handleattr(bp, attribute, &val, sizeof val)); 777 } 778 779 int 780 g_handleattr_off_t(struct bio *bp, const char *attribute, off_t val) 781 { 782 783 return (g_handleattr(bp, attribute, &val, sizeof val)); 784 } 785 786 int 787 g_handleattr(struct bio *bp, const char *attribute, void *val, int len) 788 { 789 int error; 790 791 if (strcmp(bp->bio_attribute, attribute)) 792 return (0); 793 if (bp->bio_length != len) { 794 printf("bio_length %jd len %d -> EFAULT\n", 795 (intmax_t)bp->bio_length, len); 796 error = EFAULT; 797 } else { 798 error = 0; 799 bcopy(val, bp->bio_data, len); 800 bp->bio_completed = len; 801 } 802 g_io_deliver(bp, error); 803 return (1); 804 } 805 806 int 807 g_std_access(struct g_provider *pp, 808 int dr __unused, int dw __unused, int de __unused) 809 { 810 811 g_topology_assert(); 812 G_VALID_PROVIDER(pp); 813 return (0); 814 } 815 816 void 817 g_std_done(struct bio *bp) 818 { 819 struct bio *bp2; 820 821 bp2 = bp->bio_parent; 822 if (bp2->bio_error == 0) 823 bp2->bio_error = bp->bio_error; 824 bp2->bio_completed += bp->bio_completed; 825 g_destroy_bio(bp); 826 bp2->bio_inbed++; 827 if (bp2->bio_children == bp2->bio_inbed) 828 g_io_deliver(bp2, bp2->bio_error); 829 } 830 831 /* XXX: maybe this is only g_slice_spoiled */ 832 833 void 834 g_std_spoiled(struct g_consumer *cp) 835 { 836 struct g_geom *gp; 837 struct g_provider *pp; 838 839 g_topology_assert(); 840 G_VALID_CONSUMER(cp); 841 g_trace(G_T_TOPOLOGY, "g_std_spoiled(%p)", cp); 842 g_detach(cp); 843 gp = cp->geom; 844 LIST_FOREACH(pp, &gp->provider, provider) 845 g_orphan_provider(pp, ENXIO); 846 g_destroy_consumer(cp); 847 if (LIST_EMPTY(&gp->provider) && LIST_EMPTY(&gp->consumer)) 848 g_destroy_geom(gp); 849 else 850 gp->flags |= G_GEOM_WITHER; 851 } 852 853 /* 854 * Spoiling happens when a provider is opened for writing, but consumers 855 * which are configured by in-band data are attached (slicers for instance). 856 * Since the write might potentially change the in-band data, such consumers 857 * need to re-evaluate their existence after the writing session closes. 858 * We do this by (offering to) tear them down when the open for write happens 859 * in return for a re-taste when it closes again. 860 * Together with the fact that such consumers grab an 'e' bit whenever they 861 * are open, regardless of mode, this ends up DTRT. 862 */ 863 864 static void 865 g_spoil_event(void *arg, int flag) 866 { 867 struct g_provider *pp; 868 struct g_consumer *cp, *cp2; 869 870 g_topology_assert(); 871 if (flag == EV_CANCEL) 872 return; 873 pp = arg; 874 G_VALID_PROVIDER(pp); 875 for (cp = LIST_FIRST(&pp->consumers); cp != NULL; cp = cp2) { 876 cp2 = LIST_NEXT(cp, consumers); 877 if (!cp->spoiled) 878 continue; 879 cp->spoiled = 0; 880 if (cp->geom->spoiled == NULL) 881 continue; 882 cp->geom->spoiled(cp); 883 g_topology_assert(); 884 } 885 } 886 887 void 888 g_spoil(struct g_provider *pp, struct g_consumer *cp) 889 { 890 struct g_consumer *cp2; 891 892 g_topology_assert(); 893 G_VALID_PROVIDER(pp); 894 G_VALID_CONSUMER(cp); 895 896 LIST_FOREACH(cp2, &pp->consumers, consumers) { 897 if (cp2 == cp) 898 continue; 899 /* 900 KASSERT(cp2->acr == 0, ("spoiling cp->acr = %d", cp2->acr)); 901 KASSERT(cp2->acw == 0, ("spoiling cp->acw = %d", cp2->acw)); 902 */ 903 KASSERT(cp2->ace == 0, ("spoiling cp->ace = %d", cp2->ace)); 904 cp2->spoiled++; 905 } 906 g_post_event(g_spoil_event, pp, M_WAITOK, pp, NULL); 907 } 908 909 int 910 g_getattr__(const char *attr, struct g_consumer *cp, void *var, int len) 911 { 912 int error, i; 913 914 i = len; 915 error = g_io_getattr(attr, cp, &i, var); 916 if (error) 917 return (error); 918 if (i != len) 919 return (EINVAL); 920 return (0); 921 } 922 923 #ifdef DIAGNOSTIC 924 /* 925 * This function walks (topologically unsafely) the mesh and return a 926 * non-zero integer if it finds the argument pointer is an object. 927 * The return value indicates which type of object it is belived to be. 928 * If topology is not locked, this function is potentially dangerous, 929 * but since it is for debugging purposes and can be useful for instance 930 * from DDB, we do not assert topology lock is held. 931 */ 932 int 933 g_valid_obj(void const *ptr) 934 { 935 struct g_class *mp; 936 struct g_geom *gp; 937 struct g_consumer *cp; 938 struct g_provider *pp; 939 940 LIST_FOREACH(mp, &g_classes, class) { 941 if (ptr == mp) 942 return (1); 943 LIST_FOREACH(gp, &mp->geom, geom) { 944 if (ptr == gp) 945 return (2); 946 LIST_FOREACH(cp, &gp->consumer, consumer) 947 if (ptr == cp) 948 return (3); 949 LIST_FOREACH(pp, &gp->provider, provider) 950 if (ptr == pp) 951 return (4); 952 } 953 } 954 return(0); 955 } 956 #endif 957