/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/devicestat.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <sys/sbuf.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

struct class_list_head g_classes = LIST_HEAD_INITIALIZER(g_classes);
static struct g_tailq_head geoms = TAILQ_HEAD_INITIALIZER(geoms);
char *g_wait_event, *g_wait_up, *g_wait_down, *g_wait_sim;

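/*
 * Argument block handed to the class load/unload/retaste events.  When
 * "post" is set the event was posted asynchronously, so the handler owns
 * (and frees) the block and no error can be reported back.
 */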
struct g_hh00 {
	struct g_class *mp;
	int error;
	int post;
};

/*
 * This event offers a new class a chance to taste all preexisting providers.
 */
static void
g_load_class(void *arg, int flag)
{
	struct g_hh00 *hh;
	struct g_class *mp2, *mp;
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_assert();
	if (flag == EV_CANCEL)	/* XXX: can't happen ? */
		return;
	if (g_shutdown)
		return;

	hh = arg;
	mp = hh->mp;
	hh->error = 0;
	if (hh->post) {
		g_free(hh);
		hh = NULL;
	}
	g_trace(G_T_TOPOLOGY, "g_load_class(%s)", mp->name);
	KASSERT(mp->name != NULL && *mp->name != '\0',
	    ("GEOM class has no name"));
	LIST_FOREACH(mp2, &g_classes, class) {
		if (mp2 == mp) {
			printf("The GEOM class %s is already loaded.\n",
			    mp2->name);
			if (hh != NULL)
				hh->error = EEXIST;
			return;
		} else if (strcmp(mp2->name, mp->name) == 0) {
			printf("A GEOM class %s is already loaded.\n",
			    mp2->name);
			if (hh != NULL)
				hh->error = EEXIST;
			return;
		}
	}

	LIST_INIT(&mp->geom);
	LIST_INSERT_HEAD(&g_classes, mp, class);
	if (mp->init != NULL)
		mp->init(mp);
	if (mp->taste == NULL)
		return;
	LIST_FOREACH(mp2, &g_classes, class) {
		if (mp == mp2)
			continue;
		LIST_FOREACH(gp, &mp2->geom, geom) {
			LIST_FOREACH(pp, &gp->provider, provider) {
				mp->taste(mp, pp, 0);
				g_topology_assert();
			}
		}
	}
}

static void
g_unload_class(void *arg, int flag)
{
	struct g_hh00 *hh;
	struct g_class *mp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp;
	int error;

	g_topology_assert();
	hh = arg;
	mp = hh->mp;
	G_VALID_CLASS(mp);
	g_trace(G_T_TOPOLOGY, "g_unload_class(%s)", mp->name);

	/*
	 * We allow unloading if we have no geoms, or a class
	 * method we can use to get rid of them.
	 */
	if (!LIST_EMPTY(&mp->geom) && mp->destroy_geom == NULL) {
		hh->error = EOPNOTSUPP;
		return;
	}

	/* We refuse to unload if anything is open */
	LIST_FOREACH(gp, &mp->geom, geom) {
		LIST_FOREACH(pp, &gp->provider, provider)
			if (pp->acr || pp->acw || pp->ace) {
				hh->error = EBUSY;
				return;
			}
		LIST_FOREACH(cp, &gp->consumer, consumer)
			if (cp->acr || cp->acw || cp->ace) {
				hh->error = EBUSY;
				return;
			}
	}

	/* Bar new entries */
	mp->taste = NULL;
	mp->config = NULL;

	error = 0;
	for (;;) {
		gp = LIST_FIRST(&mp->geom);
		if (gp == NULL)
			break;
		error = mp->destroy_geom(NULL, mp, gp);
		if (error != 0)
			break;
	}
	if (error == 0) {
		if (mp->fini != NULL)
			mp->fini(mp);
		LIST_REMOVE(mp, class);
	}
	hh->error = error;
	return;
}

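/*
 * Kernel module event handler for GEOM classes; dispatches MOD_LOAD and
 * MOD_UNLOAD requests to the g_event thread.  While the system is still
 * cold the load is merely posted, so no completion status is available.
 */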
int
g_modevent(module_t mod, int type, void *data)
{
	struct g_hh00 *hh;
	int error;
	static int g_ignition;
	struct g_class *mp;

	mp = data;
	if (mp->version != G_VERSION) {
		printf("GEOM class %s has wrong version %x\n",
		    mp->name, mp->version);
		return (EINVAL);
	}
	if (!g_ignition) {
		g_ignition++;
		g_init();
	}
	hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO);
	hh->mp = data;
	error = EOPNOTSUPP;
	switch (type) {
	case MOD_LOAD:
		g_trace(G_T_TOPOLOGY, "g_modevent(%s, LOAD)", hh->mp->name);
		/*
		 * Once the system is not cold, MOD_LOAD calls will be
		 * from userland and the g_event thread will be able
		 * to acknowledge their completion.
		 */
		if (cold) {
			hh->post = 1;
			error = g_post_event(g_load_class, hh, M_WAITOK, NULL);
		} else {
			error = g_waitfor_event(g_load_class, hh, M_WAITOK,
			    NULL);
			if (error == 0)
				error = hh->error;
			g_free(hh);
		}
		break;
	case MOD_UNLOAD:
		g_trace(G_T_TOPOLOGY, "g_modevent(%s, UNLOAD)", hh->mp->name);
		error = g_waitfor_event(g_unload_class, hh, M_WAITOK, NULL);
		if (error == 0)
			error = hh->error;
		if (error == 0) {
			KASSERT(LIST_EMPTY(&hh->mp->geom),
			    ("Unloaded class (%s) still has geom", hh->mp->name));
		}
		g_free(hh);
		break;
	default:
		g_free(hh);
		break;
	}
	return (error);
}

static void
g_retaste_event(void *arg, int flag)
{
	struct g_class *cp, *mp;
	struct g_geom *gp, *gp2;
	struct g_hh00 *hh;
	struct g_provider *pp;

	g_topology_assert();
	if (flag == EV_CANCEL)	/* XXX: can't happen ? */
		return;
	if (g_shutdown)
		return;

	hh = arg;
	mp = hh->mp;
	hh->error = 0;
	if (hh->post) {
		g_free(hh);
		hh = NULL;
	}
	g_trace(G_T_TOPOLOGY, "g_retaste(%s)", mp->name);

	LIST_FOREACH(cp, &g_classes, class) {
		LIST_FOREACH(gp, &cp->geom, geom) {
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (pp->acr || pp->acw || pp->ace)
					continue;
				LIST_FOREACH(gp2, &mp->geom, geom) {
					if (!strcmp(pp->name, gp2->name))
						break;
				}
				if (gp2 != NULL)
					g_wither_geom(gp2, ENXIO);
				mp->taste(mp, pp, 0);
				g_topology_assert();
			}
		}
	}
}

int
g_retaste(struct g_class *mp)
{
	struct g_hh00 *hh;
	int error;

	if (mp->taste == NULL)
		return (EINVAL);

	hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO);
	hh->mp = mp;

	if (cold) {
		hh->post = 1;
		error = g_post_event(g_retaste_event, hh, M_WAITOK, NULL);
	} else {
		error = g_waitfor_event(g_retaste_event, hh, M_WAITOK, NULL);
		if (error == 0)
			error = hh->error;
		g_free(hh);
	}

	return (error);
}

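/*
 * Create a new geom in class "mp".  The name is formatted printf-style and
 * the method vectors are filled in with the class defaults.
 */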
struct g_geom *
g_new_geomf(struct g_class *mp, const char *fmt, ...)
{
	struct g_geom *gp;
	va_list ap;
	struct sbuf *sb;

	g_topology_assert();
	G_VALID_CLASS(mp);
	sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
	va_start(ap, fmt);
	sbuf_vprintf(sb, fmt, ap);
	va_end(ap);
	sbuf_finish(sb);
	gp = g_malloc(sizeof *gp, M_WAITOK | M_ZERO);
	gp->name = g_malloc(sbuf_len(sb) + 1, M_WAITOK | M_ZERO);
	gp->class = mp;
	gp->rank = 1;
	LIST_INIT(&gp->consumer);
	LIST_INIT(&gp->provider);
	LIST_INSERT_HEAD(&mp->geom, gp, geom);
	TAILQ_INSERT_HEAD(&geoms, gp, geoms);
	strcpy(gp->name, sbuf_data(sb));
	sbuf_delete(sb);
	/* Fill in defaults from class */
	gp->start = mp->start;
	gp->spoiled = mp->spoiled;
	gp->dumpconf = mp->dumpconf;
	gp->access = mp->access;
	gp->orphan = mp->orphan;
	gp->ioctl = mp->ioctl;
	return (gp);
}

void
g_destroy_geom(struct g_geom *gp)
{

	g_topology_assert();
	G_VALID_GEOM(gp);
	g_trace(G_T_TOPOLOGY, "g_destroy_geom(%p(%s))", gp, gp->name);
	KASSERT(LIST_EMPTY(&gp->consumer),
	    ("g_destroy_geom(%s) with consumer(s) [%p]",
	    gp->name, LIST_FIRST(&gp->consumer)));
	KASSERT(LIST_EMPTY(&gp->provider),
	    ("g_destroy_geom(%s) with provider(s) [%p]",
	    gp->name, LIST_FIRST(&gp->provider)));
	g_cancel_event(gp);
	LIST_REMOVE(gp, geom);
	TAILQ_REMOVE(&geoms, gp, geoms);
	g_free(gp->name);
	g_free(gp);
}

/*
 * This function is called (repeatedly) until the geom has withered away.
 */
void
g_wither_geom(struct g_geom *gp, int error)
{
	struct g_provider *pp;

	g_topology_assert();
	G_VALID_GEOM(gp);
	g_trace(G_T_TOPOLOGY, "g_wither_geom(%p(%s))", gp, gp->name);
	if (!(gp->flags & G_GEOM_WITHER)) {
		gp->flags |= G_GEOM_WITHER;
		LIST_FOREACH(pp, &gp->provider, provider)
			if (!(pp->flags & G_PF_ORPHAN))
				g_orphan_provider(pp, error);
	}
	g_do_wither();
}

/*
 * Convenience function to destroy a particular provider.
 */
void
g_wither_provider(struct g_provider *pp, int error)
{

	pp->flags |= G_PF_WITHER;
	if (!(pp->flags & G_PF_ORPHAN))
		g_orphan_provider(pp, error);
}

/*
 * This function is called (repeatedly) until the geom has withered away.
 */
void
g_wither_geom_close(struct g_geom *gp, int error)
{
	struct g_consumer *cp;

	g_topology_assert();
	G_VALID_GEOM(gp);
	g_trace(G_T_TOPOLOGY, "g_wither_geom_close(%p(%s))", gp, gp->name);
	LIST_FOREACH(cp, &gp->consumer, consumer)
		if (cp->acr || cp->acw || cp->ace)
			g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	g_wither_geom(gp, error);
}

/*
 * This function is called (repeatedly) until we can't wash away more
 * withered bits at present.  The return value contains two bits.  Bit 0
 * set means "there is withering stuff we can't wash now"; bit 1 means
 * "call me again, there may be stuff I didn't get the first time around".
 */
int
g_wither_washer()
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;
	struct g_provider *pp, *pp2;
	struct g_consumer *cp, *cp2;
	int result;

	result = 0;
	g_topology_assert();
	LIST_FOREACH(mp, &g_classes, class) {
		LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
			LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) {
				if (!(pp->flags & G_PF_WITHER))
					continue;
				if (LIST_EMPTY(&pp->consumers))
					g_destroy_provider(pp);
				else
					result |= 1;
			}
			if (!(gp->flags & G_GEOM_WITHER))
				continue;
			LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) {
				if (LIST_EMPTY(&pp->consumers))
					g_destroy_provider(pp);
				else
					result |= 1;
			}
			LIST_FOREACH_SAFE(cp, &gp->consumer, consumer, cp2) {
				if (cp->acr || cp->acw || cp->ace) {
					result |= 1;
					continue;
				}
				if (cp->provider != NULL)
					g_detach(cp);
				g_destroy_consumer(cp);
				result |= 2;
			}
			if (LIST_EMPTY(&gp->provider) &&
			    LIST_EMPTY(&gp->consumer))
				g_destroy_geom(gp);
			else
				result |= 1;
		}
	}
	return (result);
}

struct g_consumer *
g_new_consumer(struct g_geom *gp)
{
	struct g_consumer *cp;

	g_topology_assert();
	G_VALID_GEOM(gp);
	KASSERT(!(gp->flags & G_GEOM_WITHER),
	    ("g_new_consumer on WITHERing geom(%s) (class %s)",
	    gp->name, gp->class->name));
	KASSERT(gp->orphan != NULL,
	    ("g_new_consumer on geom(%s) (class %s) without orphan",
	    gp->name, gp->class->name));

	cp = g_malloc(sizeof *cp, M_WAITOK | M_ZERO);
	cp->geom = gp;
	cp->stat = devstat_new_entry(cp, -1, 0, DEVSTAT_ALL_SUPPORTED,
	    DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
	LIST_INSERT_HEAD(&gp->consumer, cp, consumer);
	return(cp);
}

void
g_destroy_consumer(struct g_consumer *cp)
{
	struct g_geom *gp;

	g_topology_assert();
	G_VALID_CONSUMER(cp);
	g_trace(G_T_TOPOLOGY, "g_destroy_consumer(%p)", cp);
	KASSERT (cp->provider == NULL, ("g_destroy_consumer but attached"));
	KASSERT (cp->acr == 0, ("g_destroy_consumer with acr"));
	KASSERT (cp->acw == 0, ("g_destroy_consumer with acw"));
	KASSERT (cp->ace == 0, ("g_destroy_consumer with ace"));
	g_cancel_event(cp);
	gp = cp->geom;
	LIST_REMOVE(cp, consumer);
	devstat_remove_entry(cp->stat);
	g_free(cp);
	if (gp->flags & G_GEOM_WITHER)
		g_do_wither();
}

static void
g_new_provider_event(void *arg, int flag)
{
	struct g_class *mp;
	struct g_provider *pp;
	struct g_consumer *cp;
	int i;

	g_topology_assert();
	if (flag == EV_CANCEL)
		return;
	if (g_shutdown)
		return;
	pp = arg;
	G_VALID_PROVIDER(pp);
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp->taste == NULL)
			continue;
		i = 1;
		LIST_FOREACH(cp, &pp->consumers, consumers)
			if (cp->geom->class == mp)
				i = 0;
		if (!i)
			continue;
		mp->taste(mp, pp, 0);
		g_topology_assert();
	}
}

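/*
 * Create a new provider on geom "gp" with a printf-style formatted name and
 * announce it to all interested classes by posting a g_new_provider_event.
 */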
struct g_provider *
g_new_providerf(struct g_geom *gp, const char *fmt, ...)
{
	struct g_provider *pp;
	struct sbuf *sb;
	va_list ap;

	g_topology_assert();
	G_VALID_GEOM(gp);
	KASSERT(gp->access != NULL,
	    ("new provider on geom(%s) without ->access (class %s)",
	    gp->name, gp->class->name));
	KASSERT(gp->start != NULL,
	    ("new provider on geom(%s) without ->start (class %s)",
	    gp->name, gp->class->name));
	KASSERT(!(gp->flags & G_GEOM_WITHER),
	    ("new provider on WITHERing geom(%s) (class %s)",
	    gp->name, gp->class->name));
	sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
	va_start(ap, fmt);
	sbuf_vprintf(sb, fmt, ap);
	va_end(ap);
	sbuf_finish(sb);
	pp = g_malloc(sizeof *pp + sbuf_len(sb) + 1, M_WAITOK | M_ZERO);
	pp->name = (char *)(pp + 1);
	strcpy(pp->name, sbuf_data(sb));
	sbuf_delete(sb);
	LIST_INIT(&pp->consumers);
	pp->error = ENXIO;
	pp->geom = gp;
	pp->stat = devstat_new_entry(pp, -1, 0, DEVSTAT_ALL_SUPPORTED,
	    DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
	LIST_INSERT_HEAD(&gp->provider, pp, provider);
	g_post_event(g_new_provider_event, pp, M_WAITOK, pp, gp, NULL);
	return (pp);
}

void
g_error_provider(struct g_provider *pp, int error)
{

	/* G_VALID_PROVIDER(pp);  We may not have g_topology */
	pp->error = error;
}

struct g_provider *
g_provider_by_name(char const *arg)
{
	struct g_class *cp;
	struct g_geom *gp;
	struct g_provider *pp;

	LIST_FOREACH(cp, &g_classes, class) {
		LIST_FOREACH(gp, &cp->geom, geom) {
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (!strcmp(arg, pp->name))
					return (pp);
			}
		}
	}
	return (NULL);
}

void
g_destroy_provider(struct g_provider *pp)
{
	struct g_geom *gp;

	g_topology_assert();
	G_VALID_PROVIDER(pp);
	KASSERT(LIST_EMPTY(&pp->consumers),
	    ("g_destroy_provider but attached"));
	KASSERT (pp->acr == 0, ("g_destroy_provider with acr"));
	KASSERT (pp->acw == 0, ("g_destroy_provider with acw"));
	KASSERT (pp->ace == 0, ("g_destroy_provider with ace"));
	g_cancel_event(pp);
	LIST_REMOVE(pp, provider);
	gp = pp->geom;
	devstat_remove_entry(pp->stat);
	g_free(pp);
	if ((gp->flags & G_GEOM_WITHER))
		g_do_wither();
}

/*
 * We keep the "geoms" list sorted by topological order (== increasing
 * numerical rank) at all times.
 * When an attach is done, the attaching geom's rank is invalidated
 * and it is moved to the tail of the list.
 * All geoms later in the sequence have their ranks reevaluated in
 * sequence.  If we cannot assign a rank to a geom because its
 * prerequisites do not have rank, we move that element to the tail
 * of the sequence with invalid rank as well.
 * At some point we encounter our original geom and if we still fail
 * to assign it a rank, there must be a loop and we fail back to
 * g_attach() which detaches again and calls redo_rank again
 * to fix up the damage.
 * It would be much simpler code wise to do it recursively, but we
 * can't risk that on the kernel stack.
 */
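/*
 * For example, a geom whose consumers are attached only to providers of
 * rank-1 geoms ends up with rank 2, a geom stacked on top of that one
 * gets rank 3, and so on: a geom's rank is one more than the highest
 * rank among the providers it consumes from.
 */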

static int
redo_rank(struct g_geom *gp)
{
	struct g_consumer *cp;
	struct g_geom *gp1, *gp2;
	int n, m;

	g_topology_assert();
	G_VALID_GEOM(gp);

	/* Invalidate this geom's rank and move it to the tail */
	gp1 = TAILQ_NEXT(gp, geoms);
	if (gp1 != NULL) {
		gp->rank = 0;
		TAILQ_REMOVE(&geoms, gp, geoms);
		TAILQ_INSERT_TAIL(&geoms, gp, geoms);
	} else {
		gp1 = gp;
	}

	/* re-rank the rest of the sequence */
	for (; gp1 != NULL; gp1 = gp2) {
		gp1->rank = 0;
		m = 1;
		LIST_FOREACH(cp, &gp1->consumer, consumer) {
			if (cp->provider == NULL)
				continue;
			n = cp->provider->geom->rank;
			if (n == 0) {
				m = 0;
				break;
			} else if (n >= m)
				m = n + 1;
		}
		gp1->rank = m;
		gp2 = TAILQ_NEXT(gp1, geoms);

		/* got a rank, moving on */
		if (m != 0)
			continue;

		/* no rank to original geom means loop */
		if (gp == gp1)
			return (ELOOP);

		/* no rank, put it at the end and move on */
		TAILQ_REMOVE(&geoms, gp1, geoms);
		TAILQ_INSERT_TAIL(&geoms, gp1, geoms);
	}
	return (0);
}

int
g_attach(struct g_consumer *cp, struct g_provider *pp)
{
	int error;

	g_topology_assert();
	G_VALID_CONSUMER(cp);
	G_VALID_PROVIDER(pp);
	KASSERT(cp->provider == NULL, ("attach but attached"));
	cp->provider = pp;
	LIST_INSERT_HEAD(&pp->consumers, cp, consumers);
	error = redo_rank(cp->geom);
	if (error) {
		LIST_REMOVE(cp, consumers);
		cp->provider = NULL;
		redo_rank(cp->geom);
	}
	return (error);
}

void
g_detach(struct g_consumer *cp)
{
	struct g_provider *pp;

	g_topology_assert();
	G_VALID_CONSUMER(cp);
	g_trace(G_T_TOPOLOGY, "g_detach(%p)", cp);
	KASSERT(cp->provider != NULL, ("detach but not attached"));
	KASSERT(cp->acr == 0, ("detach but nonzero acr"));
	KASSERT(cp->acw == 0, ("detach but nonzero acw"));
	KASSERT(cp->ace == 0, ("detach but nonzero ace"));
	KASSERT(cp->nstart == cp->nend,
	    ("detach with active requests"));
	pp = cp->provider;
	LIST_REMOVE(cp, consumers);
	cp->provider = NULL;
	if (pp->geom->flags & G_GEOM_WITHER)
		g_do_wither();
	else if (pp->flags & G_PF_WITHER)
		g_do_wither();
	redo_rank(cp->geom);
}

/*
 * g_access()
 *
 * Access-check with delta values.  The question asked is "can consumer
 * "cp" change the access counters by the relative amounts dc[rwe]?"
 */
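/*
 * For instance, g_access(cp, 1, 0, 0) asks for one more read opening and
 * g_access(cp, 0, -1, 0) gives up one write opening; the deltas are applied
 * to both the consumer and its provider only if the request is granted.
 */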

int
g_access(struct g_consumer *cp, int dcr, int dcw, int dce)
{
	struct g_provider *pp;
	int pr, pw, pe;
	int error;

	g_topology_assert();
	G_VALID_CONSUMER(cp);
	pp = cp->provider;
	KASSERT(pp != NULL, ("access but not attached"));
	G_VALID_PROVIDER(pp);

	g_trace(G_T_ACCESS, "g_access(%p(%s), %d, %d, %d)",
	    cp, pp->name, dcr, dcw, dce);

	KASSERT(cp->acr + dcr >= 0, ("access resulting in negative acr"));
	KASSERT(cp->acw + dcw >= 0, ("access resulting in negative acw"));
	KASSERT(cp->ace + dce >= 0, ("access resulting in negative ace"));
	KASSERT(dcr != 0 || dcw != 0 || dce != 0, ("NOP access request"));
	KASSERT(pp->geom->access != NULL, ("NULL geom->access"));

	/*
	 * If our class cares about being spoiled, and we have been, we
	 * are probably just ahead of the event telling us that.  Fail
	 * now rather than having to unravel this later.
	 */
	if (cp->geom->spoiled != NULL && cp->spoiled &&
	    (dcr > 0 || dcw > 0 || dce > 0))
		return (ENXIO);

	/*
	 * Figure out what counts the provider would have had, if this
	 * consumer had (r0w0e0) at this time.
	 */
	pr = pp->acr - cp->acr;
	pw = pp->acw - cp->acw;
	pe = pp->ace - cp->ace;

	g_trace(G_T_ACCESS,
	    "open delta:[r%dw%de%d] old:[r%dw%de%d] provider:[r%dw%de%d] %p(%s)",
	    dcr, dcw, dce,
	    cp->acr, cp->acw, cp->ace,
	    pp->acr, pp->acw, pp->ace,
	    pp, pp->name);

	/* If foot-shooting is enabled, any open on rank#1 is OK */
	if ((g_debugflags & 16) && pp->geom->rank == 1)
		;
	/* If we try exclusive but already write: fail */
	else if (dce > 0 && pw > 0)
		return (EPERM);
	/* If we try write but already exclusive: fail */
	else if (dcw > 0 && pe > 0)
		return (EPERM);
	/* If we try to open more but provider is error'ed: fail */
	else if ((dcr > 0 || dcw > 0 || dce > 0) && pp->error != 0)
		return (pp->error);

	/* Ok then... */

	error = pp->geom->access(pp, dcr, dcw, dce);
	KASSERT(dcr > 0 || dcw > 0 || dce > 0 || error == 0,
	    ("Geom provider %s::%s failed closing ->access()",
	    pp->geom->class->name, pp->name));
	if (!error) {
		/*
		 * If we open first write, spoil any partner consumers.
		 * If we close last write and provider is not errored,
		 * trigger re-taste.
		 */
		if (pp->acw == 0 && dcw != 0)
			g_spoil(pp, cp);
		else if (pp->acw != 0 && pp->acw == -dcw && pp->error == 0 &&
		    !(pp->geom->flags & G_GEOM_WITHER))
			g_post_event(g_new_provider_event, pp, M_WAITOK,
			    pp, NULL);

		pp->acr += dcr;
		pp->acw += dcw;
		pp->ace += dce;
		cp->acr += dcr;
		cp->acw += dcw;
		cp->ace += dce;
		if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)
			KASSERT(pp->sectorsize > 0,
			    ("Provider %s lacks sectorsize", pp->name));
	}
	return (error);
}

int
g_handleattr_int(struct bio *bp, const char *attribute, int val)
{

	return (g_handleattr(bp, attribute, &val, sizeof val));
}

int
g_handleattr_off_t(struct bio *bp, const char *attribute, off_t val)
{

	return (g_handleattr(bp, attribute, &val, sizeof val));
}

int
g_handleattr_str(struct bio *bp, const char *attribute, char *str)
{

	return (g_handleattr(bp, attribute, str, 0));
}

int
g_handleattr(struct bio *bp, const char *attribute, void *val, int len)
{
	int error = 0;

	if (strcmp(bp->bio_attribute, attribute))
		return (0);
	if (len == 0) {
		bzero(bp->bio_data, bp->bio_length);
		if (strlcpy(bp->bio_data, val, bp->bio_length) >=
		    bp->bio_length) {
			printf("%s: %s bio_length %jd len %zu -> EFAULT\n",
			    __func__, bp->bio_to->name,
			    (intmax_t)bp->bio_length, strlen(val));
			error = EFAULT;
		}
	} else if (bp->bio_length == len) {
		bcopy(val, bp->bio_data, len);
		bp->bio_completed = len;
	} else {
		printf("%s: %s bio_length %jd len %d -> EFAULT\n", __func__,
		    bp->bio_to->name, (intmax_t)bp->bio_length, len);
		error = EFAULT;
	}
	g_io_deliver(bp, error);
	return (1);
}

int
g_std_access(struct g_provider *pp,
	int dr __unused, int dw __unused, int de __unused)
{

	g_topology_assert();
	G_VALID_PROVIDER(pp);
	return (0);
}

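/*
 * Standard bio completion routine for cloned requests: propagate the error
 * and completed byte count to the parent bio and deliver the parent once
 * all of its children have come back.
 */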
void
g_std_done(struct bio *bp)
{
	struct bio *bp2;

	bp2 = bp->bio_parent;
	if (bp2->bio_error == 0)
		bp2->bio_error = bp->bio_error;
	bp2->bio_completed += bp->bio_completed;
	g_destroy_bio(bp);
	bp2->bio_inbed++;
	if (bp2->bio_children == bp2->bio_inbed)
		g_io_deliver(bp2, bp2->bio_error);
}

/* XXX: maybe this is only g_slice_spoiled */

void
g_std_spoiled(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_assert();
	G_VALID_CONSUMER(cp);
	g_trace(G_T_TOPOLOGY, "g_std_spoiled(%p)", cp);
	g_detach(cp);
	gp = cp->geom;
	LIST_FOREACH(pp, &gp->provider, provider)
		g_orphan_provider(pp, ENXIO);
	g_destroy_consumer(cp);
	if (LIST_EMPTY(&gp->provider) && LIST_EMPTY(&gp->consumer))
		g_destroy_geom(gp);
	else
		gp->flags |= G_GEOM_WITHER;
}

/*
 * Spoiling happens when a provider is opened for writing, but consumers
 * which are configured by in-band data are attached (slicers for instance).
 * Since the write might potentially change the in-band data, such consumers
 * need to re-evaluate their existence after the writing session closes.
 * We do this by (offering to) tear them down when the open for write happens
 * in return for a re-taste when it closes again.
 * Together with the fact that such consumers grab an 'e' bit whenever they
 * are open, regardless of mode, this ends up DTRT.
 */

static void
g_spoil_event(void *arg, int flag)
{
	struct g_provider *pp;
	struct g_consumer *cp, *cp2;

	g_topology_assert();
	if (flag == EV_CANCEL)
		return;
	pp = arg;
	G_VALID_PROVIDER(pp);
	for (cp = LIST_FIRST(&pp->consumers); cp != NULL; cp = cp2) {
		cp2 = LIST_NEXT(cp, consumers);
		if (!cp->spoiled)
			continue;
		cp->spoiled = 0;
		if (cp->geom->spoiled == NULL)
			continue;
		cp->geom->spoiled(cp);
		g_topology_assert();
	}
}

void
g_spoil(struct g_provider *pp, struct g_consumer *cp)
{
	struct g_consumer *cp2;

	g_topology_assert();
	G_VALID_PROVIDER(pp);
	G_VALID_CONSUMER(cp);

	LIST_FOREACH(cp2, &pp->consumers, consumers) {
		if (cp2 == cp)
			continue;
		/*
		KASSERT(cp2->acr == 0, ("spoiling cp->acr = %d", cp2->acr));
		KASSERT(cp2->acw == 0, ("spoiling cp->acw = %d", cp2->acw));
		*/
		KASSERT(cp2->ace == 0, ("spoiling cp->ace = %d", cp2->ace));
		cp2->spoiled++;
	}
	g_post_event(g_spoil_event, pp, M_WAITOK, pp, NULL);
}

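/*
 * Fetch attribute "attr" from the provider behind consumer "cp" into "var"
 * and fail with EINVAL if the reply does not have the expected length.
 * (This is presumably the backend for the g_getattr() convenience macro.)
 */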
int
g_getattr__(const char *attr, struct g_consumer *cp, void *var, int len)
{
	int error, i;

	i = len;
	error = g_io_getattr(attr, cp, &i, var);
	if (error)
		return (error);
	if (i != len)
		return (EINVAL);
	return (0);
}

#if defined(DIAGNOSTIC) || defined(DDB)
/*
 * This function walks (topologically unsafely) the mesh and returns a
 * non-zero integer if it finds the argument pointer is an object.  The
 * return value indicates which type of object it is believed to be.
 * If topology is not locked, this function is potentially dangerous,
 * but since it is for debugging purposes and can be useful for instance
 * from DDB, we do not assert topology lock is held.
 */
int
g_valid_obj(void const *ptr)
{
	struct g_class *mp;
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_provider *pp;

	LIST_FOREACH(mp, &g_classes, class) {
		if (ptr == mp)
			return (1);
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (ptr == gp)
				return (2);
			LIST_FOREACH(cp, &gp->consumer, consumer)
				if (ptr == cp)
					return (3);
			LIST_FOREACH(pp, &gp->provider, provider)
				if (ptr == pp)
					return (4);
		}
	}
	return(0);
}
#endif

#ifdef DDB

#define	gprintf(...)	do {					\
	printf("%*s", indent, "");				\
	printf(__VA_ARGS__);					\
} while (0)
#define	gprintln(...)	do {					\
	gprintf(__VA_ARGS__);					\
	printf("\n");						\
} while (0)

#define	ADDFLAG(obj, flag, sflag)	do {			\
	if ((obj)->flags & (flag)) {				\
		if (comma)					\
			strlcat(str, ",", size);		\
		strlcat(str, (sflag), size);			\
		comma = 1;					\
	}							\
} while (0)

static char *
provider_flags_to_string(struct g_provider *pp, char *str, size_t size)
{
	int comma = 0;

	bzero(str, size);
	if (pp->flags == 0) {
		strlcpy(str, "NONE", size);
		return (str);
	}
	ADDFLAG(pp, G_PF_CANDELETE, "G_PF_CANDELETE");
	ADDFLAG(pp, G_PF_WITHER, "G_PF_WITHER");
	ADDFLAG(pp, G_PF_ORPHAN, "G_PF_ORPHAN");
	return (str);
}

static char *
geom_flags_to_string(struct g_geom *gp, char *str, size_t size)
{
	int comma = 0;

	bzero(str, size);
	if (gp->flags == 0) {
		strlcpy(str, "NONE", size);
		return (str);
	}
	ADDFLAG(gp, G_GEOM_WITHER, "G_GEOM_WITHER");
	return (str);
}

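/*
 * DDB helpers for dumping a consumer, provider, geom or class.  An indent
 * of 0 means the object was requested directly, so the verbose multi-line
 * form is printed; otherwise a compact one-line summary is nested under
 * the parent object.
 */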
static void
db_show_geom_consumer(int indent, struct g_consumer *cp)
{

	if (indent == 0) {
		gprintln("consumer: %p", cp);
		gprintln(" class: %s (%p)", cp->geom->class->name,
		    cp->geom->class);
		gprintln(" geom: %s (%p)", cp->geom->name, cp->geom);
		if (cp->provider == NULL)
			gprintln(" provider: none");
		else {
			gprintln(" provider: %s (%p)", cp->provider->name,
			    cp->provider);
		}
		gprintln(" access: r%dw%de%d", cp->acr, cp->acw, cp->ace);
		gprintln(" spoiled: %d", cp->spoiled);
		gprintln(" nstart: %u", cp->nstart);
		gprintln(" nend: %u", cp->nend);
	} else {
		gprintf("consumer: %p (%s), access=r%dw%de%d", cp,
		    cp->provider != NULL ? cp->provider->name : "none",
		    cp->acr, cp->acw, cp->ace);
		if (cp->spoiled)
			printf(", spoiled=%d", cp->spoiled);
		printf("\n");
	}
}

static void
db_show_geom_provider(int indent, struct g_provider *pp)
{
	struct g_consumer *cp;
	char flags[64];

	if (indent == 0) {
		gprintln("provider: %s (%p)", pp->name, pp);
		gprintln(" class: %s (%p)", pp->geom->class->name,
		    pp->geom->class);
		gprintln(" geom: %s (%p)", pp->geom->name, pp->geom);
		gprintln(" mediasize: %jd", (intmax_t)pp->mediasize);
		gprintln(" sectorsize: %u", pp->sectorsize);
		gprintln(" stripesize: %u", pp->stripesize);
		gprintln(" stripeoffset: %u", pp->stripeoffset);
		gprintln(" access: r%dw%de%d", pp->acr, pp->acw,
		    pp->ace);
		gprintln(" flags: %s (0x%04x)",
		    provider_flags_to_string(pp, flags, sizeof(flags)),
		    pp->flags);
		gprintln(" error: %d", pp->error);
		gprintln(" nstart: %u", pp->nstart);
		gprintln(" nend: %u", pp->nend);
		if (LIST_EMPTY(&pp->consumers))
			gprintln(" consumers: none");
	} else {
		gprintf("provider: %s (%p), access=r%dw%de%d",
		    pp->name, pp, pp->acr, pp->acw, pp->ace);
		if (pp->flags != 0) {
			printf(", flags=%s (0x%04x)",
			    provider_flags_to_string(pp, flags, sizeof(flags)),
			    pp->flags);
		}
		printf("\n");
	}
	if (!LIST_EMPTY(&pp->consumers)) {
		LIST_FOREACH(cp, &pp->consumers, consumers)
			db_show_geom_consumer(indent + 2, cp);
	}
}

static void
db_show_geom_geom(int indent, struct g_geom *gp)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	char flags[64];

	if (indent == 0) {
		gprintln("geom: %s (%p)", gp->name, gp);
		gprintln(" class: %s (%p)", gp->class->name, gp->class);
		gprintln(" flags: %s (0x%04x)",
		    geom_flags_to_string(gp, flags, sizeof(flags)), gp->flags);
		gprintln(" rank: %d", gp->rank);
		if (LIST_EMPTY(&gp->provider))
			gprintln(" providers: none");
		if (LIST_EMPTY(&gp->consumer))
			gprintln(" consumers: none");
	} else {
		gprintf("geom: %s (%p), rank=%d", gp->name, gp, gp->rank);
		if (gp->flags != 0) {
			printf(", flags=%s (0x%04x)",
			    geom_flags_to_string(gp, flags, sizeof(flags)),
			    gp->flags);
		}
		printf("\n");
	}
	if (!LIST_EMPTY(&gp->provider)) {
		LIST_FOREACH(pp, &gp->provider, provider)
			db_show_geom_provider(indent + 2, pp);
	}
	if (!LIST_EMPTY(&gp->consumer)) {
		LIST_FOREACH(cp, &gp->consumer, consumer)
			db_show_geom_consumer(indent + 2, cp);
	}
}

static void
db_show_geom_class(struct g_class *mp)
{
	struct g_geom *gp;

	printf("class: %s (%p)\n", mp->name, mp);
	LIST_FOREACH(gp, &mp->geom, geom)
		db_show_geom_geom(2, gp);
}

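/*
 * From the ddb prompt this is reached as "show geom" for the whole tree,
 * or "show geom <address>" for a single class, geom, consumer or provider.
 */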
/*
 * Print the GEOM topology or the given object.
 */
DB_SHOW_COMMAND(geom, db_show_geom)
{
	struct g_class *mp;

	if (!have_addr) {
		/* No address given, print the entire topology. */
		LIST_FOREACH(mp, &g_classes, class) {
			db_show_geom_class(mp);
			printf("\n");
		}
	} else {
		switch (g_valid_obj((void *)addr)) {
		case 1:
			db_show_geom_class((struct g_class *)addr);
			break;
		case 2:
			db_show_geom_geom(0, (struct g_geom *)addr);
			break;
		case 3:
			db_show_geom_consumer(0, (struct g_consumer *)addr);
			break;
		case 4:
			db_show_geom_provider(0, (struct g_provider *)addr);
			break;
		default:
			printf("Not a GEOM object.\n");
			break;
		}
	}
}

#undef	gprintf
#undef	gprintln
#undef	ADDFLAG

#endif /* DDB */