1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2002 Poul-Henning Kamp 5 * Copyright (c) 2002 Networks Associates Technology, Inc. 6 * All rights reserved. 7 * 8 * This software was developed for the FreeBSD Project by Poul-Henning Kamp 9 * and NAI Labs, the Security Research Division of Network Associates, Inc. 10 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the 11 * DARPA CHATS research program. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. The names of the authors may not be used to endorse or promote 22 * products derived from this software without specific prior written 23 * permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 */ 37 38 #include <sys/cdefs.h> 39 #include "opt_ddb.h" 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/devicestat.h> 44 #include <sys/kernel.h> 45 #include <sys/malloc.h> 46 #include <sys/bio.h> 47 #include <sys/sysctl.h> 48 #include <sys/proc.h> 49 #include <sys/kthread.h> 50 #include <sys/lock.h> 51 #include <sys/mutex.h> 52 #include <sys/errno.h> 53 #include <sys/sbuf.h> 54 #include <sys/sdt.h> 55 #include <geom/geom.h> 56 #include <geom/geom_dbg.h> 57 #include <geom/geom_int.h> 58 #include <machine/stdarg.h> 59 60 #ifdef DDB 61 #include <ddb/ddb.h> 62 #endif 63 64 #ifdef KDB 65 #include <sys/kdb.h> 66 #endif 67 68 SDT_PROVIDER_DEFINE(geom); 69 70 struct class_list_head g_classes = LIST_HEAD_INITIALIZER(g_classes); 71 static struct g_tailq_head geoms = TAILQ_HEAD_INITIALIZER(geoms); 72 char *g_wait_event, *g_wait_up, *g_wait_down; 73 74 struct g_hh00 { 75 struct g_class *mp; 76 struct g_provider *pp; 77 off_t size; 78 int error; 79 int post; 80 }; 81 82 void 83 g_dbg_printf(const char *classname, int lvl, struct bio *bp, 84 const char *format, 85 ...) 86 { 87 #ifndef PRINTF_BUFR_SIZE 88 #define PRINTF_BUFR_SIZE 64 89 #endif 90 char bufr[PRINTF_BUFR_SIZE]; 91 struct sbuf sb, *sbp __unused; 92 va_list ap; 93 94 sbp = sbuf_new(&sb, bufr, sizeof(bufr), SBUF_FIXEDLEN); 95 KASSERT(sbp != NULL, ("sbuf_new misused?")); 96 97 sbuf_set_drain(&sb, sbuf_printf_drain, NULL); 98 99 sbuf_cat(&sb, classname); 100 if (lvl >= 0) 101 sbuf_printf(&sb, "[%d]", lvl); 102 103 va_start(ap, format); 104 sbuf_vprintf(&sb, format, ap); 105 va_end(ap); 106 107 if (bp != NULL) { 108 sbuf_putc(&sb, ' '); 109 g_format_bio(&sb, bp); 110 } 111 112 /* Terminate the debug line with a single '\n'. */ 113 sbuf_nl_terminate(&sb); 114 115 /* Flush line to printf. */ 116 sbuf_finish(&sb); 117 sbuf_delete(&sb); 118 } 119 120 /* 121 * This event offers a new class a chance to taste all preexisting providers. 122 */ 123 static void 124 g_load_class(void *arg, int flag) 125 { 126 struct g_hh00 *hh; 127 struct g_class *mp2, *mp; 128 struct g_geom *gp; 129 struct g_provider *pp; 130 131 g_topology_assert(); 132 if (flag == EV_CANCEL) /* XXX: can't happen ? */ 133 return; 134 if (g_shutdown) 135 return; 136 137 hh = arg; 138 mp = hh->mp; 139 hh->error = 0; 140 if (hh->post) { 141 g_free(hh); 142 hh = NULL; 143 } 144 g_trace(G_T_TOPOLOGY, "g_load_class(%s)", mp->name); 145 KASSERT(mp->name != NULL && *mp->name != '\0', 146 ("GEOM class has no name")); 147 LIST_FOREACH(mp2, &g_classes, class) { 148 if (mp2 == mp) { 149 printf("The GEOM class %s is already loaded.\n", 150 mp2->name); 151 if (hh != NULL) 152 hh->error = EEXIST; 153 return; 154 } else if (strcmp(mp2->name, mp->name) == 0) { 155 printf("A GEOM class %s is already loaded.\n", 156 mp2->name); 157 if (hh != NULL) 158 hh->error = EEXIST; 159 return; 160 } 161 } 162 163 LIST_INIT(&mp->geom); 164 LIST_INSERT_HEAD(&g_classes, mp, class); 165 if (mp->init != NULL) 166 mp->init(mp); 167 if (mp->taste == NULL) 168 return; 169 LIST_FOREACH(mp2, &g_classes, class) { 170 if (mp == mp2) 171 continue; 172 LIST_FOREACH(gp, &mp2->geom, geom) { 173 LIST_FOREACH(pp, &gp->provider, provider) { 174 mp->taste(mp, pp, 0); 175 g_topology_assert(); 176 } 177 } 178 } 179 } 180 181 static int 182 g_unload_class(struct g_class *mp) 183 { 184 struct g_geom *gp; 185 struct g_provider *pp; 186 struct g_consumer *cp; 187 int error; 188 189 g_topology_lock(); 190 g_trace(G_T_TOPOLOGY, "g_unload_class(%s)", mp->name); 191 retry: 192 G_VALID_CLASS(mp); 193 LIST_FOREACH(gp, &mp->geom, geom) { 194 /* We refuse to unload if anything is open */ 195 LIST_FOREACH(pp, &gp->provider, provider) 196 if (pp->acr || pp->acw || pp->ace) { 197 g_topology_unlock(); 198 return (EBUSY); 199 } 200 LIST_FOREACH(cp, &gp->consumer, consumer) 201 if (cp->acr || cp->acw || cp->ace) { 202 g_topology_unlock(); 203 return (EBUSY); 204 } 205 /* If the geom is withering, wait for it to finish. */ 206 if (gp->flags & G_GEOM_WITHER) { 207 g_topology_sleep(mp, 1); 208 goto retry; 209 } 210 } 211 212 /* 213 * We allow unloading if we have no geoms, or a class 214 * method we can use to get rid of them. 215 */ 216 if (!LIST_EMPTY(&mp->geom) && mp->destroy_geom == NULL) { 217 g_topology_unlock(); 218 return (EOPNOTSUPP); 219 } 220 221 /* Bar new entries */ 222 mp->taste = NULL; 223 224 LIST_FOREACH(gp, &mp->geom, geom) { 225 error = mp->destroy_geom(NULL, mp, gp); 226 if (error != 0) { 227 g_topology_unlock(); 228 return (error); 229 } 230 } 231 /* Wait for withering to finish. */ 232 for (;;) { 233 gp = LIST_FIRST(&mp->geom); 234 if (gp == NULL) 235 break; 236 KASSERT(gp->flags & G_GEOM_WITHER, 237 ("Non-withering geom in class %s", mp->name)); 238 g_topology_sleep(mp, 1); 239 } 240 G_VALID_CLASS(mp); 241 if (mp->fini != NULL) 242 mp->fini(mp); 243 LIST_REMOVE(mp, class); 244 g_topology_unlock(); 245 246 return (0); 247 } 248 249 int 250 g_modevent(module_t mod, int type, void *data) 251 { 252 struct g_hh00 *hh; 253 int error; 254 static int g_ignition; 255 struct g_class *mp; 256 257 mp = data; 258 if (mp->version != G_VERSION) { 259 printf("GEOM class %s has Wrong version %x\n", 260 mp->name, mp->version); 261 return (EINVAL); 262 } 263 if (!g_ignition) { 264 g_ignition++; 265 g_init(); 266 } 267 error = EOPNOTSUPP; 268 switch (type) { 269 case MOD_LOAD: 270 g_trace(G_T_TOPOLOGY, "g_modevent(%s, LOAD)", mp->name); 271 hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO); 272 hh->mp = mp; 273 /* 274 * Once the system is not cold, MOD_LOAD calls will be 275 * from the userland and the g_event thread will be able 276 * to acknowledge their completion. 277 */ 278 if (cold) { 279 hh->post = 1; 280 error = g_post_event(g_load_class, hh, M_WAITOK, NULL); 281 } else { 282 error = g_waitfor_event(g_load_class, hh, M_WAITOK, 283 NULL); 284 if (error == 0) 285 error = hh->error; 286 g_free(hh); 287 } 288 break; 289 case MOD_UNLOAD: 290 g_trace(G_T_TOPOLOGY, "g_modevent(%s, UNLOAD)", mp->name); 291 error = g_unload_class(mp); 292 if (error == 0) { 293 KASSERT(LIST_EMPTY(&mp->geom), 294 ("Unloaded class (%s) still has geom", mp->name)); 295 } 296 break; 297 } 298 return (error); 299 } 300 301 static void 302 g_retaste_event(void *arg, int flag) 303 { 304 struct g_class *mp, *mp2; 305 struct g_geom *gp; 306 struct g_hh00 *hh; 307 struct g_provider *pp; 308 struct g_consumer *cp; 309 310 g_topology_assert(); 311 if (flag == EV_CANCEL) /* XXX: can't happen ? */ 312 return; 313 if (g_shutdown || g_notaste) 314 return; 315 316 hh = arg; 317 mp = hh->mp; 318 hh->error = 0; 319 if (hh->post) { 320 g_free(hh); 321 hh = NULL; 322 } 323 g_trace(G_T_TOPOLOGY, "g_retaste(%s)", mp->name); 324 325 LIST_FOREACH(mp2, &g_classes, class) { 326 LIST_FOREACH(gp, &mp2->geom, geom) { 327 LIST_FOREACH(pp, &gp->provider, provider) { 328 if (pp->acr || pp->acw || pp->ace) 329 continue; 330 LIST_FOREACH(cp, &pp->consumers, consumers) { 331 if (cp->geom->class == mp && 332 (cp->flags & G_CF_ORPHAN) == 0) 333 break; 334 } 335 if (cp != NULL) { 336 cp->flags |= G_CF_ORPHAN; 337 g_wither_geom(cp->geom, ENXIO); 338 } 339 mp->taste(mp, pp, 0); 340 g_topology_assert(); 341 } 342 } 343 } 344 } 345 346 int 347 g_retaste(struct g_class *mp) 348 { 349 struct g_hh00 *hh; 350 int error; 351 352 if (mp->taste == NULL) 353 return (EINVAL); 354 355 hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO); 356 hh->mp = mp; 357 358 if (cold) { 359 hh->post = 1; 360 error = g_post_event(g_retaste_event, hh, M_WAITOK, NULL); 361 } else { 362 error = g_waitfor_event(g_retaste_event, hh, M_WAITOK, NULL); 363 if (error == 0) 364 error = hh->error; 365 g_free(hh); 366 } 367 368 return (error); 369 } 370 371 struct g_geom * 372 g_new_geomf(struct g_class *mp, const char *fmt, ...) 373 { 374 struct g_geom *gp; 375 va_list ap; 376 struct sbuf *sb; 377 378 g_topology_assert(); 379 G_VALID_CLASS(mp); 380 sb = sbuf_new_auto(); 381 va_start(ap, fmt); 382 sbuf_vprintf(sb, fmt, ap); 383 va_end(ap); 384 sbuf_finish(sb); 385 gp = g_malloc(sizeof *gp, M_WAITOK | M_ZERO); 386 gp->name = g_malloc(sbuf_len(sb) + 1, M_WAITOK | M_ZERO); 387 gp->class = mp; 388 gp->rank = 1; 389 LIST_INIT(&gp->consumer); 390 LIST_INIT(&gp->provider); 391 LIST_INSERT_HEAD(&mp->geom, gp, geom); 392 TAILQ_INSERT_HEAD(&geoms, gp, geoms); 393 strcpy(gp->name, sbuf_data(sb)); 394 sbuf_delete(sb); 395 /* Fill in defaults from class */ 396 gp->start = mp->start; 397 gp->spoiled = mp->spoiled; 398 gp->attrchanged = mp->attrchanged; 399 gp->providergone = mp->providergone; 400 gp->dumpconf = mp->dumpconf; 401 gp->access = mp->access; 402 gp->orphan = mp->orphan; 403 gp->ioctl = mp->ioctl; 404 gp->resize = mp->resize; 405 return (gp); 406 } 407 408 void 409 g_destroy_geom(struct g_geom *gp) 410 { 411 412 g_topology_assert(); 413 G_VALID_GEOM(gp); 414 g_trace(G_T_TOPOLOGY, "g_destroy_geom(%p(%s))", gp, gp->name); 415 KASSERT(LIST_EMPTY(&gp->consumer), 416 ("g_destroy_geom(%s) with consumer(s) [%p]", 417 gp->name, LIST_FIRST(&gp->consumer))); 418 KASSERT(LIST_EMPTY(&gp->provider), 419 ("g_destroy_geom(%s) with provider(s) [%p]", 420 gp->name, LIST_FIRST(&gp->provider))); 421 g_cancel_event(gp); 422 LIST_REMOVE(gp, geom); 423 TAILQ_REMOVE(&geoms, gp, geoms); 424 g_free(gp->name); 425 g_free(gp); 426 } 427 428 /* 429 * This function is called (repeatedly) until the geom has withered away. 430 */ 431 void 432 g_wither_geom(struct g_geom *gp, int error) 433 { 434 struct g_provider *pp; 435 436 g_topology_assert(); 437 G_VALID_GEOM(gp); 438 g_trace(G_T_TOPOLOGY, "g_wither_geom(%p(%s))", gp, gp->name); 439 if (!(gp->flags & G_GEOM_WITHER)) { 440 gp->flags |= G_GEOM_WITHER; 441 LIST_FOREACH(pp, &gp->provider, provider) 442 if (!(pp->flags & G_PF_ORPHAN)) 443 g_orphan_provider(pp, error); 444 } 445 g_do_wither(); 446 } 447 448 /* 449 * Convenience function to destroy a particular provider. 450 */ 451 void 452 g_wither_provider(struct g_provider *pp, int error) 453 { 454 455 pp->flags |= G_PF_WITHER; 456 if (!(pp->flags & G_PF_ORPHAN)) 457 g_orphan_provider(pp, error); 458 } 459 460 /* 461 * This function is called (repeatedly) until the has withered away. 462 */ 463 void 464 g_wither_geom_close(struct g_geom *gp, int error) 465 { 466 struct g_consumer *cp; 467 468 g_topology_assert(); 469 G_VALID_GEOM(gp); 470 g_trace(G_T_TOPOLOGY, "g_wither_geom_close(%p(%s))", gp, gp->name); 471 LIST_FOREACH(cp, &gp->consumer, consumer) 472 if (cp->acr || cp->acw || cp->ace) 473 g_access(cp, -cp->acr, -cp->acw, -cp->ace); 474 g_wither_geom(gp, error); 475 } 476 477 /* 478 * This function is called (repeatedly) until we cant wash away more 479 * withered bits at present. 480 */ 481 void 482 g_wither_washer(void) 483 { 484 struct g_class *mp; 485 struct g_geom *gp, *gp2; 486 struct g_provider *pp, *pp2; 487 struct g_consumer *cp, *cp2; 488 489 g_topology_assert(); 490 LIST_FOREACH(mp, &g_classes, class) { 491 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 492 LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) { 493 if (!(pp->flags & G_PF_WITHER)) 494 continue; 495 if (LIST_EMPTY(&pp->consumers)) 496 g_destroy_provider(pp); 497 } 498 if (!(gp->flags & G_GEOM_WITHER)) 499 continue; 500 LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) { 501 if (LIST_EMPTY(&pp->consumers)) 502 g_destroy_provider(pp); 503 } 504 LIST_FOREACH_SAFE(cp, &gp->consumer, consumer, cp2) { 505 if (cp->acr || cp->acw || cp->ace) 506 continue; 507 if (cp->provider != NULL) 508 g_detach(cp); 509 g_destroy_consumer(cp); 510 } 511 if (LIST_EMPTY(&gp->provider) && 512 LIST_EMPTY(&gp->consumer)) 513 g_destroy_geom(gp); 514 } 515 } 516 } 517 518 struct g_consumer * 519 g_new_consumer(struct g_geom *gp) 520 { 521 struct g_consumer *cp; 522 523 g_topology_assert(); 524 G_VALID_GEOM(gp); 525 KASSERT(!(gp->flags & G_GEOM_WITHER), 526 ("g_new_consumer on WITHERing geom(%s) (class %s)", 527 gp->name, gp->class->name)); 528 KASSERT(gp->orphan != NULL, 529 ("g_new_consumer on geom(%s) (class %s) without orphan", 530 gp->name, gp->class->name)); 531 532 cp = g_malloc(sizeof *cp, M_WAITOK | M_ZERO); 533 cp->geom = gp; 534 cp->stat = devstat_new_entry(cp, -1, 0, DEVSTAT_ALL_SUPPORTED, 535 DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); 536 LIST_INSERT_HEAD(&gp->consumer, cp, consumer); 537 return(cp); 538 } 539 540 void 541 g_destroy_consumer(struct g_consumer *cp) 542 { 543 struct g_geom *gp; 544 545 g_topology_assert(); 546 G_VALID_CONSUMER(cp); 547 g_trace(G_T_TOPOLOGY, "g_destroy_consumer(%p)", cp); 548 KASSERT (cp->provider == NULL, ("g_destroy_consumer but attached")); 549 KASSERT (cp->acr == 0, ("g_destroy_consumer with acr")); 550 KASSERT (cp->acw == 0, ("g_destroy_consumer with acw")); 551 KASSERT (cp->ace == 0, ("g_destroy_consumer with ace")); 552 g_cancel_event(cp); 553 gp = cp->geom; 554 LIST_REMOVE(cp, consumer); 555 devstat_remove_entry(cp->stat); 556 g_free(cp); 557 if (gp->flags & G_GEOM_WITHER) 558 g_do_wither(); 559 } 560 561 static void 562 g_new_provider_event(void *arg, int flag) 563 { 564 struct g_class *mp; 565 struct g_provider *pp; 566 struct g_consumer *cp, *next_cp; 567 568 g_topology_assert(); 569 if (flag == EV_CANCEL) 570 return; 571 if (g_shutdown) 572 return; 573 pp = arg; 574 G_VALID_PROVIDER(pp); 575 if ((pp->flags & G_PF_WITHER) != 0) 576 return; 577 LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, next_cp) { 578 if ((cp->flags & G_CF_ORPHAN) == 0 && 579 cp->geom->attrchanged != NULL) 580 cp->geom->attrchanged(cp, "GEOM::media"); 581 } 582 if (g_notaste) 583 return; 584 LIST_FOREACH(mp, &g_classes, class) { 585 if (mp->taste == NULL) 586 continue; 587 LIST_FOREACH(cp, &pp->consumers, consumers) 588 if (cp->geom->class == mp && 589 (cp->flags & G_CF_ORPHAN) == 0) 590 break; 591 if (cp != NULL) 592 continue; 593 mp->taste(mp, pp, 0); 594 g_topology_assert(); 595 } 596 } 597 598 struct g_provider * 599 g_new_providerf(struct g_geom *gp, const char *fmt, ...) 600 { 601 struct g_provider *pp; 602 struct sbuf *sb; 603 va_list ap; 604 605 g_topology_assert(); 606 G_VALID_GEOM(gp); 607 KASSERT(gp->access != NULL, 608 ("new provider on geom(%s) without ->access (class %s)", 609 gp->name, gp->class->name)); 610 KASSERT(gp->start != NULL, 611 ("new provider on geom(%s) without ->start (class %s)", 612 gp->name, gp->class->name)); 613 KASSERT(!(gp->flags & G_GEOM_WITHER), 614 ("new provider on WITHERing geom(%s) (class %s)", 615 gp->name, gp->class->name)); 616 sb = sbuf_new_auto(); 617 va_start(ap, fmt); 618 sbuf_vprintf(sb, fmt, ap); 619 va_end(ap); 620 sbuf_finish(sb); 621 pp = g_malloc(sizeof *pp + sbuf_len(sb) + 1, M_WAITOK | M_ZERO); 622 pp->name = (char *)(pp + 1); 623 strcpy(pp->name, sbuf_data(sb)); 624 sbuf_delete(sb); 625 LIST_INIT(&pp->consumers); 626 LIST_INIT(&pp->aliases); 627 pp->error = ENXIO; 628 pp->geom = gp; 629 pp->stat = devstat_new_entry(pp, -1, 0, DEVSTAT_ALL_SUPPORTED, 630 DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); 631 LIST_INSERT_HEAD(&gp->provider, pp, provider); 632 g_post_event(g_new_provider_event, pp, M_WAITOK, pp, gp, NULL); 633 return (pp); 634 } 635 636 void 637 g_provider_add_alias(struct g_provider *pp, const char *fmt, ...) 638 { 639 struct sbuf *sb; 640 struct g_geom_alias *gap; 641 va_list ap; 642 643 /* 644 * Generate the alias string and save it in the list. 645 */ 646 sb = sbuf_new_auto(); 647 va_start(ap, fmt); 648 sbuf_vprintf(sb, fmt, ap); 649 va_end(ap); 650 sbuf_finish(sb); 651 652 LIST_FOREACH(gap, &pp->aliases, ga_next) { 653 if (strcmp(gap->ga_alias, sbuf_data(sb)) != 0) 654 continue; 655 /* Don't re-add the same alias. */ 656 sbuf_delete(sb); 657 return; 658 } 659 660 gap = g_malloc(sizeof(*gap) + sbuf_len(sb) + 1, M_WAITOK | M_ZERO); 661 memcpy((char *)(gap + 1), sbuf_data(sb), sbuf_len(sb)); 662 sbuf_delete(sb); 663 gap->ga_alias = (const char *)(gap + 1); 664 LIST_INSERT_HEAD(&pp->aliases, gap, ga_next); 665 } 666 667 void 668 g_error_provider(struct g_provider *pp, int error) 669 { 670 671 /* G_VALID_PROVIDER(pp); We may not have g_topology */ 672 pp->error = error; 673 } 674 675 static void 676 g_resize_provider_event(void *arg, int flag) 677 { 678 struct g_hh00 *hh; 679 struct g_class *mp; 680 struct g_geom *gp; 681 struct g_provider *pp; 682 struct g_consumer *cp, *cp2; 683 off_t size; 684 685 g_topology_assert(); 686 if (g_shutdown) 687 return; 688 689 hh = arg; 690 pp = hh->pp; 691 size = hh->size; 692 g_free(hh); 693 694 G_VALID_PROVIDER(pp); 695 KASSERT(!(pp->flags & G_PF_WITHER), 696 ("g_resize_provider_event but withered")); 697 g_trace(G_T_TOPOLOGY, "g_resize_provider_event(%p)", pp); 698 699 LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) { 700 gp = cp->geom; 701 if (gp->resize == NULL && size < pp->mediasize) { 702 /* 703 * XXX: g_dev_orphan method does deferred destroying 704 * and it is possible, that other event could already 705 * call the orphan method. Check consumer's flags to 706 * do not schedule it twice. 707 */ 708 if (cp->flags & G_CF_ORPHAN) 709 continue; 710 cp->flags |= G_CF_ORPHAN; 711 cp->geom->orphan(cp); 712 } 713 } 714 715 pp->mediasize = size; 716 717 LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) { 718 gp = cp->geom; 719 if ((gp->flags & G_GEOM_WITHER) == 0 && gp->resize != NULL) 720 gp->resize(cp); 721 } 722 723 /* 724 * After resizing, the previously invalid GEOM class metadata 725 * might become valid. This means we should retaste. 726 */ 727 LIST_FOREACH(mp, &g_classes, class) { 728 if (mp->taste == NULL) 729 continue; 730 LIST_FOREACH(cp, &pp->consumers, consumers) 731 if (cp->geom->class == mp && 732 (cp->flags & G_CF_ORPHAN) == 0) 733 break; 734 if (cp != NULL) 735 continue; 736 mp->taste(mp, pp, 0); 737 g_topology_assert(); 738 } 739 } 740 741 void 742 g_resize_provider(struct g_provider *pp, off_t size) 743 { 744 struct g_hh00 *hh; 745 746 G_VALID_PROVIDER(pp); 747 if (pp->flags & G_PF_WITHER) 748 return; 749 750 if (size == pp->mediasize) 751 return; 752 753 hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO); 754 hh->pp = pp; 755 hh->size = size; 756 g_post_event(g_resize_provider_event, hh, M_WAITOK, NULL); 757 } 758 759 struct g_provider * 760 g_provider_by_name(char const *arg) 761 { 762 struct g_class *cp; 763 struct g_geom *gp; 764 struct g_provider *pp, *wpp; 765 766 if (strncmp(arg, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 767 arg += sizeof(_PATH_DEV) - 1; 768 769 wpp = NULL; 770 LIST_FOREACH(cp, &g_classes, class) { 771 LIST_FOREACH(gp, &cp->geom, geom) { 772 LIST_FOREACH(pp, &gp->provider, provider) { 773 if (strcmp(arg, pp->name) != 0) 774 continue; 775 if ((gp->flags & G_GEOM_WITHER) == 0 && 776 (pp->flags & G_PF_WITHER) == 0) 777 return (pp); 778 else 779 wpp = pp; 780 } 781 } 782 } 783 784 return (wpp); 785 } 786 787 void 788 g_destroy_provider(struct g_provider *pp) 789 { 790 struct g_geom *gp; 791 struct g_geom_alias *gap, *gaptmp; 792 793 g_topology_assert(); 794 G_VALID_PROVIDER(pp); 795 KASSERT(LIST_EMPTY(&pp->consumers), 796 ("g_destroy_provider but attached")); 797 KASSERT (pp->acr == 0, ("g_destroy_provider with acr")); 798 KASSERT (pp->acw == 0, ("g_destroy_provider with acw")); 799 KASSERT (pp->ace == 0, ("g_destroy_provider with ace")); 800 g_cancel_event(pp); 801 LIST_REMOVE(pp, provider); 802 gp = pp->geom; 803 devstat_remove_entry(pp->stat); 804 /* 805 * If a callback was provided, send notification that the provider 806 * is now gone. 807 */ 808 if (gp->providergone != NULL) 809 gp->providergone(pp); 810 LIST_FOREACH_SAFE(gap, &pp->aliases, ga_next, gaptmp) 811 g_free(gap); 812 g_free(pp); 813 if ((gp->flags & G_GEOM_WITHER)) 814 g_do_wither(); 815 } 816 817 /* 818 * We keep the "geoms" list sorted by topological order (== increasing 819 * numerical rank) at all times. 820 * When an attach is done, the attaching geoms rank is invalidated 821 * and it is moved to the tail of the list. 822 * All geoms later in the sequence has their ranks reevaluated in 823 * sequence. If we cannot assign rank to a geom because it's 824 * prerequisites do not have rank, we move that element to the tail 825 * of the sequence with invalid rank as well. 826 * At some point we encounter our original geom and if we stil fail 827 * to assign it a rank, there must be a loop and we fail back to 828 * g_attach() which detach again and calls redo_rank again 829 * to fix up the damage. 830 * It would be much simpler code wise to do it recursively, but we 831 * can't risk that on the kernel stack. 832 */ 833 834 static int 835 redo_rank(struct g_geom *gp) 836 { 837 struct g_consumer *cp; 838 struct g_geom *gp1, *gp2; 839 int n, m; 840 841 g_topology_assert(); 842 G_VALID_GEOM(gp); 843 844 /* Invalidate this geoms rank and move it to the tail */ 845 gp1 = TAILQ_NEXT(gp, geoms); 846 if (gp1 != NULL) { 847 gp->rank = 0; 848 TAILQ_REMOVE(&geoms, gp, geoms); 849 TAILQ_INSERT_TAIL(&geoms, gp, geoms); 850 } else { 851 gp1 = gp; 852 } 853 854 /* re-rank the rest of the sequence */ 855 for (; gp1 != NULL; gp1 = gp2) { 856 gp1->rank = 0; 857 m = 1; 858 LIST_FOREACH(cp, &gp1->consumer, consumer) { 859 if (cp->provider == NULL) 860 continue; 861 n = cp->provider->geom->rank; 862 if (n == 0) { 863 m = 0; 864 break; 865 } else if (n >= m) 866 m = n + 1; 867 } 868 gp1->rank = m; 869 gp2 = TAILQ_NEXT(gp1, geoms); 870 871 /* got a rank, moving on */ 872 if (m != 0) 873 continue; 874 875 /* no rank to original geom means loop */ 876 if (gp == gp1) 877 return (ELOOP); 878 879 /* no rank, put it at the end move on */ 880 TAILQ_REMOVE(&geoms, gp1, geoms); 881 TAILQ_INSERT_TAIL(&geoms, gp1, geoms); 882 } 883 return (0); 884 } 885 886 int 887 g_attach(struct g_consumer *cp, struct g_provider *pp) 888 { 889 int error; 890 891 g_topology_assert(); 892 G_VALID_CONSUMER(cp); 893 G_VALID_PROVIDER(pp); 894 g_trace(G_T_TOPOLOGY, "g_attach(%p, %p)", cp, pp); 895 KASSERT(cp->provider == NULL, ("attach but attached")); 896 if ((pp->flags & (G_PF_ORPHAN | G_PF_WITHER)) != 0) 897 return (ENXIO); 898 cp->provider = pp; 899 cp->flags &= ~G_CF_ORPHAN; 900 LIST_INSERT_HEAD(&pp->consumers, cp, consumers); 901 error = redo_rank(cp->geom); 902 if (error) { 903 LIST_REMOVE(cp, consumers); 904 cp->provider = NULL; 905 redo_rank(cp->geom); 906 } 907 return (error); 908 } 909 910 void 911 g_detach(struct g_consumer *cp) 912 { 913 struct g_provider *pp; 914 915 g_topology_assert(); 916 G_VALID_CONSUMER(cp); 917 g_trace(G_T_TOPOLOGY, "g_detach(%p)", cp); 918 KASSERT(cp->provider != NULL, ("detach but not attached")); 919 KASSERT(cp->acr == 0, ("detach but nonzero acr")); 920 KASSERT(cp->acw == 0, ("detach but nonzero acw")); 921 KASSERT(cp->ace == 0, ("detach but nonzero ace")); 922 KASSERT(cp->nstart == cp->nend, 923 ("detach with active requests")); 924 pp = cp->provider; 925 LIST_REMOVE(cp, consumers); 926 cp->provider = NULL; 927 if ((cp->geom->flags & G_GEOM_WITHER) || 928 (pp->geom->flags & G_GEOM_WITHER) || 929 (pp->flags & G_PF_WITHER)) 930 g_do_wither(); 931 redo_rank(cp->geom); 932 } 933 934 /* 935 * g_access() 936 * 937 * Access-check with delta values. The question asked is "can provider 938 * "cp" change the access counters by the relative amounts dc[rwe] ?" 939 */ 940 941 int 942 g_access(struct g_consumer *cp, int dcr, int dcw, int dce) 943 { 944 struct g_provider *pp; 945 struct g_geom *gp; 946 int pw, pe; 947 #ifdef INVARIANTS 948 int sr, sw, se; 949 #endif 950 int error; 951 952 g_topology_assert(); 953 G_VALID_CONSUMER(cp); 954 pp = cp->provider; 955 KASSERT(pp != NULL, ("access but not attached")); 956 G_VALID_PROVIDER(pp); 957 gp = pp->geom; 958 959 g_trace(G_T_ACCESS, "g_access(%p(%s), %d, %d, %d)", 960 cp, pp->name, dcr, dcw, dce); 961 962 KASSERT(cp->acr + dcr >= 0, ("access resulting in negative acr")); 963 KASSERT(cp->acw + dcw >= 0, ("access resulting in negative acw")); 964 KASSERT(cp->ace + dce >= 0, ("access resulting in negative ace")); 965 KASSERT(dcr != 0 || dcw != 0 || dce != 0, ("NOP access request")); 966 KASSERT(cp->acr + dcr != 0 || cp->acw + dcw != 0 || 967 cp->ace + dce != 0 || cp->nstart == cp->nend, 968 ("Last close with active requests")); 969 KASSERT(gp->access != NULL, ("NULL geom->access")); 970 971 /* 972 * If our class cares about being spoiled, and we have been, we 973 * are probably just ahead of the event telling us that. Fail 974 * now rather than having to unravel this later. 975 */ 976 if (cp->geom->spoiled != NULL && (cp->flags & G_CF_SPOILED) && 977 (dcr > 0 || dcw > 0 || dce > 0)) 978 return (ENXIO); 979 980 /* 981 * A number of GEOM classes either need to perform an I/O on the first 982 * open or to acquire a different subsystem's lock. To do that they 983 * may have to drop the topology lock. 984 * Other GEOM classes perform special actions when opening a lower rank 985 * geom for the first time. As a result, more than one thread may 986 * end up performing the special actions. 987 * So, we prevent concurrent "first" opens by marking the consumer with 988 * special flag. 989 * 990 * Note that if the geom's access method never drops the topology lock, 991 * then we will never see G_GEOM_IN_ACCESS here. 992 */ 993 while ((gp->flags & G_GEOM_IN_ACCESS) != 0) { 994 g_trace(G_T_ACCESS, 995 "%s: race on geom %s via provider %s and consumer of %s", 996 __func__, gp->name, pp->name, cp->geom->name); 997 gp->flags |= G_GEOM_ACCESS_WAIT; 998 g_topology_sleep(gp, 0); 999 } 1000 1001 /* 1002 * Figure out what counts the provider would have had, if this 1003 * consumer had (r0w0e0) at this time. 1004 */ 1005 pw = pp->acw - cp->acw; 1006 pe = pp->ace - cp->ace; 1007 1008 g_trace(G_T_ACCESS, 1009 "open delta:[r%dw%de%d] old:[r%dw%de%d] provider:[r%dw%de%d] %p(%s)", 1010 dcr, dcw, dce, 1011 cp->acr, cp->acw, cp->ace, 1012 pp->acr, pp->acw, pp->ace, 1013 pp, pp->name); 1014 1015 /* If foot-shooting is enabled, any open on rank#1 is OK */ 1016 if ((g_debugflags & G_F_FOOTSHOOTING) && gp->rank == 1) 1017 ; 1018 /* If we try exclusive but already write: fail */ 1019 else if (dce > 0 && pw > 0) 1020 return (EPERM); 1021 /* If we try write but already exclusive: fail */ 1022 else if (dcw > 0 && pe > 0) 1023 return (EPERM); 1024 /* If we try to open more but provider is error'ed: fail */ 1025 else if ((dcr > 0 || dcw > 0 || dce > 0) && pp->error != 0) { 1026 printf("%s(%d): provider %s has error %d set\n", 1027 __func__, __LINE__, pp->name, pp->error); 1028 return (pp->error); 1029 } 1030 1031 /* Ok then... */ 1032 1033 #ifdef INVARIANTS 1034 sr = cp->acr; 1035 sw = cp->acw; 1036 se = cp->ace; 1037 #endif 1038 gp->flags |= G_GEOM_IN_ACCESS; 1039 error = gp->access(pp, dcr, dcw, dce); 1040 KASSERT(dcr > 0 || dcw > 0 || dce > 0 || error == 0, 1041 ("Geom provider %s::%s dcr=%d dcw=%d dce=%d error=%d failed " 1042 "closing ->access()", gp->class->name, pp->name, dcr, dcw, 1043 dce, error)); 1044 1045 g_topology_assert(); 1046 gp->flags &= ~G_GEOM_IN_ACCESS; 1047 KASSERT(cp->acr == sr && cp->acw == sw && cp->ace == se, 1048 ("Access counts changed during geom->access")); 1049 if ((gp->flags & G_GEOM_ACCESS_WAIT) != 0) { 1050 gp->flags &= ~G_GEOM_ACCESS_WAIT; 1051 wakeup(gp); 1052 } 1053 1054 if (!error) { 1055 /* 1056 * If we open first write, spoil any partner consumers. 1057 * If we close last write and provider is not errored, 1058 * trigger re-taste. 1059 */ 1060 if (pp->acw == 0 && dcw != 0) 1061 g_spoil(pp, cp); 1062 else if (pp->acw != 0 && pp->acw == -dcw && pp->error == 0 && 1063 !(gp->flags & G_GEOM_WITHER)) 1064 g_post_event(g_new_provider_event, pp, M_WAITOK, 1065 pp, NULL); 1066 1067 pp->acr += dcr; 1068 pp->acw += dcw; 1069 pp->ace += dce; 1070 cp->acr += dcr; 1071 cp->acw += dcw; 1072 cp->ace += dce; 1073 if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) 1074 KASSERT(pp->sectorsize > 0, 1075 ("Provider %s lacks sectorsize", pp->name)); 1076 if ((cp->geom->flags & G_GEOM_WITHER) && 1077 cp->acr == 0 && cp->acw == 0 && cp->ace == 0) 1078 g_do_wither(); 1079 } 1080 return (error); 1081 } 1082 1083 int 1084 g_handleattr_int(struct bio *bp, const char *attribute, int val) 1085 { 1086 1087 return (g_handleattr(bp, attribute, &val, sizeof val)); 1088 } 1089 1090 int 1091 g_handleattr_uint16_t(struct bio *bp, const char *attribute, uint16_t val) 1092 { 1093 1094 return (g_handleattr(bp, attribute, &val, sizeof val)); 1095 } 1096 1097 int 1098 g_handleattr_off_t(struct bio *bp, const char *attribute, off_t val) 1099 { 1100 1101 return (g_handleattr(bp, attribute, &val, sizeof val)); 1102 } 1103 1104 int 1105 g_handleattr_str(struct bio *bp, const char *attribute, const char *str) 1106 { 1107 1108 return (g_handleattr(bp, attribute, str, 0)); 1109 } 1110 1111 int 1112 g_handleattr(struct bio *bp, const char *attribute, const void *val, int len) 1113 { 1114 int error = 0; 1115 1116 if (strcmp(bp->bio_attribute, attribute)) 1117 return (0); 1118 if (len == 0) { 1119 bzero(bp->bio_data, bp->bio_length); 1120 if (strlcpy(bp->bio_data, val, bp->bio_length) >= 1121 bp->bio_length) { 1122 printf("%s: %s %s bio_length %jd strlen %zu -> EFAULT\n", 1123 __func__, bp->bio_to->name, attribute, 1124 (intmax_t)bp->bio_length, strlen(val)); 1125 error = EFAULT; 1126 } 1127 } else if (bp->bio_length == len) { 1128 bcopy(val, bp->bio_data, len); 1129 } else { 1130 printf("%s: %s %s bio_length %jd len %d -> EFAULT\n", __func__, 1131 bp->bio_to->name, attribute, (intmax_t)bp->bio_length, len); 1132 error = EFAULT; 1133 } 1134 if (error == 0) 1135 bp->bio_completed = bp->bio_length; 1136 g_io_deliver(bp, error); 1137 return (1); 1138 } 1139 1140 int 1141 g_std_access(struct g_provider *pp, 1142 int dr __unused, int dw __unused, int de __unused) 1143 { 1144 1145 g_topology_assert(); 1146 G_VALID_PROVIDER(pp); 1147 return (0); 1148 } 1149 1150 void 1151 g_std_done(struct bio *bp) 1152 { 1153 struct bio *bp2; 1154 1155 bp2 = bp->bio_parent; 1156 if (bp2->bio_error == 0) 1157 bp2->bio_error = bp->bio_error; 1158 bp2->bio_completed += bp->bio_completed; 1159 g_destroy_bio(bp); 1160 bp2->bio_inbed++; 1161 if (bp2->bio_children == bp2->bio_inbed) { 1162 if (bp2->bio_cmd == BIO_SPEEDUP) 1163 bp2->bio_completed = bp2->bio_length; 1164 g_io_deliver(bp2, bp2->bio_error); 1165 } 1166 } 1167 1168 /* XXX: maybe this is only g_slice_spoiled */ 1169 1170 void 1171 g_std_spoiled(struct g_consumer *cp) 1172 { 1173 struct g_geom *gp; 1174 struct g_provider *pp; 1175 1176 g_topology_assert(); 1177 G_VALID_CONSUMER(cp); 1178 g_trace(G_T_TOPOLOGY, "g_std_spoiled(%p)", cp); 1179 cp->flags |= G_CF_ORPHAN; 1180 g_detach(cp); 1181 gp = cp->geom; 1182 LIST_FOREACH(pp, &gp->provider, provider) 1183 g_orphan_provider(pp, ENXIO); 1184 g_destroy_consumer(cp); 1185 if (LIST_EMPTY(&gp->provider) && LIST_EMPTY(&gp->consumer)) 1186 g_destroy_geom(gp); 1187 else 1188 gp->flags |= G_GEOM_WITHER; 1189 } 1190 1191 /* 1192 * Spoiling happens when a provider is opened for writing, but consumers 1193 * which are configured by in-band data are attached (slicers for instance). 1194 * Since the write might potentially change the in-band data, such consumers 1195 * need to re-evaluate their existence after the writing session closes. 1196 * We do this by (offering to) tear them down when the open for write happens 1197 * in return for a re-taste when it closes again. 1198 * Together with the fact that such consumers grab an 'e' bit whenever they 1199 * are open, regardless of mode, this ends up DTRT. 1200 */ 1201 1202 static void 1203 g_spoil_event(void *arg, int flag) 1204 { 1205 struct g_provider *pp; 1206 struct g_consumer *cp, *cp2; 1207 1208 g_topology_assert(); 1209 if (flag == EV_CANCEL) 1210 return; 1211 pp = arg; 1212 G_VALID_PROVIDER(pp); 1213 g_trace(G_T_TOPOLOGY, "%s %p(%s:%s:%s)", __func__, pp, 1214 pp->geom->class->name, pp->geom->name, pp->name); 1215 for (cp = LIST_FIRST(&pp->consumers); cp != NULL; cp = cp2) { 1216 cp2 = LIST_NEXT(cp, consumers); 1217 if ((cp->flags & G_CF_SPOILED) == 0) 1218 continue; 1219 cp->flags &= ~G_CF_SPOILED; 1220 if (cp->geom->spoiled == NULL) 1221 continue; 1222 cp->geom->spoiled(cp); 1223 g_topology_assert(); 1224 } 1225 } 1226 1227 void 1228 g_spoil(struct g_provider *pp, struct g_consumer *cp) 1229 { 1230 struct g_consumer *cp2; 1231 1232 g_topology_assert(); 1233 G_VALID_PROVIDER(pp); 1234 G_VALID_CONSUMER(cp); 1235 1236 LIST_FOREACH(cp2, &pp->consumers, consumers) { 1237 if (cp2 == cp) 1238 continue; 1239 /* 1240 KASSERT(cp2->acr == 0, ("spoiling cp->acr = %d", cp2->acr)); 1241 KASSERT(cp2->acw == 0, ("spoiling cp->acw = %d", cp2->acw)); 1242 */ 1243 KASSERT(cp2->ace == 0, ("spoiling cp->ace = %d", cp2->ace)); 1244 cp2->flags |= G_CF_SPOILED; 1245 } 1246 g_post_event(g_spoil_event, pp, M_WAITOK, pp, NULL); 1247 } 1248 1249 static void 1250 g_media_changed_event(void *arg, int flag) 1251 { 1252 struct g_provider *pp; 1253 int retaste; 1254 1255 g_topology_assert(); 1256 if (flag == EV_CANCEL) 1257 return; 1258 pp = arg; 1259 G_VALID_PROVIDER(pp); 1260 1261 /* 1262 * If provider was not open for writing, queue retaste after spoiling. 1263 * If it was, retaste will happen automatically on close. 1264 */ 1265 retaste = (pp->acw == 0 && pp->error == 0 && 1266 !(pp->geom->flags & G_GEOM_WITHER)); 1267 g_spoil_event(arg, flag); 1268 if (retaste) 1269 g_post_event(g_new_provider_event, pp, M_WAITOK, pp, NULL); 1270 } 1271 1272 int 1273 g_media_changed(struct g_provider *pp, int flag) 1274 { 1275 struct g_consumer *cp; 1276 1277 LIST_FOREACH(cp, &pp->consumers, consumers) 1278 cp->flags |= G_CF_SPOILED; 1279 return (g_post_event(g_media_changed_event, pp, flag, pp, NULL)); 1280 } 1281 1282 int 1283 g_media_gone(struct g_provider *pp, int flag) 1284 { 1285 struct g_consumer *cp; 1286 1287 LIST_FOREACH(cp, &pp->consumers, consumers) 1288 cp->flags |= G_CF_SPOILED; 1289 return (g_post_event(g_spoil_event, pp, flag, pp, NULL)); 1290 } 1291 1292 int 1293 g_getattr__(const char *attr, struct g_consumer *cp, void *var, int len) 1294 { 1295 int error, i; 1296 1297 i = len; 1298 error = g_io_getattr(attr, cp, &i, var); 1299 if (error) 1300 return (error); 1301 if (i != len) 1302 return (EINVAL); 1303 return (0); 1304 } 1305 1306 static int 1307 g_get_device_prefix_len(const char *name) 1308 { 1309 int len; 1310 1311 if (strncmp(name, "ada", 3) == 0) 1312 len = 3; 1313 else if (strncmp(name, "ad", 2) == 0) 1314 len = 2; 1315 else 1316 return (0); 1317 if (name[len] < '0' || name[len] > '9') 1318 return (0); 1319 do { 1320 len++; 1321 } while (name[len] >= '0' && name[len] <= '9'); 1322 return (len); 1323 } 1324 1325 int 1326 g_compare_names(const char *namea, const char *nameb) 1327 { 1328 int deva, devb; 1329 1330 if (strcmp(namea, nameb) == 0) 1331 return (1); 1332 deva = g_get_device_prefix_len(namea); 1333 if (deva == 0) 1334 return (0); 1335 devb = g_get_device_prefix_len(nameb); 1336 if (devb == 0) 1337 return (0); 1338 if (strcmp(namea + deva, nameb + devb) == 0) 1339 return (1); 1340 return (0); 1341 } 1342 1343 #if defined(DIAGNOSTIC) || defined(DDB) 1344 /* 1345 * This function walks the mesh and returns a non-zero integer if it 1346 * finds the argument pointer is an object. The return value indicates 1347 * which type of object it is believed to be. If topology is not locked, 1348 * this function is potentially dangerous, but we don't assert that the 1349 * topology lock is held when called from debugger. 1350 */ 1351 int 1352 g_valid_obj(void const *ptr) 1353 { 1354 struct g_class *mp; 1355 struct g_geom *gp; 1356 struct g_consumer *cp; 1357 struct g_provider *pp; 1358 1359 #ifdef KDB 1360 if (kdb_active == 0) 1361 #endif 1362 g_topology_assert(); 1363 1364 LIST_FOREACH(mp, &g_classes, class) { 1365 if (ptr == mp) 1366 return (1); 1367 LIST_FOREACH(gp, &mp->geom, geom) { 1368 if (ptr == gp) 1369 return (2); 1370 LIST_FOREACH(cp, &gp->consumer, consumer) 1371 if (ptr == cp) 1372 return (3); 1373 LIST_FOREACH(pp, &gp->provider, provider) 1374 if (ptr == pp) 1375 return (4); 1376 } 1377 } 1378 return(0); 1379 } 1380 #endif 1381 1382 #ifdef DDB 1383 1384 #define gprintf(...) do { \ 1385 db_printf("%*s", indent, ""); \ 1386 db_printf(__VA_ARGS__); \ 1387 } while (0) 1388 #define gprintln(...) do { \ 1389 gprintf(__VA_ARGS__); \ 1390 db_printf("\n"); \ 1391 } while (0) 1392 1393 #define ADDFLAG(obj, flag, sflag) do { \ 1394 if ((obj)->flags & (flag)) { \ 1395 if (comma) \ 1396 strlcat(str, ",", size); \ 1397 strlcat(str, (sflag), size); \ 1398 comma = 1; \ 1399 } \ 1400 } while (0) 1401 1402 static char * 1403 provider_flags_to_string(struct g_provider *pp, char *str, size_t size) 1404 { 1405 int comma = 0; 1406 1407 bzero(str, size); 1408 if (pp->flags == 0) { 1409 strlcpy(str, "NONE", size); 1410 return (str); 1411 } 1412 ADDFLAG(pp, G_PF_WITHER, "G_PF_WITHER"); 1413 ADDFLAG(pp, G_PF_ORPHAN, "G_PF_ORPHAN"); 1414 return (str); 1415 } 1416 1417 static char * 1418 geom_flags_to_string(struct g_geom *gp, char *str, size_t size) 1419 { 1420 int comma = 0; 1421 1422 bzero(str, size); 1423 if (gp->flags == 0) { 1424 strlcpy(str, "NONE", size); 1425 return (str); 1426 } 1427 ADDFLAG(gp, G_GEOM_WITHER, "G_GEOM_WITHER"); 1428 return (str); 1429 } 1430 static void 1431 db_show_geom_consumer(int indent, struct g_consumer *cp) 1432 { 1433 1434 if (indent == 0) { 1435 gprintln("consumer: %p", cp); 1436 gprintln(" class: %s (%p)", cp->geom->class->name, 1437 cp->geom->class); 1438 gprintln(" geom: %s (%p)", cp->geom->name, cp->geom); 1439 if (cp->provider == NULL) 1440 gprintln(" provider: none"); 1441 else { 1442 gprintln(" provider: %s (%p)", cp->provider->name, 1443 cp->provider); 1444 } 1445 gprintln(" access: r%dw%de%d", cp->acr, cp->acw, cp->ace); 1446 gprintln(" flags: 0x%04x", cp->flags); 1447 #ifdef INVARIANTS 1448 gprintln(" nstart: %u", cp->nstart); 1449 gprintln(" nend: %u", cp->nend); 1450 #endif 1451 } else { 1452 gprintf("consumer: %p (%s), access=r%dw%de%d", cp, 1453 cp->provider != NULL ? cp->provider->name : "none", 1454 cp->acr, cp->acw, cp->ace); 1455 if (cp->flags) 1456 db_printf(", flags=0x%04x", cp->flags); 1457 db_printf("\n"); 1458 } 1459 } 1460 1461 static void 1462 db_show_geom_provider(int indent, struct g_provider *pp) 1463 { 1464 struct g_consumer *cp; 1465 char flags[64]; 1466 1467 if (indent == 0) { 1468 gprintln("provider: %s (%p)", pp->name, pp); 1469 gprintln(" class: %s (%p)", pp->geom->class->name, 1470 pp->geom->class); 1471 gprintln(" geom: %s (%p)", pp->geom->name, pp->geom); 1472 gprintln(" mediasize: %jd", (intmax_t)pp->mediasize); 1473 gprintln(" sectorsize: %u", pp->sectorsize); 1474 gprintln(" stripesize: %ju", (uintmax_t)pp->stripesize); 1475 gprintln(" stripeoffset: %ju", (uintmax_t)pp->stripeoffset); 1476 gprintln(" access: r%dw%de%d", pp->acr, pp->acw, 1477 pp->ace); 1478 gprintln(" flags: %s (0x%04x)", 1479 provider_flags_to_string(pp, flags, sizeof(flags)), 1480 pp->flags); 1481 gprintln(" error: %d", pp->error); 1482 if (LIST_EMPTY(&pp->consumers)) 1483 gprintln(" consumers: none"); 1484 } else { 1485 gprintf("provider: %s (%p), access=r%dw%de%d", 1486 pp->name, pp, pp->acr, pp->acw, pp->ace); 1487 if (pp->flags != 0) { 1488 db_printf(", flags=%s (0x%04x)", 1489 provider_flags_to_string(pp, flags, sizeof(flags)), 1490 pp->flags); 1491 } 1492 db_printf("\n"); 1493 } 1494 if (!LIST_EMPTY(&pp->consumers)) { 1495 LIST_FOREACH(cp, &pp->consumers, consumers) { 1496 db_show_geom_consumer(indent + 2, cp); 1497 if (db_pager_quit) 1498 break; 1499 } 1500 } 1501 } 1502 1503 static void 1504 db_show_geom_geom(int indent, struct g_geom *gp) 1505 { 1506 struct g_provider *pp; 1507 struct g_consumer *cp; 1508 char flags[64]; 1509 1510 if (indent == 0) { 1511 gprintln("geom: %s (%p)", gp->name, gp); 1512 gprintln(" class: %s (%p)", gp->class->name, gp->class); 1513 gprintln(" flags: %s (0x%04x)", 1514 geom_flags_to_string(gp, flags, sizeof(flags)), gp->flags); 1515 gprintln(" rank: %d", gp->rank); 1516 if (LIST_EMPTY(&gp->provider)) 1517 gprintln(" providers: none"); 1518 if (LIST_EMPTY(&gp->consumer)) 1519 gprintln(" consumers: none"); 1520 } else { 1521 gprintf("geom: %s (%p), rank=%d", gp->name, gp, gp->rank); 1522 if (gp->flags != 0) { 1523 db_printf(", flags=%s (0x%04x)", 1524 geom_flags_to_string(gp, flags, sizeof(flags)), 1525 gp->flags); 1526 } 1527 db_printf("\n"); 1528 } 1529 if (!LIST_EMPTY(&gp->provider)) { 1530 LIST_FOREACH(pp, &gp->provider, provider) { 1531 db_show_geom_provider(indent + 2, pp); 1532 if (db_pager_quit) 1533 break; 1534 } 1535 } 1536 if (!LIST_EMPTY(&gp->consumer)) { 1537 LIST_FOREACH(cp, &gp->consumer, consumer) { 1538 db_show_geom_consumer(indent + 2, cp); 1539 if (db_pager_quit) 1540 break; 1541 } 1542 } 1543 } 1544 1545 static void 1546 db_show_geom_class(struct g_class *mp) 1547 { 1548 struct g_geom *gp; 1549 1550 db_printf("class: %s (%p)\n", mp->name, mp); 1551 LIST_FOREACH(gp, &mp->geom, geom) { 1552 db_show_geom_geom(2, gp); 1553 if (db_pager_quit) 1554 break; 1555 } 1556 } 1557 1558 /* 1559 * Print the GEOM topology or the given object. 1560 */ 1561 DB_SHOW_COMMAND(geom, db_show_geom) 1562 { 1563 struct g_class *mp; 1564 1565 if (!have_addr) { 1566 /* No address given, print the entire topology. */ 1567 LIST_FOREACH(mp, &g_classes, class) { 1568 db_show_geom_class(mp); 1569 db_printf("\n"); 1570 if (db_pager_quit) 1571 break; 1572 } 1573 } else { 1574 switch (g_valid_obj((void *)addr)) { 1575 case 1: 1576 db_show_geom_class((struct g_class *)addr); 1577 break; 1578 case 2: 1579 db_show_geom_geom(0, (struct g_geom *)addr); 1580 break; 1581 case 3: 1582 db_show_geom_consumer(0, (struct g_consumer *)addr); 1583 break; 1584 case 4: 1585 db_show_geom_provider(0, (struct g_provider *)addr); 1586 break; 1587 default: 1588 db_printf("Not a GEOM object.\n"); 1589 break; 1590 } 1591 } 1592 } 1593 1594 static void 1595 db_print_bio_cmd(struct bio *bp) 1596 { 1597 db_printf(" cmd: "); 1598 switch (bp->bio_cmd) { 1599 case BIO_READ: db_printf("BIO_READ"); break; 1600 case BIO_WRITE: db_printf("BIO_WRITE"); break; 1601 case BIO_DELETE: db_printf("BIO_DELETE"); break; 1602 case BIO_GETATTR: db_printf("BIO_GETATTR"); break; 1603 case BIO_FLUSH: db_printf("BIO_FLUSH"); break; 1604 case BIO_CMD0: db_printf("BIO_CMD0"); break; 1605 case BIO_CMD1: db_printf("BIO_CMD1"); break; 1606 case BIO_CMD2: db_printf("BIO_CMD2"); break; 1607 case BIO_ZONE: db_printf("BIO_ZONE"); break; 1608 default: db_printf("UNKNOWN"); break; 1609 } 1610 db_printf("\n"); 1611 } 1612 1613 static void 1614 db_print_bio_flags(struct bio *bp) 1615 { 1616 int comma; 1617 1618 comma = 0; 1619 db_printf(" flags: "); 1620 if (bp->bio_flags & BIO_ERROR) { 1621 db_printf("BIO_ERROR"); 1622 comma = 1; 1623 } 1624 if (bp->bio_flags & BIO_DONE) { 1625 db_printf("%sBIO_DONE", (comma ? ", " : "")); 1626 comma = 1; 1627 } 1628 if (bp->bio_flags & BIO_ONQUEUE) 1629 db_printf("%sBIO_ONQUEUE", (comma ? ", " : "")); 1630 db_printf("\n"); 1631 } 1632 1633 /* 1634 * Print useful information in a BIO 1635 */ 1636 DB_SHOW_COMMAND(bio, db_show_bio) 1637 { 1638 struct bio *bp; 1639 1640 if (have_addr) { 1641 bp = (struct bio *)addr; 1642 db_printf("BIO %p\n", bp); 1643 db_print_bio_cmd(bp); 1644 db_print_bio_flags(bp); 1645 db_printf(" cflags: 0x%hx\n", bp->bio_cflags); 1646 db_printf(" pflags: 0x%hx\n", bp->bio_pflags); 1647 db_printf(" offset: %jd\n", (intmax_t)bp->bio_offset); 1648 db_printf(" length: %jd\n", (intmax_t)bp->bio_length); 1649 db_printf(" bcount: %ld\n", bp->bio_bcount); 1650 db_printf(" resid: %ld\n", bp->bio_resid); 1651 db_printf(" completed: %jd\n", (intmax_t)bp->bio_completed); 1652 db_printf(" children: %u\n", bp->bio_children); 1653 db_printf(" inbed: %u\n", bp->bio_inbed); 1654 db_printf(" error: %d\n", bp->bio_error); 1655 db_printf(" parent: %p\n", bp->bio_parent); 1656 db_printf(" driver1: %p\n", bp->bio_driver1); 1657 db_printf(" driver2: %p\n", bp->bio_driver2); 1658 db_printf(" caller1: %p\n", bp->bio_caller1); 1659 db_printf(" caller2: %p\n", bp->bio_caller2); 1660 db_printf(" bio_from: %p\n", bp->bio_from); 1661 db_printf(" bio_to: %p\n", bp->bio_to); 1662 1663 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 1664 db_printf(" bio_track_bp: %p\n", bp->bio_track_bp); 1665 #endif 1666 } 1667 } 1668 1669 #undef gprintf 1670 #undef gprintln 1671 #undef ADDFLAG 1672 1673 #endif /* DDB */ 1674