1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2002 Poul-Henning Kamp 5 * Copyright (c) 2002 Networks Associates Technology, Inc. 6 * All rights reserved. 7 * 8 * This software was developed for the FreeBSD Project by Poul-Henning Kamp 9 * and NAI Labs, the Security Research Division of Network Associates, Inc. 10 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the 11 * DARPA CHATS research program. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. The names of the authors may not be used to endorse or promote 22 * products derived from this software without specific prior written 23 * permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 
36 */ 37 38 #include <sys/cdefs.h> 39 __FBSDID("$FreeBSD$"); 40 41 #include "opt_ddb.h" 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/devicestat.h> 46 #include <sys/kernel.h> 47 #include <sys/malloc.h> 48 #include <sys/bio.h> 49 #include <sys/sysctl.h> 50 #include <sys/proc.h> 51 #include <sys/kthread.h> 52 #include <sys/lock.h> 53 #include <sys/mutex.h> 54 #include <sys/errno.h> 55 #include <sys/sbuf.h> 56 #include <geom/geom.h> 57 #include <geom/geom_int.h> 58 #include <machine/stdarg.h> 59 60 #ifdef DDB 61 #include <ddb/ddb.h> 62 #endif 63 64 #ifdef KDB 65 #include <sys/kdb.h> 66 #endif 67 68 struct class_list_head g_classes = LIST_HEAD_INITIALIZER(g_classes); 69 static struct g_tailq_head geoms = TAILQ_HEAD_INITIALIZER(geoms); 70 char *g_wait_event, *g_wait_up, *g_wait_down, *g_wait_sim; 71 72 struct g_hh00 { 73 struct g_class *mp; 74 struct g_provider *pp; 75 off_t size; 76 int error; 77 int post; 78 }; 79 80 /* 81 * This event offers a new class a chance to taste all preexisting providers. 82 */ 83 static void 84 g_load_class(void *arg, int flag) 85 { 86 struct g_hh00 *hh; 87 struct g_class *mp2, *mp; 88 struct g_geom *gp; 89 struct g_provider *pp; 90 91 g_topology_assert(); 92 if (flag == EV_CANCEL) /* XXX: can't happen ? 
*/ 93 return; 94 if (g_shutdown) 95 return; 96 97 hh = arg; 98 mp = hh->mp; 99 hh->error = 0; 100 if (hh->post) { 101 g_free(hh); 102 hh = NULL; 103 } 104 g_trace(G_T_TOPOLOGY, "g_load_class(%s)", mp->name); 105 KASSERT(mp->name != NULL && *mp->name != '\0', 106 ("GEOM class has no name")); 107 LIST_FOREACH(mp2, &g_classes, class) { 108 if (mp2 == mp) { 109 printf("The GEOM class %s is already loaded.\n", 110 mp2->name); 111 if (hh != NULL) 112 hh->error = EEXIST; 113 return; 114 } else if (strcmp(mp2->name, mp->name) == 0) { 115 printf("A GEOM class %s is already loaded.\n", 116 mp2->name); 117 if (hh != NULL) 118 hh->error = EEXIST; 119 return; 120 } 121 } 122 123 LIST_INIT(&mp->geom); 124 LIST_INSERT_HEAD(&g_classes, mp, class); 125 if (mp->init != NULL) 126 mp->init(mp); 127 if (mp->taste == NULL) 128 return; 129 LIST_FOREACH(mp2, &g_classes, class) { 130 if (mp == mp2) 131 continue; 132 LIST_FOREACH(gp, &mp2->geom, geom) { 133 LIST_FOREACH(pp, &gp->provider, provider) { 134 mp->taste(mp, pp, 0); 135 g_topology_assert(); 136 } 137 } 138 } 139 } 140 141 static int 142 g_unload_class(struct g_class *mp) 143 { 144 struct g_geom *gp; 145 struct g_provider *pp; 146 struct g_consumer *cp; 147 int error; 148 149 g_topology_lock(); 150 g_trace(G_T_TOPOLOGY, "g_unload_class(%s)", mp->name); 151 retry: 152 G_VALID_CLASS(mp); 153 LIST_FOREACH(gp, &mp->geom, geom) { 154 /* We refuse to unload if anything is open */ 155 LIST_FOREACH(pp, &gp->provider, provider) 156 if (pp->acr || pp->acw || pp->ace) { 157 g_topology_unlock(); 158 return (EBUSY); 159 } 160 LIST_FOREACH(cp, &gp->consumer, consumer) 161 if (cp->acr || cp->acw || cp->ace) { 162 g_topology_unlock(); 163 return (EBUSY); 164 } 165 /* If the geom is withering, wait for it to finish. */ 166 if (gp->flags & G_GEOM_WITHER) { 167 g_topology_sleep(mp, 1); 168 goto retry; 169 } 170 } 171 172 /* 173 * We allow unloading if we have no geoms, or a class 174 * method we can use to get rid of them. 
175 */ 176 if (!LIST_EMPTY(&mp->geom) && mp->destroy_geom == NULL) { 177 g_topology_unlock(); 178 return (EOPNOTSUPP); 179 } 180 181 /* Bar new entries */ 182 mp->taste = NULL; 183 mp->config = NULL; 184 185 LIST_FOREACH(gp, &mp->geom, geom) { 186 error = mp->destroy_geom(NULL, mp, gp); 187 if (error != 0) { 188 g_topology_unlock(); 189 return (error); 190 } 191 } 192 /* Wait for withering to finish. */ 193 for (;;) { 194 gp = LIST_FIRST(&mp->geom); 195 if (gp == NULL) 196 break; 197 KASSERT(gp->flags & G_GEOM_WITHER, 198 ("Non-withering geom in class %s", mp->name)); 199 g_topology_sleep(mp, 1); 200 } 201 G_VALID_CLASS(mp); 202 if (mp->fini != NULL) 203 mp->fini(mp); 204 LIST_REMOVE(mp, class); 205 g_topology_unlock(); 206 207 return (0); 208 } 209 210 int 211 g_modevent(module_t mod, int type, void *data) 212 { 213 struct g_hh00 *hh; 214 int error; 215 static int g_ignition; 216 struct g_class *mp; 217 218 mp = data; 219 if (mp->version != G_VERSION) { 220 printf("GEOM class %s has Wrong version %x\n", 221 mp->name, mp->version); 222 return (EINVAL); 223 } 224 if (!g_ignition) { 225 g_ignition++; 226 g_init(); 227 } 228 error = EOPNOTSUPP; 229 switch (type) { 230 case MOD_LOAD: 231 g_trace(G_T_TOPOLOGY, "g_modevent(%s, LOAD)", mp->name); 232 hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO); 233 hh->mp = mp; 234 /* 235 * Once the system is not cold, MOD_LOAD calls will be 236 * from the userland and the g_event thread will be able 237 * to acknowledge their completion. 
238 */ 239 if (cold) { 240 hh->post = 1; 241 error = g_post_event(g_load_class, hh, M_WAITOK, NULL); 242 } else { 243 error = g_waitfor_event(g_load_class, hh, M_WAITOK, 244 NULL); 245 if (error == 0) 246 error = hh->error; 247 g_free(hh); 248 } 249 break; 250 case MOD_UNLOAD: 251 g_trace(G_T_TOPOLOGY, "g_modevent(%s, UNLOAD)", mp->name); 252 error = g_unload_class(mp); 253 if (error == 0) { 254 KASSERT(LIST_EMPTY(&mp->geom), 255 ("Unloaded class (%s) still has geom", mp->name)); 256 } 257 break; 258 } 259 return (error); 260 } 261 262 static void 263 g_retaste_event(void *arg, int flag) 264 { 265 struct g_class *mp, *mp2; 266 struct g_geom *gp; 267 struct g_hh00 *hh; 268 struct g_provider *pp; 269 struct g_consumer *cp; 270 271 g_topology_assert(); 272 if (flag == EV_CANCEL) /* XXX: can't happen ? */ 273 return; 274 if (g_shutdown || g_notaste) 275 return; 276 277 hh = arg; 278 mp = hh->mp; 279 hh->error = 0; 280 if (hh->post) { 281 g_free(hh); 282 hh = NULL; 283 } 284 g_trace(G_T_TOPOLOGY, "g_retaste(%s)", mp->name); 285 286 LIST_FOREACH(mp2, &g_classes, class) { 287 LIST_FOREACH(gp, &mp2->geom, geom) { 288 LIST_FOREACH(pp, &gp->provider, provider) { 289 if (pp->acr || pp->acw || pp->ace) 290 continue; 291 LIST_FOREACH(cp, &pp->consumers, consumers) { 292 if (cp->geom->class == mp && 293 (cp->flags & G_CF_ORPHAN) == 0) 294 break; 295 } 296 if (cp != NULL) { 297 cp->flags |= G_CF_ORPHAN; 298 g_wither_geom(cp->geom, ENXIO); 299 } 300 mp->taste(mp, pp, 0); 301 g_topology_assert(); 302 } 303 } 304 } 305 } 306 307 int 308 g_retaste(struct g_class *mp) 309 { 310 struct g_hh00 *hh; 311 int error; 312 313 if (mp->taste == NULL) 314 return (EINVAL); 315 316 hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO); 317 hh->mp = mp; 318 319 if (cold) { 320 hh->post = 1; 321 error = g_post_event(g_retaste_event, hh, M_WAITOK, NULL); 322 } else { 323 error = g_waitfor_event(g_retaste_event, hh, M_WAITOK, NULL); 324 if (error == 0) 325 error = hh->error; 326 g_free(hh); 327 } 328 329 
return (error); 330 } 331 332 struct g_geom * 333 g_new_geomf(struct g_class *mp, const char *fmt, ...) 334 { 335 struct g_geom *gp; 336 va_list ap; 337 struct sbuf *sb; 338 339 g_topology_assert(); 340 G_VALID_CLASS(mp); 341 sb = sbuf_new_auto(); 342 va_start(ap, fmt); 343 sbuf_vprintf(sb, fmt, ap); 344 va_end(ap); 345 sbuf_finish(sb); 346 gp = g_malloc(sizeof *gp, M_WAITOK | M_ZERO); 347 gp->name = g_malloc(sbuf_len(sb) + 1, M_WAITOK | M_ZERO); 348 gp->class = mp; 349 gp->rank = 1; 350 LIST_INIT(&gp->consumer); 351 LIST_INIT(&gp->provider); 352 LIST_INIT(&gp->aliases); 353 LIST_INSERT_HEAD(&mp->geom, gp, geom); 354 TAILQ_INSERT_HEAD(&geoms, gp, geoms); 355 strcpy(gp->name, sbuf_data(sb)); 356 sbuf_delete(sb); 357 /* Fill in defaults from class */ 358 gp->start = mp->start; 359 gp->spoiled = mp->spoiled; 360 gp->attrchanged = mp->attrchanged; 361 gp->providergone = mp->providergone; 362 gp->dumpconf = mp->dumpconf; 363 gp->access = mp->access; 364 gp->orphan = mp->orphan; 365 gp->ioctl = mp->ioctl; 366 gp->resize = mp->resize; 367 return (gp); 368 } 369 370 void 371 g_destroy_geom(struct g_geom *gp) 372 { 373 struct g_geom_alias *gap, *gaptmp; 374 375 g_topology_assert(); 376 G_VALID_GEOM(gp); 377 g_trace(G_T_TOPOLOGY, "g_destroy_geom(%p(%s))", gp, gp->name); 378 KASSERT(LIST_EMPTY(&gp->consumer), 379 ("g_destroy_geom(%s) with consumer(s) [%p]", 380 gp->name, LIST_FIRST(&gp->consumer))); 381 KASSERT(LIST_EMPTY(&gp->provider), 382 ("g_destroy_geom(%s) with provider(s) [%p]", 383 gp->name, LIST_FIRST(&gp->provider))); 384 g_cancel_event(gp); 385 LIST_REMOVE(gp, geom); 386 TAILQ_REMOVE(&geoms, gp, geoms); 387 LIST_FOREACH_SAFE(gap, &gp->aliases, ga_next, gaptmp) 388 g_free(gap); 389 g_free(gp->name); 390 g_free(gp); 391 } 392 393 /* 394 * This function is called (repeatedly) until the geom has withered away. 
395 */ 396 void 397 g_wither_geom(struct g_geom *gp, int error) 398 { 399 struct g_provider *pp; 400 401 g_topology_assert(); 402 G_VALID_GEOM(gp); 403 g_trace(G_T_TOPOLOGY, "g_wither_geom(%p(%s))", gp, gp->name); 404 if (!(gp->flags & G_GEOM_WITHER)) { 405 gp->flags |= G_GEOM_WITHER; 406 LIST_FOREACH(pp, &gp->provider, provider) 407 if (!(pp->flags & G_PF_ORPHAN)) 408 g_orphan_provider(pp, error); 409 } 410 g_do_wither(); 411 } 412 413 /* 414 * Convenience function to destroy a particular provider. 415 */ 416 void 417 g_wither_provider(struct g_provider *pp, int error) 418 { 419 420 pp->flags |= G_PF_WITHER; 421 if (!(pp->flags & G_PF_ORPHAN)) 422 g_orphan_provider(pp, error); 423 } 424 425 /* 426 * This function is called (repeatedly) until the has withered away. 427 */ 428 void 429 g_wither_geom_close(struct g_geom *gp, int error) 430 { 431 struct g_consumer *cp; 432 433 g_topology_assert(); 434 G_VALID_GEOM(gp); 435 g_trace(G_T_TOPOLOGY, "g_wither_geom_close(%p(%s))", gp, gp->name); 436 LIST_FOREACH(cp, &gp->consumer, consumer) 437 if (cp->acr || cp->acw || cp->ace) 438 g_access(cp, -cp->acr, -cp->acw, -cp->ace); 439 g_wither_geom(gp, error); 440 } 441 442 /* 443 * This function is called (repeatedly) until we cant wash away more 444 * withered bits at present. 
445 */ 446 void 447 g_wither_washer() 448 { 449 struct g_class *mp; 450 struct g_geom *gp, *gp2; 451 struct g_provider *pp, *pp2; 452 struct g_consumer *cp, *cp2; 453 454 g_topology_assert(); 455 LIST_FOREACH(mp, &g_classes, class) { 456 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 457 LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) { 458 if (!(pp->flags & G_PF_WITHER)) 459 continue; 460 if (LIST_EMPTY(&pp->consumers)) 461 g_destroy_provider(pp); 462 } 463 if (!(gp->flags & G_GEOM_WITHER)) 464 continue; 465 LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) { 466 if (LIST_EMPTY(&pp->consumers)) 467 g_destroy_provider(pp); 468 } 469 LIST_FOREACH_SAFE(cp, &gp->consumer, consumer, cp2) { 470 if (cp->acr || cp->acw || cp->ace) 471 continue; 472 if (cp->provider != NULL) 473 g_detach(cp); 474 g_destroy_consumer(cp); 475 } 476 if (LIST_EMPTY(&gp->provider) && 477 LIST_EMPTY(&gp->consumer)) 478 g_destroy_geom(gp); 479 } 480 } 481 } 482 483 struct g_consumer * 484 g_new_consumer(struct g_geom *gp) 485 { 486 struct g_consumer *cp; 487 488 g_topology_assert(); 489 G_VALID_GEOM(gp); 490 KASSERT(!(gp->flags & G_GEOM_WITHER), 491 ("g_new_consumer on WITHERing geom(%s) (class %s)", 492 gp->name, gp->class->name)); 493 KASSERT(gp->orphan != NULL, 494 ("g_new_consumer on geom(%s) (class %s) without orphan", 495 gp->name, gp->class->name)); 496 497 cp = g_malloc(sizeof *cp, M_WAITOK | M_ZERO); 498 cp->geom = gp; 499 cp->stat = devstat_new_entry(cp, -1, 0, DEVSTAT_ALL_SUPPORTED, 500 DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); 501 LIST_INSERT_HEAD(&gp->consumer, cp, consumer); 502 return(cp); 503 } 504 505 void 506 g_destroy_consumer(struct g_consumer *cp) 507 { 508 struct g_geom *gp; 509 510 g_topology_assert(); 511 G_VALID_CONSUMER(cp); 512 g_trace(G_T_TOPOLOGY, "g_destroy_consumer(%p)", cp); 513 KASSERT (cp->provider == NULL, ("g_destroy_consumer but attached")); 514 KASSERT (cp->acr == 0, ("g_destroy_consumer with acr")); 515 KASSERT (cp->acw == 0, ("g_destroy_consumer with 
acw")); 516 KASSERT (cp->ace == 0, ("g_destroy_consumer with ace")); 517 g_cancel_event(cp); 518 gp = cp->geom; 519 LIST_REMOVE(cp, consumer); 520 devstat_remove_entry(cp->stat); 521 g_free(cp); 522 if (gp->flags & G_GEOM_WITHER) 523 g_do_wither(); 524 } 525 526 static void 527 g_new_provider_event(void *arg, int flag) 528 { 529 struct g_class *mp; 530 struct g_provider *pp; 531 struct g_consumer *cp, *next_cp; 532 533 g_topology_assert(); 534 if (flag == EV_CANCEL) 535 return; 536 if (g_shutdown) 537 return; 538 pp = arg; 539 G_VALID_PROVIDER(pp); 540 KASSERT(!(pp->flags & G_PF_WITHER), 541 ("g_new_provider_event but withered")); 542 LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, next_cp) { 543 if ((cp->flags & G_CF_ORPHAN) == 0 && 544 cp->geom->attrchanged != NULL) 545 cp->geom->attrchanged(cp, "GEOM::media"); 546 } 547 if (g_notaste) 548 return; 549 LIST_FOREACH(mp, &g_classes, class) { 550 if (mp->taste == NULL) 551 continue; 552 LIST_FOREACH(cp, &pp->consumers, consumers) 553 if (cp->geom->class == mp && 554 (cp->flags & G_CF_ORPHAN) == 0) 555 break; 556 if (cp != NULL) 557 continue; 558 mp->taste(mp, pp, 0); 559 g_topology_assert(); 560 } 561 } 562 563 564 struct g_provider * 565 g_new_providerf(struct g_geom *gp, const char *fmt, ...) 
566 { 567 struct g_provider *pp; 568 struct sbuf *sb; 569 va_list ap; 570 571 g_topology_assert(); 572 G_VALID_GEOM(gp); 573 KASSERT(gp->access != NULL, 574 ("new provider on geom(%s) without ->access (class %s)", 575 gp->name, gp->class->name)); 576 KASSERT(gp->start != NULL, 577 ("new provider on geom(%s) without ->start (class %s)", 578 gp->name, gp->class->name)); 579 KASSERT(!(gp->flags & G_GEOM_WITHER), 580 ("new provider on WITHERing geom(%s) (class %s)", 581 gp->name, gp->class->name)); 582 sb = sbuf_new_auto(); 583 va_start(ap, fmt); 584 sbuf_vprintf(sb, fmt, ap); 585 va_end(ap); 586 sbuf_finish(sb); 587 pp = g_malloc(sizeof *pp + sbuf_len(sb) + 1, M_WAITOK | M_ZERO); 588 pp->name = (char *)(pp + 1); 589 strcpy(pp->name, sbuf_data(sb)); 590 sbuf_delete(sb); 591 LIST_INIT(&pp->consumers); 592 pp->error = ENXIO; 593 pp->geom = gp; 594 pp->stat = devstat_new_entry(pp, -1, 0, DEVSTAT_ALL_SUPPORTED, 595 DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); 596 LIST_INSERT_HEAD(&gp->provider, pp, provider); 597 g_post_event(g_new_provider_event, pp, M_WAITOK, pp, gp, NULL); 598 return (pp); 599 } 600 601 void 602 g_error_provider(struct g_provider *pp, int error) 603 { 604 605 /* G_VALID_PROVIDER(pp); We may not have g_topology */ 606 pp->error = error; 607 } 608 609 static void 610 g_resize_provider_event(void *arg, int flag) 611 { 612 struct g_hh00 *hh; 613 struct g_class *mp; 614 struct g_geom *gp; 615 struct g_provider *pp; 616 struct g_consumer *cp, *cp2; 617 off_t size; 618 619 g_topology_assert(); 620 if (g_shutdown) 621 return; 622 623 hh = arg; 624 pp = hh->pp; 625 size = hh->size; 626 g_free(hh); 627 628 G_VALID_PROVIDER(pp); 629 KASSERT(!(pp->flags & G_PF_WITHER), 630 ("g_resize_provider_event but withered")); 631 g_trace(G_T_TOPOLOGY, "g_resize_provider_event(%p)", pp); 632 633 LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) { 634 gp = cp->geom; 635 if (gp->resize == NULL && size < pp->mediasize) { 636 /* 637 * XXX: g_dev_orphan method does deferred 
destroying 638 * and it is possible, that other event could already 639 * call the orphan method. Check consumer's flags to 640 * do not schedule it twice. 641 */ 642 if (cp->flags & G_CF_ORPHAN) 643 continue; 644 cp->flags |= G_CF_ORPHAN; 645 cp->geom->orphan(cp); 646 } 647 } 648 649 pp->mediasize = size; 650 651 LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) { 652 gp = cp->geom; 653 if ((gp->flags & G_GEOM_WITHER) == 0 && gp->resize != NULL) 654 gp->resize(cp); 655 } 656 657 /* 658 * After resizing, the previously invalid GEOM class metadata 659 * might become valid. This means we should retaste. 660 */ 661 LIST_FOREACH(mp, &g_classes, class) { 662 if (mp->taste == NULL) 663 continue; 664 LIST_FOREACH(cp, &pp->consumers, consumers) 665 if (cp->geom->class == mp && 666 (cp->flags & G_CF_ORPHAN) == 0) 667 break; 668 if (cp != NULL) 669 continue; 670 mp->taste(mp, pp, 0); 671 g_topology_assert(); 672 } 673 } 674 675 void 676 g_resize_provider(struct g_provider *pp, off_t size) 677 { 678 struct g_hh00 *hh; 679 680 G_VALID_PROVIDER(pp); 681 if (pp->flags & G_PF_WITHER) 682 return; 683 684 if (size == pp->mediasize) 685 return; 686 687 hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO); 688 hh->pp = pp; 689 hh->size = size; 690 g_post_event(g_resize_provider_event, hh, M_WAITOK, NULL); 691 } 692 693 #ifndef _PATH_DEV 694 #define _PATH_DEV "/dev/" 695 #endif 696 697 struct g_provider * 698 g_provider_by_name(char const *arg) 699 { 700 struct g_class *cp; 701 struct g_geom *gp; 702 struct g_provider *pp, *wpp; 703 704 if (strncmp(arg, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 705 arg += sizeof(_PATH_DEV) - 1; 706 707 wpp = NULL; 708 LIST_FOREACH(cp, &g_classes, class) { 709 LIST_FOREACH(gp, &cp->geom, geom) { 710 LIST_FOREACH(pp, &gp->provider, provider) { 711 if (strcmp(arg, pp->name) != 0) 712 continue; 713 if ((gp->flags & G_GEOM_WITHER) == 0 && 714 (pp->flags & G_PF_WITHER) == 0) 715 return (pp); 716 else 717 wpp = pp; 718 } 719 } 720 } 721 722 return (wpp); 723 } 724 
725 void 726 g_destroy_provider(struct g_provider *pp) 727 { 728 struct g_geom *gp; 729 730 g_topology_assert(); 731 G_VALID_PROVIDER(pp); 732 KASSERT(LIST_EMPTY(&pp->consumers), 733 ("g_destroy_provider but attached")); 734 KASSERT (pp->acr == 0, ("g_destroy_provider with acr")); 735 KASSERT (pp->acw == 0, ("g_destroy_provider with acw")); 736 KASSERT (pp->ace == 0, ("g_destroy_provider with ace")); 737 g_cancel_event(pp); 738 LIST_REMOVE(pp, provider); 739 gp = pp->geom; 740 devstat_remove_entry(pp->stat); 741 /* 742 * If a callback was provided, send notification that the provider 743 * is now gone. 744 */ 745 if (gp->providergone != NULL) 746 gp->providergone(pp); 747 748 g_free(pp); 749 if ((gp->flags & G_GEOM_WITHER)) 750 g_do_wither(); 751 } 752 753 /* 754 * We keep the "geoms" list sorted by topological order (== increasing 755 * numerical rank) at all times. 756 * When an attach is done, the attaching geoms rank is invalidated 757 * and it is moved to the tail of the list. 758 * All geoms later in the sequence has their ranks reevaluated in 759 * sequence. If we cannot assign rank to a geom because it's 760 * prerequisites do not have rank, we move that element to the tail 761 * of the sequence with invalid rank as well. 762 * At some point we encounter our original geom and if we stil fail 763 * to assign it a rank, there must be a loop and we fail back to 764 * g_attach() which detach again and calls redo_rank again 765 * to fix up the damage. 766 * It would be much simpler code wise to do it recursively, but we 767 * can't risk that on the kernel stack. 
768 */ 769 770 static int 771 redo_rank(struct g_geom *gp) 772 { 773 struct g_consumer *cp; 774 struct g_geom *gp1, *gp2; 775 int n, m; 776 777 g_topology_assert(); 778 G_VALID_GEOM(gp); 779 780 /* Invalidate this geoms rank and move it to the tail */ 781 gp1 = TAILQ_NEXT(gp, geoms); 782 if (gp1 != NULL) { 783 gp->rank = 0; 784 TAILQ_REMOVE(&geoms, gp, geoms); 785 TAILQ_INSERT_TAIL(&geoms, gp, geoms); 786 } else { 787 gp1 = gp; 788 } 789 790 /* re-rank the rest of the sequence */ 791 for (; gp1 != NULL; gp1 = gp2) { 792 gp1->rank = 0; 793 m = 1; 794 LIST_FOREACH(cp, &gp1->consumer, consumer) { 795 if (cp->provider == NULL) 796 continue; 797 n = cp->provider->geom->rank; 798 if (n == 0) { 799 m = 0; 800 break; 801 } else if (n >= m) 802 m = n + 1; 803 } 804 gp1->rank = m; 805 gp2 = TAILQ_NEXT(gp1, geoms); 806 807 /* got a rank, moving on */ 808 if (m != 0) 809 continue; 810 811 /* no rank to original geom means loop */ 812 if (gp == gp1) 813 return (ELOOP); 814 815 /* no rank, put it at the end move on */ 816 TAILQ_REMOVE(&geoms, gp1, geoms); 817 TAILQ_INSERT_TAIL(&geoms, gp1, geoms); 818 } 819 return (0); 820 } 821 822 int 823 g_attach(struct g_consumer *cp, struct g_provider *pp) 824 { 825 int error; 826 827 g_topology_assert(); 828 G_VALID_CONSUMER(cp); 829 G_VALID_PROVIDER(pp); 830 g_trace(G_T_TOPOLOGY, "g_attach(%p, %p)", cp, pp); 831 KASSERT(cp->provider == NULL, ("attach but attached")); 832 cp->provider = pp; 833 cp->flags &= ~G_CF_ORPHAN; 834 LIST_INSERT_HEAD(&pp->consumers, cp, consumers); 835 error = redo_rank(cp->geom); 836 if (error) { 837 LIST_REMOVE(cp, consumers); 838 cp->provider = NULL; 839 redo_rank(cp->geom); 840 } 841 return (error); 842 } 843 844 void 845 g_detach(struct g_consumer *cp) 846 { 847 struct g_provider *pp; 848 849 g_topology_assert(); 850 G_VALID_CONSUMER(cp); 851 g_trace(G_T_TOPOLOGY, "g_detach(%p)", cp); 852 KASSERT(cp->provider != NULL, ("detach but not attached")); 853 KASSERT(cp->acr == 0, ("detach but nonzero acr")); 854 
KASSERT(cp->acw == 0, ("detach but nonzero acw")); 855 KASSERT(cp->ace == 0, ("detach but nonzero ace")); 856 KASSERT(cp->nstart == cp->nend, 857 ("detach with active requests")); 858 pp = cp->provider; 859 LIST_REMOVE(cp, consumers); 860 cp->provider = NULL; 861 if ((cp->geom->flags & G_GEOM_WITHER) || 862 (pp->geom->flags & G_GEOM_WITHER) || 863 (pp->flags & G_PF_WITHER)) 864 g_do_wither(); 865 redo_rank(cp->geom); 866 } 867 868 /* 869 * g_access() 870 * 871 * Access-check with delta values. The question asked is "can provider 872 * "cp" change the access counters by the relative amounts dc[rwe] ?" 873 */ 874 875 int 876 g_access(struct g_consumer *cp, int dcr, int dcw, int dce) 877 { 878 struct g_provider *pp; 879 int pr,pw,pe; 880 int error; 881 882 g_topology_assert(); 883 G_VALID_CONSUMER(cp); 884 pp = cp->provider; 885 KASSERT(pp != NULL, ("access but not attached")); 886 G_VALID_PROVIDER(pp); 887 888 g_trace(G_T_ACCESS, "g_access(%p(%s), %d, %d, %d)", 889 cp, pp->name, dcr, dcw, dce); 890 891 KASSERT(cp->acr + dcr >= 0, ("access resulting in negative acr")); 892 KASSERT(cp->acw + dcw >= 0, ("access resulting in negative acw")); 893 KASSERT(cp->ace + dce >= 0, ("access resulting in negative ace")); 894 KASSERT(dcr != 0 || dcw != 0 || dce != 0, ("NOP access request")); 895 KASSERT(pp->geom->access != NULL, ("NULL geom->access")); 896 897 /* 898 * If our class cares about being spoiled, and we have been, we 899 * are probably just ahead of the event telling us that. Fail 900 * now rather than having to unravel this later. 901 */ 902 if (cp->geom->spoiled != NULL && (cp->flags & G_CF_SPOILED) && 903 (dcr > 0 || dcw > 0 || dce > 0)) 904 return (ENXIO); 905 906 /* 907 * Figure out what counts the provider would have had, if this 908 * consumer had (r0w0e0) at this time. 
909 */ 910 pr = pp->acr - cp->acr; 911 pw = pp->acw - cp->acw; 912 pe = pp->ace - cp->ace; 913 914 g_trace(G_T_ACCESS, 915 "open delta:[r%dw%de%d] old:[r%dw%de%d] provider:[r%dw%de%d] %p(%s)", 916 dcr, dcw, dce, 917 cp->acr, cp->acw, cp->ace, 918 pp->acr, pp->acw, pp->ace, 919 pp, pp->name); 920 921 /* If foot-shooting is enabled, any open on rank#1 is OK */ 922 if ((g_debugflags & 16) && pp->geom->rank == 1) 923 ; 924 /* If we try exclusive but already write: fail */ 925 else if (dce > 0 && pw > 0) 926 return (EPERM); 927 /* If we try write but already exclusive: fail */ 928 else if (dcw > 0 && pe > 0) 929 return (EPERM); 930 /* If we try to open more but provider is error'ed: fail */ 931 else if ((dcr > 0 || dcw > 0 || dce > 0) && pp->error != 0) { 932 printf("%s(%d): provider %s has error %d set\n", 933 __func__, __LINE__, pp->name, pp->error); 934 return (pp->error); 935 } 936 937 /* Ok then... */ 938 939 error = pp->geom->access(pp, dcr, dcw, dce); 940 KASSERT(dcr > 0 || dcw > 0 || dce > 0 || error == 0, 941 ("Geom provider %s::%s dcr=%d dcw=%d dce=%d error=%d failed " 942 "closing ->access()", pp->geom->class->name, pp->name, dcr, dcw, 943 dce, error)); 944 if (!error) { 945 /* 946 * If we open first write, spoil any partner consumers. 947 * If we close last write and provider is not errored, 948 * trigger re-taste. 
949 */ 950 if (pp->acw == 0 && dcw != 0) 951 g_spoil(pp, cp); 952 else if (pp->acw != 0 && pp->acw == -dcw && pp->error == 0 && 953 !(pp->geom->flags & G_GEOM_WITHER)) 954 g_post_event(g_new_provider_event, pp, M_WAITOK, 955 pp, NULL); 956 957 pp->acr += dcr; 958 pp->acw += dcw; 959 pp->ace += dce; 960 cp->acr += dcr; 961 cp->acw += dcw; 962 cp->ace += dce; 963 if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) 964 KASSERT(pp->sectorsize > 0, 965 ("Provider %s lacks sectorsize", pp->name)); 966 if ((cp->geom->flags & G_GEOM_WITHER) && 967 cp->acr == 0 && cp->acw == 0 && cp->ace == 0) 968 g_do_wither(); 969 } 970 return (error); 971 } 972 973 int 974 g_handleattr_int(struct bio *bp, const char *attribute, int val) 975 { 976 977 return (g_handleattr(bp, attribute, &val, sizeof val)); 978 } 979 980 int 981 g_handleattr_uint16_t(struct bio *bp, const char *attribute, uint16_t val) 982 { 983 984 return (g_handleattr(bp, attribute, &val, sizeof val)); 985 } 986 987 int 988 g_handleattr_off_t(struct bio *bp, const char *attribute, off_t val) 989 { 990 991 return (g_handleattr(bp, attribute, &val, sizeof val)); 992 } 993 994 int 995 g_handleattr_str(struct bio *bp, const char *attribute, const char *str) 996 { 997 998 return (g_handleattr(bp, attribute, str, 0)); 999 } 1000 1001 int 1002 g_handleattr(struct bio *bp, const char *attribute, const void *val, int len) 1003 { 1004 int error = 0; 1005 1006 if (strcmp(bp->bio_attribute, attribute)) 1007 return (0); 1008 if (len == 0) { 1009 bzero(bp->bio_data, bp->bio_length); 1010 if (strlcpy(bp->bio_data, val, bp->bio_length) >= 1011 bp->bio_length) { 1012 printf("%s: %s bio_length %jd len %zu -> EFAULT\n", 1013 __func__, bp->bio_to->name, 1014 (intmax_t)bp->bio_length, strlen(val)); 1015 error = EFAULT; 1016 } 1017 } else if (bp->bio_length == len) { 1018 bcopy(val, bp->bio_data, len); 1019 } else { 1020 printf("%s: %s bio_length %jd len %d -> EFAULT\n", __func__, 1021 bp->bio_to->name, (intmax_t)bp->bio_length, len); 1022 
error = EFAULT; 1023 } 1024 if (error == 0) 1025 bp->bio_completed = bp->bio_length; 1026 g_io_deliver(bp, error); 1027 return (1); 1028 } 1029 1030 int 1031 g_std_access(struct g_provider *pp, 1032 int dr __unused, int dw __unused, int de __unused) 1033 { 1034 1035 g_topology_assert(); 1036 G_VALID_PROVIDER(pp); 1037 return (0); 1038 } 1039 1040 void 1041 g_std_done(struct bio *bp) 1042 { 1043 struct bio *bp2; 1044 1045 bp2 = bp->bio_parent; 1046 if (bp2->bio_error == 0) 1047 bp2->bio_error = bp->bio_error; 1048 bp2->bio_completed += bp->bio_completed; 1049 g_destroy_bio(bp); 1050 bp2->bio_inbed++; 1051 if (bp2->bio_children == bp2->bio_inbed) 1052 g_io_deliver(bp2, bp2->bio_error); 1053 } 1054 1055 /* XXX: maybe this is only g_slice_spoiled */ 1056 1057 void 1058 g_std_spoiled(struct g_consumer *cp) 1059 { 1060 struct g_geom *gp; 1061 struct g_provider *pp; 1062 1063 g_topology_assert(); 1064 G_VALID_CONSUMER(cp); 1065 g_trace(G_T_TOPOLOGY, "g_std_spoiled(%p)", cp); 1066 cp->flags |= G_CF_ORPHAN; 1067 g_detach(cp); 1068 gp = cp->geom; 1069 LIST_FOREACH(pp, &gp->provider, provider) 1070 g_orphan_provider(pp, ENXIO); 1071 g_destroy_consumer(cp); 1072 if (LIST_EMPTY(&gp->provider) && LIST_EMPTY(&gp->consumer)) 1073 g_destroy_geom(gp); 1074 else 1075 gp->flags |= G_GEOM_WITHER; 1076 } 1077 1078 /* 1079 * Spoiling happens when a provider is opened for writing, but consumers 1080 * which are configured by in-band data are attached (slicers for instance). 1081 * Since the write might potentially change the in-band data, such consumers 1082 * need to re-evaluate their existence after the writing session closes. 1083 * We do this by (offering to) tear them down when the open for write happens 1084 * in return for a re-taste when it closes again. 1085 * Together with the fact that such consumers grab an 'e' bit whenever they 1086 * are open, regardless of mode, this ends up DTRT. 
1087 */ 1088 1089 static void 1090 g_spoil_event(void *arg, int flag) 1091 { 1092 struct g_provider *pp; 1093 struct g_consumer *cp, *cp2; 1094 1095 g_topology_assert(); 1096 if (flag == EV_CANCEL) 1097 return; 1098 pp = arg; 1099 G_VALID_PROVIDER(pp); 1100 g_trace(G_T_TOPOLOGY, "%s %p(%s:%s:%s)", __func__, pp, 1101 pp->geom->class->name, pp->geom->name, pp->name); 1102 for (cp = LIST_FIRST(&pp->consumers); cp != NULL; cp = cp2) { 1103 cp2 = LIST_NEXT(cp, consumers); 1104 if ((cp->flags & G_CF_SPOILED) == 0) 1105 continue; 1106 cp->flags &= ~G_CF_SPOILED; 1107 if (cp->geom->spoiled == NULL) 1108 continue; 1109 cp->geom->spoiled(cp); 1110 g_topology_assert(); 1111 } 1112 } 1113 1114 void 1115 g_spoil(struct g_provider *pp, struct g_consumer *cp) 1116 { 1117 struct g_consumer *cp2; 1118 1119 g_topology_assert(); 1120 G_VALID_PROVIDER(pp); 1121 G_VALID_CONSUMER(cp); 1122 1123 LIST_FOREACH(cp2, &pp->consumers, consumers) { 1124 if (cp2 == cp) 1125 continue; 1126 /* 1127 KASSERT(cp2->acr == 0, ("spoiling cp->acr = %d", cp2->acr)); 1128 KASSERT(cp2->acw == 0, ("spoiling cp->acw = %d", cp2->acw)); 1129 */ 1130 KASSERT(cp2->ace == 0, ("spoiling cp->ace = %d", cp2->ace)); 1131 cp2->flags |= G_CF_SPOILED; 1132 } 1133 g_post_event(g_spoil_event, pp, M_WAITOK, pp, NULL); 1134 } 1135 1136 static void 1137 g_media_changed_event(void *arg, int flag) 1138 { 1139 struct g_provider *pp; 1140 int retaste; 1141 1142 g_topology_assert(); 1143 if (flag == EV_CANCEL) 1144 return; 1145 pp = arg; 1146 G_VALID_PROVIDER(pp); 1147 1148 /* 1149 * If provider was not open for writing, queue retaste after spoiling. 1150 * If it was, retaste will happen automatically on close. 
1151 */ 1152 retaste = (pp->acw == 0 && pp->error == 0 && 1153 !(pp->geom->flags & G_GEOM_WITHER)); 1154 g_spoil_event(arg, flag); 1155 if (retaste) 1156 g_post_event(g_new_provider_event, pp, M_WAITOK, pp, NULL); 1157 } 1158 1159 int 1160 g_media_changed(struct g_provider *pp, int flag) 1161 { 1162 struct g_consumer *cp; 1163 1164 LIST_FOREACH(cp, &pp->consumers, consumers) 1165 cp->flags |= G_CF_SPOILED; 1166 return (g_post_event(g_media_changed_event, pp, flag, pp, NULL)); 1167 } 1168 1169 int 1170 g_media_gone(struct g_provider *pp, int flag) 1171 { 1172 struct g_consumer *cp; 1173 1174 LIST_FOREACH(cp, &pp->consumers, consumers) 1175 cp->flags |= G_CF_SPOILED; 1176 return (g_post_event(g_spoil_event, pp, flag, pp, NULL)); 1177 } 1178 1179 int 1180 g_getattr__(const char *attr, struct g_consumer *cp, void *var, int len) 1181 { 1182 int error, i; 1183 1184 i = len; 1185 error = g_io_getattr(attr, cp, &i, var); 1186 if (error) 1187 return (error); 1188 if (i != len) 1189 return (EINVAL); 1190 return (0); 1191 } 1192 1193 static int 1194 g_get_device_prefix_len(const char *name) 1195 { 1196 int len; 1197 1198 if (strncmp(name, "ada", 3) == 0) 1199 len = 3; 1200 else if (strncmp(name, "ad", 2) == 0) 1201 len = 2; 1202 else 1203 return (0); 1204 if (name[len] < '0' || name[len] > '9') 1205 return (0); 1206 do { 1207 len++; 1208 } while (name[len] >= '0' && name[len] <= '9'); 1209 return (len); 1210 } 1211 1212 int 1213 g_compare_names(const char *namea, const char *nameb) 1214 { 1215 int deva, devb; 1216 1217 if (strcmp(namea, nameb) == 0) 1218 return (1); 1219 deva = g_get_device_prefix_len(namea); 1220 if (deva == 0) 1221 return (0); 1222 devb = g_get_device_prefix_len(nameb); 1223 if (devb == 0) 1224 return (0); 1225 if (strcmp(namea + deva, nameb + devb) == 0) 1226 return (1); 1227 return (0); 1228 } 1229 1230 void 1231 g_geom_add_alias(struct g_geom *gp, const char *alias) 1232 { 1233 struct g_geom_alias *gap; 1234 1235 gap = (struct g_geom_alias *)g_malloc( 
	    sizeof(struct g_geom_alias) + strlen(alias) + 1, M_WAITOK);
	/* The copied string lives immediately after the list entry. */
	strcpy((char *)(gap + 1), alias);
	gap->ga_alias = (const char *)(gap + 1);
	LIST_INSERT_HEAD(&gp->aliases, gap, ga_next);
}

#if defined(DIAGNOSTIC) || defined(DDB)
/*
 * This function walks the mesh and returns a non-zero integer if it
 * finds the argument pointer is an object. The return value indicates
 * which type of object it is believed to be. If topology is not locked,
 * this function is potentially dangerous, but we don't assert that the
 * topology lock is held when called from debugger.
 */
int
g_valid_obj(void const *ptr)
{
	struct g_class *mp;
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_provider *pp;

	/* Only insist on the topology lock outside the debugger. */
#ifdef KDB
	if (kdb_active == 0)
#endif
		g_topology_assert();

	LIST_FOREACH(mp, &g_classes, class) {
		if (ptr == mp)
			return (1);	/* class */
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (ptr == gp)
				return (2);	/* geom */
			LIST_FOREACH(cp, &gp->consumer, consumer)
				if (ptr == cp)
					return (3);	/* consumer */
			LIST_FOREACH(pp, &gp->provider, provider)
				if (ptr == pp)
					return (4);	/* provider */
		}
	}
	return(0);
}
#endif

#ifdef DDB

/* Print at the current indentation level, no trailing newline. */
#define gprintf(...)	do {					\
	db_printf("%*s", indent, "");				\
	db_printf(__VA_ARGS__);					\
} while (0)
/* Like gprintf(), but terminate the line. */
#define gprintln(...)						\
	do {							\
		gprintf(__VA_ARGS__);				\
		db_printf("\n");				\
	} while (0)

/*
 * Append the name of a set flag to 'str', comma-separated from any
 * name already emitted; 'str', 'size' and 'comma' are picked up from
 * the calling scope.
 */
#define ADDFLAG(obj, flag, sflag) do {				\
	if ((obj)->flags & (flag)) {				\
		if (comma)					\
			strlcat(str, ",", size);		\
		strlcat(str, (sflag), size);			\
		comma = 1;					\
	}							\
} while (0)

/*
 * Render pp->flags into str as a comma-separated name list (or
 * "NONE").  Returns str for direct use as a printf argument.
 */
static char *
provider_flags_to_string(struct g_provider *pp, char *str, size_t size)
{
	int comma = 0;

	bzero(str, size);
	if (pp->flags == 0) {
		strlcpy(str, "NONE", size);
		return (str);
	}
	ADDFLAG(pp, G_PF_WITHER, "G_PF_WITHER");
	ADDFLAG(pp, G_PF_ORPHAN, "G_PF_ORPHAN");
	return (str);
}

/*
 * Render gp->flags into str; see provider_flags_to_string().
 */
static char *
geom_flags_to_string(struct g_geom *gp, char *str, size_t size)
{
	int comma = 0;

	bzero(str, size);
	if (gp->flags == 0) {
		strlcpy(str, "NONE", size);
		return (str);
	}
	ADDFLAG(gp, G_GEOM_WITHER, "G_GEOM_WITHER");
	return (str);
}

/*
 * Dump one consumer: verbosely when it is the object being shown
 * (indent == 0), as a one-liner when reached from its geom/provider.
 */
static void
db_show_geom_consumer(int indent, struct g_consumer *cp)
{

	if (indent == 0) {
		gprintln("consumer: %p", cp);
		gprintln(" class: %s (%p)", cp->geom->class->name,
		    cp->geom->class);
		gprintln(" geom: %s (%p)", cp->geom->name, cp->geom);
		if (cp->provider == NULL)
			gprintln(" provider: none");
		else {
			gprintln(" provider: %s (%p)", cp->provider->name,
			    cp->provider);
		}
		gprintln(" access: r%dw%de%d", cp->acr, cp->acw, cp->ace);
		gprintln(" flags: 0x%04x", cp->flags);
		gprintln(" nstart: %u", cp->nstart);
		gprintln(" nend: %u", cp->nend);
	} else {
		gprintf("consumer: %p (%s), access=r%dw%de%d", cp,
		    cp->provider != NULL ?
		    cp->provider->name : "none",
		    cp->acr, cp->acw, cp->ace);
		if (cp->flags)
			db_printf(", flags=0x%04x", cp->flags);
		db_printf("\n");
	}
}

/*
 * Dump one provider and, at any depth, the consumers attached to it.
 */
static void
db_show_geom_provider(int indent, struct g_provider *pp)
{
	struct g_consumer *cp;
	char flags[64];

	if (indent == 0) {
		gprintln("provider: %s (%p)", pp->name, pp);
		gprintln(" class: %s (%p)", pp->geom->class->name,
		    pp->geom->class);
		gprintln(" geom: %s (%p)", pp->geom->name, pp->geom);
		gprintln(" mediasize: %jd", (intmax_t)pp->mediasize);
		gprintln(" sectorsize: %u", pp->sectorsize);
		gprintln(" stripesize: %u", pp->stripesize);
		gprintln(" stripeoffset: %u", pp->stripeoffset);
		gprintln(" access: r%dw%de%d", pp->acr, pp->acw,
		    pp->ace);
		gprintln(" flags: %s (0x%04x)",
		    provider_flags_to_string(pp, flags, sizeof(flags)),
		    pp->flags);
		gprintln(" error: %d", pp->error);
		gprintln(" nstart: %u", pp->nstart);
		gprintln(" nend: %u", pp->nend);
		if (LIST_EMPTY(&pp->consumers))
			gprintln(" consumers: none");
	} else {
		gprintf("provider: %s (%p), access=r%dw%de%d",
		    pp->name, pp, pp->acr, pp->acw, pp->ace);
		if (pp->flags != 0) {
			db_printf(", flags=%s (0x%04x)",
			    provider_flags_to_string(pp, flags, sizeof(flags)),
			    pp->flags);
		}
		db_printf("\n");
	}
	if (!LIST_EMPTY(&pp->consumers)) {
		LIST_FOREACH(cp, &pp->consumers, consumers) {
			db_show_geom_consumer(indent + 2, cp);
			if (db_pager_quit)	/* Honor pager interrupt. */
				break;
		}
	}
}

/*
 * Dump one geom together with its providers and consumers.
 */
static void
db_show_geom_geom(int indent, struct g_geom *gp)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	char flags[64];

	if (indent == 0) {
		gprintln("geom: %s (%p)", gp->name, gp);
		gprintln(" class: %s (%p)", gp->class->name, gp->class);
		gprintln(" flags: %s (0x%04x)",
		    geom_flags_to_string(gp, flags, sizeof(flags)), gp->flags);
		gprintln(" rank: %d", gp->rank);
		if (LIST_EMPTY(&gp->provider))
			gprintln(" providers: none");
		if (LIST_EMPTY(&gp->consumer))
			gprintln(" consumers: none");
	} else {
		gprintf("geom: %s (%p), rank=%d", gp->name, gp, gp->rank);
		if (gp->flags != 0) {
			db_printf(", flags=%s (0x%04x)",
			    geom_flags_to_string(gp, flags, sizeof(flags)),
			    gp->flags);
		}
		db_printf("\n");
	}
	if (!LIST_EMPTY(&gp->provider)) {
		LIST_FOREACH(pp, &gp->provider, provider) {
			db_show_geom_provider(indent + 2, pp);
			if (db_pager_quit)
				break;
		}
	}
	if (!LIST_EMPTY(&gp->consumer)) {
		LIST_FOREACH(cp, &gp->consumer, consumer) {
			db_show_geom_consumer(indent + 2, cp);
			if (db_pager_quit)
				break;
		}
	}
}

/*
 * Dump a class header and all the geoms it manages.
 */
static void
db_show_geom_class(struct g_class *mp)
{
	struct g_geom *gp;

	db_printf("class: %s (%p)\n", mp->name, mp);
	LIST_FOREACH(gp, &mp->geom, geom) {
		db_show_geom_geom(2, gp);
		if (db_pager_quit)
			break;
	}
}

/*
 * Print the GEOM topology or the given object.
 */
DB_SHOW_COMMAND(geom, db_show_geom)
{
	struct g_class *mp;

	if (!have_addr) {
		/* No address given, print the entire topology.
		 */
		LIST_FOREACH(mp, &g_classes, class) {
			db_show_geom_class(mp);
			db_printf("\n");
			if (db_pager_quit)
				break;
		}
	} else {
		/* Identify the object first, then print it appropriately. */
		switch (g_valid_obj((void *)addr)) {
		case 1:
			db_show_geom_class((struct g_class *)addr);
			break;
		case 2:
			db_show_geom_geom(0, (struct g_geom *)addr);
			break;
		case 3:
			db_show_geom_consumer(0, (struct g_consumer *)addr);
			break;
		case 4:
			db_show_geom_provider(0, (struct g_provider *)addr);
			break;
		default:
			db_printf("Not a GEOM object.\n");
			break;
		}
	}
}

/*
 * Decode bp->bio_cmd into its symbolic name.
 */
static void
db_print_bio_cmd(struct bio *bp)
{
	db_printf(" cmd: ");
	switch (bp->bio_cmd) {
	case BIO_READ: db_printf("BIO_READ"); break;
	case BIO_WRITE: db_printf("BIO_WRITE"); break;
	case BIO_DELETE: db_printf("BIO_DELETE"); break;
	case BIO_GETATTR: db_printf("BIO_GETATTR"); break;
	case BIO_FLUSH: db_printf("BIO_FLUSH"); break;
	case BIO_CMD0: db_printf("BIO_CMD0"); break;
	case BIO_CMD1: db_printf("BIO_CMD1"); break;
	case BIO_CMD2: db_printf("BIO_CMD2"); break;
	case BIO_ZONE: db_printf("BIO_ZONE"); break;
	default: db_printf("UNKNOWN"); break;
	}
	db_printf("\n");
}

/*
 * Decode the set bits of bp->bio_flags into a comma-separated list.
 */
static void
db_print_bio_flags(struct bio *bp)
{
	int comma;

	comma = 0;
	db_printf(" flags: ");
	if (bp->bio_flags & BIO_ERROR) {
		db_printf("BIO_ERROR");
		comma = 1;
	}
	if (bp->bio_flags & BIO_DONE) {
		db_printf("%sBIO_DONE", (comma ? ", " : ""));
		comma = 1;
	}
	if (bp->bio_flags & BIO_ONQUEUE)
		db_printf("%sBIO_ONQUEUE", (comma ?
", " : "")); 1529 db_printf("\n"); 1530 } 1531 1532 /* 1533 * Print useful information in a BIO 1534 */ 1535 DB_SHOW_COMMAND(bio, db_show_bio) 1536 { 1537 struct bio *bp; 1538 1539 if (have_addr) { 1540 bp = (struct bio *)addr; 1541 db_printf("BIO %p\n", bp); 1542 db_print_bio_cmd(bp); 1543 db_print_bio_flags(bp); 1544 db_printf(" cflags: 0x%hx\n", bp->bio_cflags); 1545 db_printf(" pflags: 0x%hx\n", bp->bio_pflags); 1546 db_printf(" offset: %jd\n", (intmax_t)bp->bio_offset); 1547 db_printf(" length: %jd\n", (intmax_t)bp->bio_length); 1548 db_printf(" bcount: %ld\n", bp->bio_bcount); 1549 db_printf(" resid: %ld\n", bp->bio_resid); 1550 db_printf(" completed: %jd\n", (intmax_t)bp->bio_completed); 1551 db_printf(" children: %u\n", bp->bio_children); 1552 db_printf(" inbed: %u\n", bp->bio_inbed); 1553 db_printf(" error: %d\n", bp->bio_error); 1554 db_printf(" parent: %p\n", bp->bio_parent); 1555 db_printf(" driver1: %p\n", bp->bio_driver1); 1556 db_printf(" driver2: %p\n", bp->bio_driver2); 1557 db_printf(" caller1: %p\n", bp->bio_caller1); 1558 db_printf(" caller2: %p\n", bp->bio_caller2); 1559 db_printf(" bio_from: %p\n", bp->bio_from); 1560 db_printf(" bio_to: %p\n", bp->bio_to); 1561 1562 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 1563 db_printf(" bio_track_bp: %p\n", bp->bio_track_bp); 1564 #endif 1565 } 1566 } 1567 1568 #undef gprintf 1569 #undef gprintln 1570 #undef ADDFLAG 1571 1572 #endif /* DDB */ 1573