1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2002 Poul-Henning Kamp 5 * Copyright (c) 2002 Networks Associates Technology, Inc. 6 * All rights reserved. 7 * 8 * This software was developed for the FreeBSD Project by Poul-Henning Kamp 9 * and NAI Labs, the Security Research Division of Network Associates, Inc. 10 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the 11 * DARPA CHATS research program. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. The names of the authors may not be used to endorse or promote 22 * products derived from this software without specific prior written 23 * permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 */ 37 38 #include <sys/cdefs.h> 39 __FBSDID("$FreeBSD$"); 40 41 #include "opt_ddb.h" 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/devicestat.h> 46 #include <sys/kernel.h> 47 #include <sys/malloc.h> 48 #include <sys/bio.h> 49 #include <sys/sysctl.h> 50 #include <sys/proc.h> 51 #include <sys/kthread.h> 52 #include <sys/lock.h> 53 #include <sys/mutex.h> 54 #include <sys/errno.h> 55 #include <sys/sbuf.h> 56 #include <geom/geom.h> 57 #include <geom/geom_int.h> 58 #include <machine/stdarg.h> 59 60 #ifdef DDB 61 #include <ddb/ddb.h> 62 #endif 63 64 #ifdef KDB 65 #include <sys/kdb.h> 66 #endif 67 68 struct class_list_head g_classes = LIST_HEAD_INITIALIZER(g_classes); 69 static struct g_tailq_head geoms = TAILQ_HEAD_INITIALIZER(geoms); 70 char *g_wait_event, *g_wait_up, *g_wait_down, *g_wait_sim; 71 72 struct g_hh00 { 73 struct g_class *mp; 74 struct g_provider *pp; 75 off_t size; 76 int error; 77 int post; 78 }; 79 80 /* 81 * This event offers a new class a chance to taste all preexisting providers. 82 */ 83 static void 84 g_load_class(void *arg, int flag) 85 { 86 struct g_hh00 *hh; 87 struct g_class *mp2, *mp; 88 struct g_geom *gp; 89 struct g_provider *pp; 90 91 g_topology_assert(); 92 if (flag == EV_CANCEL) /* XXX: can't happen ? */ 93 return; 94 if (g_shutdown) 95 return; 96 97 hh = arg; 98 mp = hh->mp; 99 hh->error = 0; 100 if (hh->post) { 101 g_free(hh); 102 hh = NULL; 103 } 104 g_trace(G_T_TOPOLOGY, "g_load_class(%s)", mp->name); 105 KASSERT(mp->name != NULL && *mp->name != '\0', 106 ("GEOM class has no name")); 107 LIST_FOREACH(mp2, &g_classes, class) { 108 if (mp2 == mp) { 109 printf("The GEOM class %s is already loaded.\n", 110 mp2->name); 111 if (hh != NULL) 112 hh->error = EEXIST; 113 return; 114 } else if (strcmp(mp2->name, mp->name) == 0) { 115 printf("A GEOM class %s is already loaded.\n", 116 mp2->name); 117 if (hh != NULL) 118 hh->error = EEXIST; 119 return; 120 } 121 } 122 123 LIST_INIT(&mp->geom); 124 LIST_INSERT_HEAD(&g_classes, mp, class); 125 if (mp->init != NULL) 126 mp->init(mp); 127 if (mp->taste == NULL) 128 return; 129 LIST_FOREACH(mp2, &g_classes, class) { 130 if (mp == mp2) 131 continue; 132 LIST_FOREACH(gp, &mp2->geom, geom) { 133 LIST_FOREACH(pp, &gp->provider, provider) { 134 mp->taste(mp, pp, 0); 135 g_topology_assert(); 136 } 137 } 138 } 139 } 140 141 static int 142 g_unload_class(struct g_class *mp) 143 { 144 struct g_geom *gp; 145 struct g_provider *pp; 146 struct g_consumer *cp; 147 int error; 148 149 g_topology_lock(); 150 g_trace(G_T_TOPOLOGY, "g_unload_class(%s)", mp->name); 151 retry: 152 G_VALID_CLASS(mp); 153 LIST_FOREACH(gp, &mp->geom, geom) { 154 /* We refuse to unload if anything is open */ 155 LIST_FOREACH(pp, &gp->provider, provider) 156 if (pp->acr || pp->acw || pp->ace) { 157 g_topology_unlock(); 158 return (EBUSY); 159 } 160 LIST_FOREACH(cp, &gp->consumer, consumer) 161 if (cp->acr || cp->acw || cp->ace) { 162 g_topology_unlock(); 163 return (EBUSY); 164 } 165 /* If the geom is withering, wait for it to finish. */ 166 if (gp->flags & G_GEOM_WITHER) { 167 g_topology_sleep(mp, 1); 168 goto retry; 169 } 170 } 171 172 /* 173 * We allow unloading if we have no geoms, or a class 174 * method we can use to get rid of them. 175 */ 176 if (!LIST_EMPTY(&mp->geom) && mp->destroy_geom == NULL) { 177 g_topology_unlock(); 178 return (EOPNOTSUPP); 179 } 180 181 /* Bar new entries */ 182 mp->taste = NULL; 183 mp->config = NULL; 184 185 LIST_FOREACH(gp, &mp->geom, geom) { 186 error = mp->destroy_geom(NULL, mp, gp); 187 if (error != 0) { 188 g_topology_unlock(); 189 return (error); 190 } 191 } 192 /* Wait for withering to finish. */ 193 for (;;) { 194 gp = LIST_FIRST(&mp->geom); 195 if (gp == NULL) 196 break; 197 KASSERT(gp->flags & G_GEOM_WITHER, 198 ("Non-withering geom in class %s", mp->name)); 199 g_topology_sleep(mp, 1); 200 } 201 G_VALID_CLASS(mp); 202 if (mp->fini != NULL) 203 mp->fini(mp); 204 LIST_REMOVE(mp, class); 205 g_topology_unlock(); 206 207 return (0); 208 } 209 210 int 211 g_modevent(module_t mod, int type, void *data) 212 { 213 struct g_hh00 *hh; 214 int error; 215 static int g_ignition; 216 struct g_class *mp; 217 218 mp = data; 219 if (mp->version != G_VERSION) { 220 printf("GEOM class %s has Wrong version %x\n", 221 mp->name, mp->version); 222 return (EINVAL); 223 } 224 if (!g_ignition) { 225 g_ignition++; 226 g_init(); 227 } 228 error = EOPNOTSUPP; 229 switch (type) { 230 case MOD_LOAD: 231 g_trace(G_T_TOPOLOGY, "g_modevent(%s, LOAD)", mp->name); 232 hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO); 233 hh->mp = mp; 234 /* 235 * Once the system is not cold, MOD_LOAD calls will be 236 * from the userland and the g_event thread will be able 237 * to acknowledge their completion. 238 */ 239 if (cold) { 240 hh->post = 1; 241 error = g_post_event(g_load_class, hh, M_WAITOK, NULL); 242 } else { 243 error = g_waitfor_event(g_load_class, hh, M_WAITOK, 244 NULL); 245 if (error == 0) 246 error = hh->error; 247 g_free(hh); 248 } 249 break; 250 case MOD_UNLOAD: 251 g_trace(G_T_TOPOLOGY, "g_modevent(%s, UNLOAD)", mp->name); 252 error = g_unload_class(mp); 253 if (error == 0) { 254 KASSERT(LIST_EMPTY(&mp->geom), 255 ("Unloaded class (%s) still has geom", mp->name)); 256 } 257 break; 258 } 259 return (error); 260 } 261 262 static void 263 g_retaste_event(void *arg, int flag) 264 { 265 struct g_class *mp, *mp2; 266 struct g_geom *gp; 267 struct g_hh00 *hh; 268 struct g_provider *pp; 269 struct g_consumer *cp; 270 271 g_topology_assert(); 272 if (flag == EV_CANCEL) /* XXX: can't happen ? */ 273 return; 274 if (g_shutdown || g_notaste) 275 return; 276 277 hh = arg; 278 mp = hh->mp; 279 hh->error = 0; 280 if (hh->post) { 281 g_free(hh); 282 hh = NULL; 283 } 284 g_trace(G_T_TOPOLOGY, "g_retaste(%s)", mp->name); 285 286 LIST_FOREACH(mp2, &g_classes, class) { 287 LIST_FOREACH(gp, &mp2->geom, geom) { 288 LIST_FOREACH(pp, &gp->provider, provider) { 289 if (pp->acr || pp->acw || pp->ace) 290 continue; 291 LIST_FOREACH(cp, &pp->consumers, consumers) { 292 if (cp->geom->class == mp && 293 (cp->flags & G_CF_ORPHAN) == 0) 294 break; 295 } 296 if (cp != NULL) { 297 cp->flags |= G_CF_ORPHAN; 298 g_wither_geom(cp->geom, ENXIO); 299 } 300 mp->taste(mp, pp, 0); 301 g_topology_assert(); 302 } 303 } 304 } 305 } 306 307 int 308 g_retaste(struct g_class *mp) 309 { 310 struct g_hh00 *hh; 311 int error; 312 313 if (mp->taste == NULL) 314 return (EINVAL); 315 316 hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO); 317 hh->mp = mp; 318 319 if (cold) { 320 hh->post = 1; 321 error = g_post_event(g_retaste_event, hh, M_WAITOK, NULL); 322 } else { 323 error = g_waitfor_event(g_retaste_event, hh, M_WAITOK, NULL); 324 if (error == 0) 325 error = hh->error; 326 g_free(hh); 327 } 328 329 return (error); 330 } 331 332 struct g_geom * 333 g_new_geomf(struct g_class *mp, const char *fmt, ...) 334 { 335 struct g_geom *gp; 336 va_list ap; 337 struct sbuf *sb; 338 339 g_topology_assert(); 340 G_VALID_CLASS(mp); 341 sb = sbuf_new_auto(); 342 va_start(ap, fmt); 343 sbuf_vprintf(sb, fmt, ap); 344 va_end(ap); 345 sbuf_finish(sb); 346 gp = g_malloc(sizeof *gp, M_WAITOK | M_ZERO); 347 gp->name = g_malloc(sbuf_len(sb) + 1, M_WAITOK | M_ZERO); 348 gp->class = mp; 349 gp->rank = 1; 350 LIST_INIT(&gp->consumer); 351 LIST_INIT(&gp->provider); 352 LIST_INIT(&gp->aliases); 353 LIST_INSERT_HEAD(&mp->geom, gp, geom); 354 TAILQ_INSERT_HEAD(&geoms, gp, geoms); 355 strcpy(gp->name, sbuf_data(sb)); 356 sbuf_delete(sb); 357 /* Fill in defaults from class */ 358 gp->start = mp->start; 359 gp->spoiled = mp->spoiled; 360 gp->attrchanged = mp->attrchanged; 361 gp->providergone = mp->providergone; 362 gp->dumpconf = mp->dumpconf; 363 gp->access = mp->access; 364 gp->orphan = mp->orphan; 365 gp->ioctl = mp->ioctl; 366 gp->resize = mp->resize; 367 return (gp); 368 } 369 370 void 371 g_destroy_geom(struct g_geom *gp) 372 { 373 struct g_geom_alias *gap, *gaptmp; 374 375 g_topology_assert(); 376 G_VALID_GEOM(gp); 377 g_trace(G_T_TOPOLOGY, "g_destroy_geom(%p(%s))", gp, gp->name); 378 KASSERT(LIST_EMPTY(&gp->consumer), 379 ("g_destroy_geom(%s) with consumer(s) [%p]", 380 gp->name, LIST_FIRST(&gp->consumer))); 381 KASSERT(LIST_EMPTY(&gp->provider), 382 ("g_destroy_geom(%s) with provider(s) [%p]", 383 gp->name, LIST_FIRST(&gp->provider))); 384 g_cancel_event(gp); 385 LIST_REMOVE(gp, geom); 386 TAILQ_REMOVE(&geoms, gp, geoms); 387 LIST_FOREACH_SAFE(gap, &gp->aliases, ga_next, gaptmp) 388 g_free(gap); 389 g_free(gp->name); 390 g_free(gp); 391 } 392 393 /* 394 * This function is called (repeatedly) until the geom has withered away. 395 */ 396 void 397 g_wither_geom(struct g_geom *gp, int error) 398 { 399 struct g_provider *pp; 400 401 g_topology_assert(); 402 G_VALID_GEOM(gp); 403 g_trace(G_T_TOPOLOGY, "g_wither_geom(%p(%s))", gp, gp->name); 404 if (!(gp->flags & G_GEOM_WITHER)) { 405 gp->flags |= G_GEOM_WITHER; 406 LIST_FOREACH(pp, &gp->provider, provider) 407 if (!(pp->flags & G_PF_ORPHAN)) 408 g_orphan_provider(pp, error); 409 } 410 g_do_wither(); 411 } 412 413 /* 414 * Convenience function to destroy a particular provider. 415 */ 416 void 417 g_wither_provider(struct g_provider *pp, int error) 418 { 419 420 pp->flags |= G_PF_WITHER; 421 if (!(pp->flags & G_PF_ORPHAN)) 422 g_orphan_provider(pp, error); 423 } 424 425 /* 426 * This function is called (repeatedly) until the has withered away. 427 */ 428 void 429 g_wither_geom_close(struct g_geom *gp, int error) 430 { 431 struct g_consumer *cp; 432 433 g_topology_assert(); 434 G_VALID_GEOM(gp); 435 g_trace(G_T_TOPOLOGY, "g_wither_geom_close(%p(%s))", gp, gp->name); 436 LIST_FOREACH(cp, &gp->consumer, consumer) 437 if (cp->acr || cp->acw || cp->ace) 438 g_access(cp, -cp->acr, -cp->acw, -cp->ace); 439 g_wither_geom(gp, error); 440 } 441 442 /* 443 * This function is called (repeatedly) until we cant wash away more 444 * withered bits at present. 445 */ 446 void 447 g_wither_washer() 448 { 449 struct g_class *mp; 450 struct g_geom *gp, *gp2; 451 struct g_provider *pp, *pp2; 452 struct g_consumer *cp, *cp2; 453 454 g_topology_assert(); 455 LIST_FOREACH(mp, &g_classes, class) { 456 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 457 LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) { 458 if (!(pp->flags & G_PF_WITHER)) 459 continue; 460 if (LIST_EMPTY(&pp->consumers)) 461 g_destroy_provider(pp); 462 } 463 if (!(gp->flags & G_GEOM_WITHER)) 464 continue; 465 LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) { 466 if (LIST_EMPTY(&pp->consumers)) 467 g_destroy_provider(pp); 468 } 469 LIST_FOREACH_SAFE(cp, &gp->consumer, consumer, cp2) { 470 if (cp->acr || cp->acw || cp->ace) 471 continue; 472 if (cp->provider != NULL) 473 g_detach(cp); 474 g_destroy_consumer(cp); 475 } 476 if (LIST_EMPTY(&gp->provider) && 477 LIST_EMPTY(&gp->consumer)) 478 g_destroy_geom(gp); 479 } 480 } 481 } 482 483 struct g_consumer * 484 g_new_consumer(struct g_geom *gp) 485 { 486 struct g_consumer *cp; 487 488 g_topology_assert(); 489 G_VALID_GEOM(gp); 490 KASSERT(!(gp->flags & G_GEOM_WITHER), 491 ("g_new_consumer on WITHERing geom(%s) (class %s)", 492 gp->name, gp->class->name)); 493 KASSERT(gp->orphan != NULL, 494 ("g_new_consumer on geom(%s) (class %s) without orphan", 495 gp->name, gp->class->name)); 496 497 cp = g_malloc(sizeof *cp, M_WAITOK | M_ZERO); 498 cp->geom = gp; 499 cp->stat = devstat_new_entry(cp, -1, 0, DEVSTAT_ALL_SUPPORTED, 500 DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); 501 LIST_INSERT_HEAD(&gp->consumer, cp, consumer); 502 return(cp); 503 } 504 505 void 506 g_destroy_consumer(struct g_consumer *cp) 507 { 508 struct g_geom *gp; 509 510 g_topology_assert(); 511 G_VALID_CONSUMER(cp); 512 g_trace(G_T_TOPOLOGY, "g_destroy_consumer(%p)", cp); 513 KASSERT (cp->provider == NULL, ("g_destroy_consumer but attached")); 514 KASSERT (cp->acr == 0, ("g_destroy_consumer with acr")); 515 KASSERT (cp->acw == 0, ("g_destroy_consumer with acw")); 516 KASSERT (cp->ace == 0, ("g_destroy_consumer with ace")); 517 g_cancel_event(cp); 518 gp = cp->geom; 519 LIST_REMOVE(cp, consumer); 520 devstat_remove_entry(cp->stat); 521 g_free(cp); 522 if (gp->flags & G_GEOM_WITHER) 523 g_do_wither(); 524 } 525 526 static void 527 g_new_provider_event(void *arg, int flag) 528 { 529 struct g_class *mp; 530 struct g_provider *pp; 531 struct g_consumer *cp, *next_cp; 532 533 g_topology_assert(); 534 if (flag == EV_CANCEL) 535 return; 536 if (g_shutdown) 537 return; 538 pp = arg; 539 G_VALID_PROVIDER(pp); 540 KASSERT(!(pp->flags & G_PF_WITHER), 541 ("g_new_provider_event but withered")); 542 LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, next_cp) { 543 if ((cp->flags & G_CF_ORPHAN) == 0 && 544 cp->geom->attrchanged != NULL) 545 cp->geom->attrchanged(cp, "GEOM::media"); 546 } 547 if (g_notaste) 548 return; 549 LIST_FOREACH(mp, &g_classes, class) { 550 if (mp->taste == NULL) 551 continue; 552 LIST_FOREACH(cp, &pp->consumers, consumers) 553 if (cp->geom->class == mp && 554 (cp->flags & G_CF_ORPHAN) == 0) 555 break; 556 if (cp != NULL) 557 continue; 558 mp->taste(mp, pp, 0); 559 g_topology_assert(); 560 } 561 } 562 563 564 struct g_provider * 565 g_new_providerf(struct g_geom *gp, const char *fmt, ...) 566 { 567 struct g_provider *pp; 568 struct sbuf *sb; 569 va_list ap; 570 571 g_topology_assert(); 572 G_VALID_GEOM(gp); 573 KASSERT(gp->access != NULL, 574 ("new provider on geom(%s) without ->access (class %s)", 575 gp->name, gp->class->name)); 576 KASSERT(gp->start != NULL, 577 ("new provider on geom(%s) without ->start (class %s)", 578 gp->name, gp->class->name)); 579 KASSERT(!(gp->flags & G_GEOM_WITHER), 580 ("new provider on WITHERing geom(%s) (class %s)", 581 gp->name, gp->class->name)); 582 sb = sbuf_new_auto(); 583 va_start(ap, fmt); 584 sbuf_vprintf(sb, fmt, ap); 585 va_end(ap); 586 sbuf_finish(sb); 587 pp = g_malloc(sizeof *pp + sbuf_len(sb) + 1, M_WAITOK | M_ZERO); 588 pp->name = (char *)(pp + 1); 589 strcpy(pp->name, sbuf_data(sb)); 590 sbuf_delete(sb); 591 LIST_INIT(&pp->consumers); 592 pp->error = ENXIO; 593 pp->geom = gp; 594 pp->stat = devstat_new_entry(pp, -1, 0, DEVSTAT_ALL_SUPPORTED, 595 DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); 596 LIST_INSERT_HEAD(&gp->provider, pp, provider); 597 g_post_event(g_new_provider_event, pp, M_WAITOK, pp, gp, NULL); 598 return (pp); 599 } 600 601 void 602 g_error_provider(struct g_provider *pp, int error) 603 { 604 605 /* G_VALID_PROVIDER(pp); We may not have g_topology */ 606 pp->error = error; 607 } 608 609 static void 610 g_resize_provider_event(void *arg, int flag) 611 { 612 struct g_hh00 *hh; 613 struct g_class *mp; 614 struct g_geom *gp; 615 struct g_provider *pp; 616 struct g_consumer *cp, *cp2; 617 off_t size; 618 619 g_topology_assert(); 620 if (g_shutdown) 621 return; 622 623 hh = arg; 624 pp = hh->pp; 625 size = hh->size; 626 g_free(hh); 627 628 G_VALID_PROVIDER(pp); 629 KASSERT(!(pp->flags & G_PF_WITHER), 630 ("g_resize_provider_event but withered")); 631 g_trace(G_T_TOPOLOGY, "g_resize_provider_event(%p)", pp); 632 633 LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) { 634 gp = cp->geom; 635 if (gp->resize == NULL && size < pp->mediasize) { 636 /* 637 * XXX: g_dev_orphan method does deferred destroying 638 * and it is possible, that other event could already 639 * call the orphan method. Check consumer's flags to 640 * do not schedule it twice. 641 */ 642 if (cp->flags & G_CF_ORPHAN) 643 continue; 644 cp->flags |= G_CF_ORPHAN; 645 cp->geom->orphan(cp); 646 } 647 } 648 649 pp->mediasize = size; 650 651 LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) { 652 gp = cp->geom; 653 if ((gp->flags & G_GEOM_WITHER) == 0 && gp->resize != NULL) 654 gp->resize(cp); 655 } 656 657 /* 658 * After resizing, the previously invalid GEOM class metadata 659 * might become valid. This means we should retaste. 660 */ 661 LIST_FOREACH(mp, &g_classes, class) { 662 if (mp->taste == NULL) 663 continue; 664 LIST_FOREACH(cp, &pp->consumers, consumers) 665 if (cp->geom->class == mp && 666 (cp->flags & G_CF_ORPHAN) == 0) 667 break; 668 if (cp != NULL) 669 continue; 670 mp->taste(mp, pp, 0); 671 g_topology_assert(); 672 } 673 } 674 675 void 676 g_resize_provider(struct g_provider *pp, off_t size) 677 { 678 struct g_hh00 *hh; 679 680 G_VALID_PROVIDER(pp); 681 if (pp->flags & G_PF_WITHER) 682 return; 683 684 if (size == pp->mediasize) 685 return; 686 687 hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO); 688 hh->pp = pp; 689 hh->size = size; 690 g_post_event(g_resize_provider_event, hh, M_WAITOK, NULL); 691 } 692 693 #ifndef _PATH_DEV 694 #define _PATH_DEV "/dev/" 695 #endif 696 697 struct g_provider * 698 g_provider_by_name(char const *arg) 699 { 700 struct g_class *cp; 701 struct g_geom *gp; 702 struct g_provider *pp, *wpp; 703 704 if (strncmp(arg, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 705 arg += sizeof(_PATH_DEV) - 1; 706 707 wpp = NULL; 708 LIST_FOREACH(cp, &g_classes, class) { 709 LIST_FOREACH(gp, &cp->geom, geom) { 710 LIST_FOREACH(pp, &gp->provider, provider) { 711 if (strcmp(arg, pp->name) != 0) 712 continue; 713 if ((gp->flags & G_GEOM_WITHER) == 0 && 714 (pp->flags & G_PF_WITHER) == 0) 715 return (pp); 716 else 717 wpp = pp; 718 } 719 } 720 } 721 722 return (wpp); 723 } 724 725 void 726 g_destroy_provider(struct g_provider *pp) 727 { 728 struct g_geom *gp; 729 730 g_topology_assert(); 731 G_VALID_PROVIDER(pp); 732 KASSERT(LIST_EMPTY(&pp->consumers), 733 ("g_destroy_provider but attached")); 734 KASSERT (pp->acr == 0, ("g_destroy_provider with acr")); 735 KASSERT (pp->acw == 0, ("g_destroy_provider with acw")); 736 KASSERT (pp->ace == 0, ("g_destroy_provider with ace")); 737 g_cancel_event(pp); 738 LIST_REMOVE(pp, provider); 739 gp = pp->geom; 740 devstat_remove_entry(pp->stat); 741 /* 742 * If a callback was provided, send notification that the provider 743 * is now gone. 744 */ 745 if (gp->providergone != NULL) 746 gp->providergone(pp); 747 748 g_free(pp); 749 if ((gp->flags & G_GEOM_WITHER)) 750 g_do_wither(); 751 } 752 753 /* 754 * We keep the "geoms" list sorted by topological order (== increasing 755 * numerical rank) at all times. 756 * When an attach is done, the attaching geoms rank is invalidated 757 * and it is moved to the tail of the list. 758 * All geoms later in the sequence has their ranks reevaluated in 759 * sequence. If we cannot assign rank to a geom because it's 760 * prerequisites do not have rank, we move that element to the tail 761 * of the sequence with invalid rank as well. 762 * At some point we encounter our original geom and if we stil fail 763 * to assign it a rank, there must be a loop and we fail back to 764 * g_attach() which detach again and calls redo_rank again 765 * to fix up the damage. 766 * It would be much simpler code wise to do it recursively, but we 767 * can't risk that on the kernel stack. 768 */ 769 770 static int 771 redo_rank(struct g_geom *gp) 772 { 773 struct g_consumer *cp; 774 struct g_geom *gp1, *gp2; 775 int n, m; 776 777 g_topology_assert(); 778 G_VALID_GEOM(gp); 779 780 /* Invalidate this geoms rank and move it to the tail */ 781 gp1 = TAILQ_NEXT(gp, geoms); 782 if (gp1 != NULL) { 783 gp->rank = 0; 784 TAILQ_REMOVE(&geoms, gp, geoms); 785 TAILQ_INSERT_TAIL(&geoms, gp, geoms); 786 } else { 787 gp1 = gp; 788 } 789 790 /* re-rank the rest of the sequence */ 791 for (; gp1 != NULL; gp1 = gp2) { 792 gp1->rank = 0; 793 m = 1; 794 LIST_FOREACH(cp, &gp1->consumer, consumer) { 795 if (cp->provider == NULL) 796 continue; 797 n = cp->provider->geom->rank; 798 if (n == 0) { 799 m = 0; 800 break; 801 } else if (n >= m) 802 m = n + 1; 803 } 804 gp1->rank = m; 805 gp2 = TAILQ_NEXT(gp1, geoms); 806 807 /* got a rank, moving on */ 808 if (m != 0) 809 continue; 810 811 /* no rank to original geom means loop */ 812 if (gp == gp1) 813 return (ELOOP); 814 815 /* no rank, put it at the end move on */ 816 TAILQ_REMOVE(&geoms, gp1, geoms); 817 TAILQ_INSERT_TAIL(&geoms, gp1, geoms); 818 } 819 return (0); 820 } 821 822 int 823 g_attach(struct g_consumer *cp, struct g_provider *pp) 824 { 825 int error; 826 827 g_topology_assert(); 828 G_VALID_CONSUMER(cp); 829 G_VALID_PROVIDER(pp); 830 g_trace(G_T_TOPOLOGY, "g_attach(%p, %p)", cp, pp); 831 KASSERT(cp->provider == NULL, ("attach but attached")); 832 cp->provider = pp; 833 cp->flags &= ~G_CF_ORPHAN; 834 LIST_INSERT_HEAD(&pp->consumers, cp, consumers); 835 error = redo_rank(cp->geom); 836 if (error) { 837 LIST_REMOVE(cp, consumers); 838 cp->provider = NULL; 839 redo_rank(cp->geom); 840 } 841 return (error); 842 } 843 844 void 845 g_detach(struct g_consumer *cp) 846 { 847 struct g_provider *pp; 848 849 g_topology_assert(); 850 G_VALID_CONSUMER(cp); 851 g_trace(G_T_TOPOLOGY, "g_detach(%p)", cp); 852 KASSERT(cp->provider != NULL, ("detach but not attached")); 853 KASSERT(cp->acr == 0, ("detach but nonzero acr")); 854 KASSERT(cp->acw == 0, ("detach but nonzero acw")); 855 KASSERT(cp->ace == 0, ("detach but nonzero ace")); 856 KASSERT(cp->nstart == cp->nend, 857 ("detach with active requests")); 858 pp = cp->provider; 859 LIST_REMOVE(cp, consumers); 860 cp->provider = NULL; 861 if ((cp->geom->flags & G_GEOM_WITHER) || 862 (pp->geom->flags & G_GEOM_WITHER) || 863 (pp->flags & G_PF_WITHER)) 864 g_do_wither(); 865 redo_rank(cp->geom); 866 } 867 868 /* 869 * g_access() 870 * 871 * Access-check with delta values. The question asked is "can provider 872 * "cp" change the access counters by the relative amounts dc[rwe] ?" 873 */ 874 875 int 876 g_access(struct g_consumer *cp, int dcr, int dcw, int dce) 877 { 878 struct g_provider *pp; 879 struct g_geom *gp; 880 int pw, pe; 881 #ifdef INVARIANTS 882 int sr, sw, se; 883 #endif 884 int error; 885 886 g_topology_assert(); 887 G_VALID_CONSUMER(cp); 888 pp = cp->provider; 889 KASSERT(pp != NULL, ("access but not attached")); 890 G_VALID_PROVIDER(pp); 891 gp = pp->geom; 892 893 g_trace(G_T_ACCESS, "g_access(%p(%s), %d, %d, %d)", 894 cp, pp->name, dcr, dcw, dce); 895 896 KASSERT(cp->acr + dcr >= 0, ("access resulting in negative acr")); 897 KASSERT(cp->acw + dcw >= 0, ("access resulting in negative acw")); 898 KASSERT(cp->ace + dce >= 0, ("access resulting in negative ace")); 899 KASSERT(dcr != 0 || dcw != 0 || dce != 0, ("NOP access request")); 900 KASSERT(gp->access != NULL, ("NULL geom->access")); 901 902 /* 903 * If our class cares about being spoiled, and we have been, we 904 * are probably just ahead of the event telling us that. Fail 905 * now rather than having to unravel this later. 906 */ 907 if (cp->geom->spoiled != NULL && (cp->flags & G_CF_SPOILED) && 908 (dcr > 0 || dcw > 0 || dce > 0)) 909 return (ENXIO); 910 911 /* 912 * A number of GEOM classes either need to perform an I/O on the first 913 * open or to acquire a different subsystem's lock. To do that they 914 * may have to drop the topology lock. 915 * Other GEOM classes perform special actions when opening a lower rank 916 * geom for the first time. As a result, more than one thread may 917 * end up performing the special actions. 918 * So, we prevent concurrent "first" opens by marking the consumer with 919 * special flag. 920 * 921 * Note that if the geom's access method never drops the topology lock, 922 * then we will never see G_GEOM_IN_ACCESS here. 923 */ 924 while ((gp->flags & G_GEOM_IN_ACCESS) != 0) { 925 g_trace(G_T_ACCESS, 926 "%s: race on geom %s via provider %s and consumer of %s", 927 __func__, gp->name, pp->name, cp->geom->name); 928 gp->flags |= G_GEOM_ACCESS_WAIT; 929 g_topology_sleep(gp, 0); 930 } 931 932 /* 933 * Figure out what counts the provider would have had, if this 934 * consumer had (r0w0e0) at this time. 935 */ 936 pw = pp->acw - cp->acw; 937 pe = pp->ace - cp->ace; 938 939 g_trace(G_T_ACCESS, 940 "open delta:[r%dw%de%d] old:[r%dw%de%d] provider:[r%dw%de%d] %p(%s)", 941 dcr, dcw, dce, 942 cp->acr, cp->acw, cp->ace, 943 pp->acr, pp->acw, pp->ace, 944 pp, pp->name); 945 946 /* If foot-shooting is enabled, any open on rank#1 is OK */ 947 if ((g_debugflags & 16) && gp->rank == 1) 948 ; 949 /* If we try exclusive but already write: fail */ 950 else if (dce > 0 && pw > 0) 951 return (EPERM); 952 /* If we try write but already exclusive: fail */ 953 else if (dcw > 0 && pe > 0) 954 return (EPERM); 955 /* If we try to open more but provider is error'ed: fail */ 956 else if ((dcr > 0 || dcw > 0 || dce > 0) && pp->error != 0) { 957 printf("%s(%d): provider %s has error %d set\n", 958 __func__, __LINE__, pp->name, pp->error); 959 return (pp->error); 960 } 961 962 /* Ok then... */ 963 964 #ifdef INVARIANTS 965 sr = cp->acr; 966 sw = cp->acw; 967 se = cp->ace; 968 #endif 969 gp->flags |= G_GEOM_IN_ACCESS; 970 error = gp->access(pp, dcr, dcw, dce); 971 KASSERT(dcr > 0 || dcw > 0 || dce > 0 || error == 0, 972 ("Geom provider %s::%s dcr=%d dcw=%d dce=%d error=%d failed " 973 "closing ->access()", gp->class->name, pp->name, dcr, dcw, 974 dce, error)); 975 976 g_topology_assert(); 977 gp->flags &= ~G_GEOM_IN_ACCESS; 978 KASSERT(cp->acr == sr && cp->acw == sw && cp->ace == se, 979 ("Access counts changed during geom->access")); 980 if ((gp->flags & G_GEOM_ACCESS_WAIT) != 0) { 981 gp->flags &= ~G_GEOM_ACCESS_WAIT; 982 wakeup(gp); 983 } 984 985 if (!error) { 986 /* 987 * If we open first write, spoil any partner consumers. 988 * If we close last write and provider is not errored, 989 * trigger re-taste. 990 */ 991 if (pp->acw == 0 && dcw != 0) 992 g_spoil(pp, cp); 993 else if (pp->acw != 0 && pp->acw == -dcw && pp->error == 0 && 994 !(gp->flags & G_GEOM_WITHER)) 995 g_post_event(g_new_provider_event, pp, M_WAITOK, 996 pp, NULL); 997 998 pp->acr += dcr; 999 pp->acw += dcw; 1000 pp->ace += dce; 1001 cp->acr += dcr; 1002 cp->acw += dcw; 1003 cp->ace += dce; 1004 if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) 1005 KASSERT(pp->sectorsize > 0, 1006 ("Provider %s lacks sectorsize", pp->name)); 1007 if ((cp->geom->flags & G_GEOM_WITHER) && 1008 cp->acr == 0 && cp->acw == 0 && cp->ace == 0) 1009 g_do_wither(); 1010 } 1011 return (error); 1012 } 1013 1014 int 1015 g_handleattr_int(struct bio *bp, const char *attribute, int val) 1016 { 1017 1018 return (g_handleattr(bp, attribute, &val, sizeof val)); 1019 } 1020 1021 int 1022 g_handleattr_uint16_t(struct bio *bp, const char *attribute, uint16_t val) 1023 { 1024 1025 return (g_handleattr(bp, attribute, &val, sizeof val)); 1026 } 1027 1028 int 1029 g_handleattr_off_t(struct bio *bp, const char *attribute, off_t val) 1030 { 1031 1032 return (g_handleattr(bp, attribute, &val, sizeof val)); 1033 } 1034 1035 int 1036 g_handleattr_str(struct bio *bp, const char *attribute, const char *str) 1037 { 1038 1039 return (g_handleattr(bp, attribute, str, 0)); 1040 } 1041 1042 int 1043 g_handleattr(struct bio *bp, const char *attribute, const void *val, int len) 1044 { 1045 int error = 0; 1046 1047 if (strcmp(bp->bio_attribute, attribute)) 1048 return (0); 1049 if (len == 0) { 1050 bzero(bp->bio_data, bp->bio_length); 1051 if (strlcpy(bp->bio_data, val, bp->bio_length) >= 1052 bp->bio_length) { 1053 printf("%s: %s %s bio_length %jd strlen %zu -> EFAULT\n", 1054 __func__, bp->bio_to->name, attribute, 1055 (intmax_t)bp->bio_length, strlen(val)); 1056 error = EFAULT; 1057 } 1058 } else if (bp->bio_length == len) { 1059 bcopy(val, bp->bio_data, len); 1060 } else { 1061 printf("%s: %s %s bio_length %jd len %d -> EFAULT\n", __func__, 1062 bp->bio_to->name, attribute, (intmax_t)bp->bio_length, len); 1063 error = EFAULT; 1064 } 1065 if (error == 0) 1066 bp->bio_completed = bp->bio_length; 1067 g_io_deliver(bp, error); 1068 return (1); 1069 } 1070 1071 int 1072 g_std_access(struct g_provider *pp, 1073 int dr __unused, int dw __unused, int de __unused) 1074 { 1075 1076 g_topology_assert(); 1077 G_VALID_PROVIDER(pp); 1078 return (0); 1079 } 1080 1081 void 1082 g_std_done(struct bio *bp) 1083 { 1084 struct bio *bp2; 1085 1086 bp2 = bp->bio_parent; 1087 if (bp2->bio_error == 0) 1088 bp2->bio_error = bp->bio_error; 1089 bp2->bio_completed += bp->bio_completed; 1090 g_destroy_bio(bp); 1091 bp2->bio_inbed++; 1092 if (bp2->bio_children == bp2->bio_inbed) 1093 g_io_deliver(bp2, bp2->bio_error); 1094 } 1095 1096 /* XXX: maybe this is only g_slice_spoiled */ 1097 1098 void 1099 g_std_spoiled(struct g_consumer *cp) 1100 { 1101 struct g_geom *gp; 1102 struct g_provider *pp; 1103 1104 g_topology_assert(); 1105 G_VALID_CONSUMER(cp); 1106 g_trace(G_T_TOPOLOGY, "g_std_spoiled(%p)", cp); 1107 cp->flags |= G_CF_ORPHAN; 1108 g_detach(cp); 1109 gp = cp->geom; 1110 LIST_FOREACH(pp, &gp->provider, provider) 1111 g_orphan_provider(pp, ENXIO); 1112 g_destroy_consumer(cp); 1113 if (LIST_EMPTY(&gp->provider) && LIST_EMPTY(&gp->consumer)) 1114 g_destroy_geom(gp); 1115 else 1116 gp->flags |= G_GEOM_WITHER; 1117 } 1118 1119 /* 1120 * Spoiling happens when a provider is opened for writing, but consumers 1121 * which are configured by in-band data are attached (slicers for instance). 1122 * Since the write might potentially change the in-band data, such consumers 1123 * need to re-evaluate their existence after the writing session closes. 1124 * We do this by (offering to) tear them down when the open for write happens 1125 * in return for a re-taste when it closes again. 1126 * Together with the fact that such consumers grab an 'e' bit whenever they 1127 * are open, regardless of mode, this ends up DTRT. 1128 */ 1129 1130 static void 1131 g_spoil_event(void *arg, int flag) 1132 { 1133 struct g_provider *pp; 1134 struct g_consumer *cp, *cp2; 1135 1136 g_topology_assert(); 1137 if (flag == EV_CANCEL) 1138 return; 1139 pp = arg; 1140 G_VALID_PROVIDER(pp); 1141 g_trace(G_T_TOPOLOGY, "%s %p(%s:%s:%s)", __func__, pp, 1142 pp->geom->class->name, pp->geom->name, pp->name); 1143 for (cp = LIST_FIRST(&pp->consumers); cp != NULL; cp = cp2) { 1144 cp2 = LIST_NEXT(cp, consumers); 1145 if ((cp->flags & G_CF_SPOILED) == 0) 1146 continue; 1147 cp->flags &= ~G_CF_SPOILED; 1148 if (cp->geom->spoiled == NULL) 1149 continue; 1150 cp->geom->spoiled(cp); 1151 g_topology_assert(); 1152 } 1153 } 1154 1155 void 1156 g_spoil(struct g_provider *pp, struct g_consumer *cp) 1157 { 1158 struct g_consumer *cp2; 1159 1160 g_topology_assert(); 1161 G_VALID_PROVIDER(pp); 1162 G_VALID_CONSUMER(cp); 1163 1164 LIST_FOREACH(cp2, &pp->consumers, consumers) { 1165 if (cp2 == cp) 1166 continue; 1167 /* 1168 KASSERT(cp2->acr == 0, ("spoiling cp->acr = %d", cp2->acr)); 1169 KASSERT(cp2->acw == 0, ("spoiling cp->acw = %d", cp2->acw)); 1170 */ 1171 KASSERT(cp2->ace == 0, ("spoiling cp->ace = %d", cp2->ace)); 1172 cp2->flags |= G_CF_SPOILED; 1173 } 1174 g_post_event(g_spoil_event, pp, M_WAITOK, pp, NULL); 1175 } 1176 1177 static void 1178 g_media_changed_event(void *arg, int flag) 1179 { 1180 struct g_provider *pp; 1181 int retaste; 1182 1183 g_topology_assert(); 1184 if (flag == EV_CANCEL) 1185 return; 1186 pp = arg; 1187 G_VALID_PROVIDER(pp); 1188 1189 /* 1190 * If provider was not open for writing, queue retaste after spoiling. 1191 * If it was, retaste will happen automatically on close. 1192 */ 1193 retaste = (pp->acw == 0 && pp->error == 0 && 1194 !(pp->geom->flags & G_GEOM_WITHER)); 1195 g_spoil_event(arg, flag); 1196 if (retaste) 1197 g_post_event(g_new_provider_event, pp, M_WAITOK, pp, NULL); 1198 } 1199 1200 int 1201 g_media_changed(struct g_provider *pp, int flag) 1202 { 1203 struct g_consumer *cp; 1204 1205 LIST_FOREACH(cp, &pp->consumers, consumers) 1206 cp->flags |= G_CF_SPOILED; 1207 return (g_post_event(g_media_changed_event, pp, flag, pp, NULL)); 1208 } 1209 1210 int 1211 g_media_gone(struct g_provider *pp, int flag) 1212 { 1213 struct g_consumer *cp; 1214 1215 LIST_FOREACH(cp, &pp->consumers, consumers) 1216 cp->flags |= G_CF_SPOILED; 1217 return (g_post_event(g_spoil_event, pp, flag, pp, NULL)); 1218 } 1219 1220 int 1221 g_getattr__(const char *attr, struct g_consumer *cp, void *var, int len) 1222 { 1223 int error, i; 1224 1225 i = len; 1226 error = g_io_getattr(attr, cp, &i, var); 1227 if (error) 1228 return (error); 1229 if (i != len) 1230 return (EINVAL); 1231 return (0); 1232 } 1233 1234 static int 1235 g_get_device_prefix_len(const char *name) 1236 { 1237 int len; 1238 1239 if (strncmp(name, "ada", 3) == 0) 1240 len = 3; 1241 else if (strncmp(name, "ad", 2) == 0) 1242 len = 2; 1243 else 1244 return (0); 1245 if (name[len] < '0' || name[len] > '9') 1246 return (0); 1247 do { 1248 len++; 1249 } while (name[len] >= '0' && name[len] <= '9'); 1250 return (len); 1251 } 1252 1253 int 1254 g_compare_names(const char *namea, const char *nameb) 1255 { 1256 int deva, devb; 1257 1258 if (strcmp(namea, nameb) == 0) 1259 return (1); 1260 deva = g_get_device_prefix_len(namea); 1261 if (deva == 0) 1262 return (0); 1263 devb = g_get_device_prefix_len(nameb); 1264 if (devb == 0) 1265 return (0); 1266 if (strcmp(namea + deva, nameb + devb) == 0) 1267 return (1); 1268 return (0); 1269 } 1270 1271 void 1272 g_geom_add_alias(struct g_geom *gp, const char *alias) 1273 { 1274 struct g_geom_alias *gap; 1275 1276 gap = (struct g_geom_alias *)g_malloc( 1277 sizeof(struct g_geom_alias) + strlen(alias) + 1, M_WAITOK); 1278 strcpy((char *)(gap + 1), alias); 1279 gap->ga_alias = (const char *)(gap + 1); 1280 LIST_INSERT_HEAD(&gp->aliases, gap, ga_next); 1281 } 1282 1283 #if defined(DIAGNOSTIC) || defined(DDB) 1284 /* 1285 * This function walks the mesh and returns a non-zero integer if it 1286 * finds the argument pointer is an object. The return value indicates 1287 * which type of object it is believed to be. If topology is not locked, 1288 * this function is potentially dangerous, but we don't assert that the 1289 * topology lock is held when called from debugger. 1290 */ 1291 int 1292 g_valid_obj(void const *ptr) 1293 { 1294 struct g_class *mp; 1295 struct g_geom *gp; 1296 struct g_consumer *cp; 1297 struct g_provider *pp; 1298 1299 #ifdef KDB 1300 if (kdb_active == 0) 1301 #endif 1302 g_topology_assert(); 1303 1304 LIST_FOREACH(mp, &g_classes, class) { 1305 if (ptr == mp) 1306 return (1); 1307 LIST_FOREACH(gp, &mp->geom, geom) { 1308 if (ptr == gp) 1309 return (2); 1310 LIST_FOREACH(cp, &gp->consumer, consumer) 1311 if (ptr == cp) 1312 return (3); 1313 LIST_FOREACH(pp, &gp->provider, provider) 1314 if (ptr == pp) 1315 return (4); 1316 } 1317 } 1318 return(0); 1319 } 1320 #endif 1321 1322 #ifdef DDB 1323 1324 #define gprintf(...) do { \ 1325 db_printf("%*s", indent, ""); \ 1326 db_printf(__VA_ARGS__); \ 1327 } while (0) 1328 #define gprintln(...) do { \ 1329 gprintf(__VA_ARGS__); \ 1330 db_printf("\n"); \ 1331 } while (0) 1332 1333 #define ADDFLAG(obj, flag, sflag) do { \ 1334 if ((obj)->flags & (flag)) { \ 1335 if (comma) \ 1336 strlcat(str, ",", size); \ 1337 strlcat(str, (sflag), size); \ 1338 comma = 1; \ 1339 } \ 1340 } while (0) 1341 1342 static char * 1343 provider_flags_to_string(struct g_provider *pp, char *str, size_t size) 1344 { 1345 int comma = 0; 1346 1347 bzero(str, size); 1348 if (pp->flags == 0) { 1349 strlcpy(str, "NONE", size); 1350 return (str); 1351 } 1352 ADDFLAG(pp, G_PF_WITHER, "G_PF_WITHER"); 1353 ADDFLAG(pp, G_PF_ORPHAN, "G_PF_ORPHAN"); 1354 return (str); 1355 } 1356 1357 static char * 1358 geom_flags_to_string(struct g_geom *gp, char *str, size_t size) 1359 { 1360 int comma = 0; 1361 1362 bzero(str, size); 1363 if (gp->flags == 0) { 1364 strlcpy(str, "NONE", size); 1365 return (str); 1366 } 1367 ADDFLAG(gp, G_GEOM_WITHER, "G_GEOM_WITHER"); 1368 return (str); 1369 } 1370 static void 1371 db_show_geom_consumer(int indent, struct g_consumer *cp) 1372 { 1373 1374 if (indent == 0) { 1375 gprintln("consumer: %p", cp); 1376 gprintln(" class: %s (%p)", cp->geom->class->name, 1377 cp->geom->class); 1378 gprintln(" geom: %s (%p)", cp->geom->name, cp->geom); 1379 if (cp->provider == NULL) 1380 gprintln(" provider: none"); 1381 else { 1382 gprintln(" provider: %s (%p)", cp->provider->name, 1383 cp->provider); 1384 } 1385 gprintln(" access: r%dw%de%d", cp->acr, cp->acw, cp->ace); 1386 gprintln(" flags: 0x%04x", cp->flags); 1387 gprintln(" nstart: %u", cp->nstart); 1388 gprintln(" nend: %u", cp->nend); 1389 } else { 1390 gprintf("consumer: %p (%s), access=r%dw%de%d", cp, 1391 cp->provider != NULL ? cp->provider->name : "none", 1392 cp->acr, cp->acw, cp->ace); 1393 if (cp->flags) 1394 db_printf(", flags=0x%04x", cp->flags); 1395 db_printf("\n"); 1396 } 1397 } 1398 1399 static void 1400 db_show_geom_provider(int indent, struct g_provider *pp) 1401 { 1402 struct g_consumer *cp; 1403 char flags[64]; 1404 1405 if (indent == 0) { 1406 gprintln("provider: %s (%p)", pp->name, pp); 1407 gprintln(" class: %s (%p)", pp->geom->class->name, 1408 pp->geom->class); 1409 gprintln(" geom: %s (%p)", pp->geom->name, pp->geom); 1410 gprintln(" mediasize: %jd", (intmax_t)pp->mediasize); 1411 gprintln(" sectorsize: %u", pp->sectorsize); 1412 gprintln(" stripesize: %u", pp->stripesize); 1413 gprintln(" stripeoffset: %u", pp->stripeoffset); 1414 gprintln(" access: r%dw%de%d", pp->acr, pp->acw, 1415 pp->ace); 1416 gprintln(" flags: %s (0x%04x)", 1417 provider_flags_to_string(pp, flags, sizeof(flags)), 1418 pp->flags); 1419 gprintln(" error: %d", pp->error); 1420 gprintln(" nstart: %u", pp->nstart); 1421 gprintln(" nend: %u", pp->nend); 1422 if (LIST_EMPTY(&pp->consumers)) 1423 gprintln(" consumers: none"); 1424 } else { 1425 gprintf("provider: %s (%p), access=r%dw%de%d", 1426 pp->name, pp, pp->acr, pp->acw, pp->ace); 1427 if (pp->flags != 0) { 1428 db_printf(", flags=%s (0x%04x)", 1429 provider_flags_to_string(pp, flags, sizeof(flags)), 1430 pp->flags); 1431 } 1432 db_printf("\n"); 1433 } 1434 if (!LIST_EMPTY(&pp->consumers)) { 1435 LIST_FOREACH(cp, &pp->consumers, consumers) { 1436 db_show_geom_consumer(indent + 2, cp); 1437 if (db_pager_quit) 1438 break; 1439 } 1440 } 1441 } 1442 1443 static void 1444 db_show_geom_geom(int indent, struct g_geom *gp) 1445 { 1446 struct g_provider *pp; 1447 struct g_consumer *cp; 1448 char flags[64]; 1449 1450 if (indent == 0) { 1451 gprintln("geom: %s (%p)", gp->name, gp); 1452 gprintln(" class: %s (%p)", gp->class->name, gp->class); 1453 gprintln(" flags: %s (0x%04x)", 1454 geom_flags_to_string(gp, flags, sizeof(flags)), gp->flags); 1455 gprintln(" rank: %d", gp->rank); 1456 if (LIST_EMPTY(&gp->provider)) 1457 gprintln(" providers: none"); 1458 if (LIST_EMPTY(&gp->consumer)) 1459 gprintln(" consumers: none"); 1460 } else { 1461 gprintf("geom: %s (%p), rank=%d", gp->name, gp, gp->rank); 1462 if (gp->flags != 0) { 1463 db_printf(", flags=%s (0x%04x)", 1464 geom_flags_to_string(gp, flags, sizeof(flags)), 1465 gp->flags); 1466 } 1467 db_printf("\n"); 1468 } 1469 if (!LIST_EMPTY(&gp->provider)) { 1470 LIST_FOREACH(pp, &gp->provider, provider) { 1471 db_show_geom_provider(indent + 2, pp); 1472 if (db_pager_quit) 1473 break; 1474 } 1475 } 1476 if (!LIST_EMPTY(&gp->consumer)) { 1477 LIST_FOREACH(cp, &gp->consumer, consumer) { 1478 db_show_geom_consumer(indent + 2, cp); 1479 if (db_pager_quit) 1480 break; 1481 } 1482 } 1483 } 1484 1485 static void 1486 db_show_geom_class(struct g_class *mp) 1487 { 1488 struct g_geom *gp; 1489 1490 db_printf("class: %s (%p)\n", mp->name, mp); 1491 LIST_FOREACH(gp, &mp->geom, geom) { 1492 db_show_geom_geom(2, gp); 1493 if (db_pager_quit) 1494 break; 1495 } 1496 } 1497 1498 /* 1499 * Print the GEOM topology or the given object. 1500 */ 1501 DB_SHOW_COMMAND(geom, db_show_geom) 1502 { 1503 struct g_class *mp; 1504 1505 if (!have_addr) { 1506 /* No address given, print the entire topology. */ 1507 LIST_FOREACH(mp, &g_classes, class) { 1508 db_show_geom_class(mp); 1509 db_printf("\n"); 1510 if (db_pager_quit) 1511 break; 1512 } 1513 } else { 1514 switch (g_valid_obj((void *)addr)) { 1515 case 1: 1516 db_show_geom_class((struct g_class *)addr); 1517 break; 1518 case 2: 1519 db_show_geom_geom(0, (struct g_geom *)addr); 1520 break; 1521 case 3: 1522 db_show_geom_consumer(0, (struct g_consumer *)addr); 1523 break; 1524 case 4: 1525 db_show_geom_provider(0, (struct g_provider *)addr); 1526 break; 1527 default: 1528 db_printf("Not a GEOM object.\n"); 1529 break; 1530 } 1531 } 1532 } 1533 1534 static void 1535 db_print_bio_cmd(struct bio *bp) 1536 { 1537 db_printf(" cmd: "); 1538 switch (bp->bio_cmd) { 1539 case BIO_READ: db_printf("BIO_READ"); break; 1540 case BIO_WRITE: db_printf("BIO_WRITE"); break; 1541 case BIO_DELETE: db_printf("BIO_DELETE"); break; 1542 case BIO_GETATTR: db_printf("BIO_GETATTR"); break; 1543 case BIO_FLUSH: db_printf("BIO_FLUSH"); break; 1544 case BIO_CMD0: db_printf("BIO_CMD0"); break; 1545 case BIO_CMD1: db_printf("BIO_CMD1"); break; 1546 case BIO_CMD2: db_printf("BIO_CMD2"); break; 1547 case BIO_ZONE: db_printf("BIO_ZONE"); break; 1548 default: db_printf("UNKNOWN"); break; 1549 } 1550 db_printf("\n"); 1551 } 1552 1553 static void 1554 db_print_bio_flags(struct bio *bp) 1555 { 1556 int comma; 1557 1558 comma = 0; 1559 db_printf(" flags: "); 1560 if (bp->bio_flags & BIO_ERROR) { 1561 db_printf("BIO_ERROR"); 1562 comma = 1; 1563 } 1564 if (bp->bio_flags & BIO_DONE) { 1565 db_printf("%sBIO_DONE", (comma ? ", " : "")); 1566 comma = 1; 1567 } 1568 if (bp->bio_flags & BIO_ONQUEUE) 1569 db_printf("%sBIO_ONQUEUE", (comma ? ", " : "")); 1570 db_printf("\n"); 1571 } 1572 1573 /* 1574 * Print useful information in a BIO 1575 */ 1576 DB_SHOW_COMMAND(bio, db_show_bio) 1577 { 1578 struct bio *bp; 1579 1580 if (have_addr) { 1581 bp = (struct bio *)addr; 1582 db_printf("BIO %p\n", bp); 1583 db_print_bio_cmd(bp); 1584 db_print_bio_flags(bp); 1585 db_printf(" cflags: 0x%hx\n", bp->bio_cflags); 1586 db_printf(" pflags: 0x%hx\n", bp->bio_pflags); 1587 db_printf(" offset: %jd\n", (intmax_t)bp->bio_offset); 1588 db_printf(" length: %jd\n", (intmax_t)bp->bio_length); 1589 db_printf(" bcount: %ld\n", bp->bio_bcount); 1590 db_printf(" resid: %ld\n", bp->bio_resid); 1591 db_printf(" completed: %jd\n", (intmax_t)bp->bio_completed); 1592 db_printf(" children: %u\n", bp->bio_children); 1593 db_printf(" inbed: %u\n", bp->bio_inbed); 1594 db_printf(" error: %d\n", bp->bio_error); 1595 db_printf(" parent: %p\n", bp->bio_parent); 1596 db_printf(" driver1: %p\n", bp->bio_driver1); 1597 db_printf(" driver2: %p\n", bp->bio_driver2); 1598 db_printf(" caller1: %p\n", bp->bio_caller1); 1599 db_printf(" caller2: %p\n", bp->bio_caller2); 1600 db_printf(" bio_from: %p\n", bp->bio_from); 1601 db_printf(" bio_to: %p\n", bp->bio_to); 1602 1603 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 1604 db_printf(" bio_track_bp: %p\n", bp->bio_track_bp); 1605 #endif 1606 } 1607 } 1608 1609 #undef gprintf 1610 #undef gprintln 1611 #undef ADDFLAG 1612 1613 #endif /* DDB */ 1614