/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/stdint.h>
#ifndef _KERNEL
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <err.h>
#else
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#endif
#include <sys/errno.h>
#include <sys/sbuf.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <geom/geom_stats.h>
#include <machine/stdarg.h>

struct class_list_head g_classes = LIST_HEAD_INITIALIZER(g_classes);
static struct g_tailq_head geoms = TAILQ_HEAD_INITIALIZER(geoms);
static int g_nproviders;
char *g_wait_event, *g_wait_up, *g_wait_down, *g_wait_sim;

static int g_ignition;

void
g_add_class(struct g_class *mp)
{

	if (!g_ignition) {
		g_ignition++;
		g_init();
	}
	mp->protect = 0x020016600;
	g_topology_lock();
	g_trace(G_T_TOPOLOGY, "g_add_class(%s)", mp->name);
	LIST_INIT(&mp->geom);
	LIST_INSERT_HEAD(&g_classes, mp, class);
	if (g_nproviders > 0)
		g_post_event(EV_NEW_CLASS, mp, NULL, NULL, NULL);
	g_topology_unlock();
}

struct g_geom *
g_new_geomf(struct g_class *mp, const char *fmt, ...)
{
	struct g_geom *gp;
	va_list ap;
	struct sbuf *sb;

	g_topology_assert();
	va_start(ap, fmt);
	sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
	sbuf_vprintf(sb, fmt, ap);
	va_end(ap);
	sbuf_finish(sb);
	gp = g_malloc(sizeof *gp, M_WAITOK | M_ZERO);
	gp->protect = 0x020016601;
	gp->name = g_malloc(sbuf_len(sb) + 1, M_WAITOK | M_ZERO);
	gp->class = mp;
	gp->rank = 1;
	LIST_INIT(&gp->consumer);
	LIST_INIT(&gp->provider);
	LIST_INSERT_HEAD(&mp->geom, gp, geom);
	TAILQ_INSERT_HEAD(&geoms, gp, geoms);
	strcpy(gp->name, sbuf_data(sb));
	sbuf_delete(sb);
	return (gp);
}

void
g_destroy_geom(struct g_geom *gp)
{

	g_trace(G_T_TOPOLOGY, "g_destroy_geom(%p(%s))", gp, gp->name);
	g_topology_assert();
	KASSERT(gp->event == NULL, ("g_destroy_geom() with event"));
	KASSERT(LIST_EMPTY(&gp->consumer),
	    ("g_destroy_geom(%s) with consumer(s) [%p]",
	    gp->name, LIST_FIRST(&gp->consumer)));
	KASSERT(LIST_EMPTY(&gp->provider),
	    ("g_destroy_geom(%s) with provider(s) [%p]",
	    gp->name, LIST_FIRST(&gp->provider)));
	LIST_REMOVE(gp, geom);
	TAILQ_REMOVE(&geoms, gp, geoms);
	g_free(gp->name);
	g_free(gp);
}

struct g_consumer *
g_new_consumer(struct g_geom *gp)
{
	struct g_consumer *cp;

	g_topology_assert();
	KASSERT(gp->orphan != NULL,
	    ("g_new_consumer on geom(%s) (class %s) without orphan",
	    gp->name, gp->class->name));

	cp = g_malloc(sizeof *cp, M_WAITOK | M_ZERO);
	cp->protect = 0x020016602;
	cp->geom = gp;
	cp->stat = g_stat_new(cp);
	LIST_INSERT_HEAD(&gp->consumer, cp, consumer);
	return (cp);
}

void
g_destroy_consumer(struct g_consumer *cp)
{

	g_trace(G_T_TOPOLOGY, "g_destroy_consumer(%p)", cp);
	g_topology_assert();
	KASSERT(cp->event == NULL, ("g_destroy_consumer() with event"));
	KASSERT(cp->provider == NULL, ("g_destroy_consumer but attached"));
	KASSERT(cp->acr == 0, ("g_destroy_consumer with acr"));
	KASSERT(cp->acw == 0, ("g_destroy_consumer with acw"));
	KASSERT(cp->ace == 0, ("g_destroy_consumer with ace"));
	LIST_REMOVE(cp, consumer);
	g_stat_delete(cp->stat);
	g_free(cp);
}

struct g_provider *
g_new_providerf(struct g_geom *gp, const char *fmt, ...)
{
	struct g_provider *pp;
	struct sbuf *sb;
	va_list ap;

	g_topology_assert();
	va_start(ap, fmt);
	sb = sbuf_new(NULL, NULL, 0, SBUF_AUTOEXTEND);
	sbuf_vprintf(sb, fmt, ap);
	va_end(ap);
	sbuf_finish(sb);
	pp = g_malloc(sizeof *pp + sbuf_len(sb) + 1, M_WAITOK | M_ZERO);
	pp->protect = 0x020016603;
	pp->name = (char *)(pp + 1);
	strcpy(pp->name, sbuf_data(sb));
	sbuf_delete(sb);
	LIST_INIT(&pp->consumers);
	pp->error = ENXIO;
	pp->geom = gp;
	pp->stat = g_stat_new(pp);
	LIST_INSERT_HEAD(&gp->provider, pp, provider);
	g_nproviders++;
	g_post_event(EV_NEW_PROVIDER, NULL, NULL, pp, NULL);
	return (pp);
}

void
g_error_provider(struct g_provider *pp, int error)
{

	pp->error = error;
}

void
g_destroy_provider(struct g_provider *pp)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	KASSERT(pp->event == NULL, ("g_destroy_provider() with event"));
	KASSERT(LIST_EMPTY(&pp->consumers),
	    ("g_destroy_provider but attached"));
	KASSERT(pp->acr == 0, ("g_destroy_provider with acr"));
	KASSERT(pp->acw == 0, ("g_destroy_provider with acw"));
	KASSERT(pp->ace == 0, ("g_destroy_provider with ace"));
	g_nproviders--;
	LIST_REMOVE(pp, provider);
	gp = pp->geom;
	g_stat_delete(pp->stat);
	g_free(pp);
	if (!(gp->flags & G_GEOM_WITHER))
		return;
	if (!LIST_EMPTY(&gp->provider))
		return;
	for (;;) {
		cp = LIST_FIRST(&gp->consumer);
		if (cp == NULL)
			break;
		g_detach(cp);
		g_destroy_consumer(cp);
	}
	g_destroy_geom(gp);
}

/*
 * We keep the "geoms" list sorted by topological order (== increasing
 * numerical rank) at all times.
 * When an attach is done, the attaching geom's rank is invalidated
 * and it is moved to the tail of the list.
 * All geoms later in the sequence have their ranks reevaluated in
 * sequence.  If we cannot assign a rank to a geom because its
 * prerequisites do not have rank, we move that element to the tail
 * of the sequence with invalid rank as well.
 * At some point we encounter our original geom and if we still fail
 * to assign it a rank, there must be a loop and we fail back to
 * g_attach() which detaches again and calls redo_rank again
 * to fix up the damage.
 * It would be much simpler code-wise to do it recursively, but we
 * can't risk that on the kernel stack.
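 *
 * A small, hypothetical example of the rank rule: a geom that consumes
 * only from a rank 1 geom is assigned rank 2, and a geom consuming from
 * both of those gets rank 3, i.e. one more than the highest-ranked geom
 * it depends on.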
 */

static int
redo_rank(struct g_geom *gp)
{
	struct g_consumer *cp;
	struct g_geom *gp1, *gp2;
	int n, m;

	g_topology_assert();

	/* Invalidate this geom's rank and move it to the tail */
	gp1 = TAILQ_NEXT(gp, geoms);
	if (gp1 != NULL) {
		gp->rank = 0;
		TAILQ_REMOVE(&geoms, gp, geoms);
		TAILQ_INSERT_TAIL(&geoms, gp, geoms);
	} else {
		gp1 = gp;
	}

	/* re-rank the rest of the sequence */
	for (; gp1 != NULL; gp1 = gp2) {
		gp1->rank = 0;
		m = 1;
		LIST_FOREACH(cp, &gp1->consumer, consumer) {
			if (cp->provider == NULL)
				continue;
			n = cp->provider->geom->rank;
			if (n == 0) {
				m = 0;
				break;
			} else if (n >= m)
				m = n + 1;
		}
		gp1->rank = m;
		gp2 = TAILQ_NEXT(gp1, geoms);

		/* got a rank, moving on */
		if (m != 0)
			continue;

		/* no rank to original geom means loop */
		if (gp == gp1)
			return (ELOOP);

		/* no rank, put it at the end and move on */
		TAILQ_REMOVE(&geoms, gp1, geoms);
		TAILQ_INSERT_TAIL(&geoms, gp1, geoms);
	}
	return (0);
}

int
g_attach(struct g_consumer *cp, struct g_provider *pp)
{
	int error;

	g_topology_assert();
	KASSERT(cp->provider == NULL, ("attach but attached"));
	cp->provider = pp;
	LIST_INSERT_HEAD(&pp->consumers, cp, consumers);
	error = redo_rank(cp->geom);
	if (error) {
		LIST_REMOVE(cp, consumers);
		cp->provider = NULL;
		redo_rank(cp->geom);
	}
	return (error);
}

void
g_detach(struct g_consumer *cp)
{
	struct g_provider *pp;

	g_trace(G_T_TOPOLOGY, "g_detach(%p)", cp);
	KASSERT(cp != (void*)0xd0d0d0d0, ("ARGH!"));
	g_topology_assert();
	KASSERT(cp->provider != NULL, ("detach but not attached"));
	KASSERT(cp->acr == 0, ("detach but nonzero acr"));
	KASSERT(cp->acw == 0, ("detach but nonzero acw"));
	KASSERT(cp->ace == 0, ("detach but nonzero ace"));
	KASSERT(cp->stat->nop == cp->stat->nend,
	    ("detach with active requests"));
	pp = cp->provider;
	LIST_REMOVE(cp, consumers);
	cp->provider = NULL;
	if (LIST_EMPTY(&pp->consumers)) {
		if (pp->geom->flags & G_GEOM_WITHER)
			g_destroy_provider(pp);
	}
	redo_rank(cp->geom);
}

/*
 * g_access_abs()
 *
 * Access-check with absolute new values:  Just fall through
 * and use the relative version.
 */
int
g_access_abs(struct g_consumer *cp, int acr, int acw, int ace)
{

	g_topology_assert();
	return (g_access_rel(cp,
	    acr - cp->acr,
	    acw - cp->acw,
	    ace - cp->ace));
}

/*
 * g_access_rel()
 *
 * Access-check with delta values.  The question asked is "can consumer
 * "cp" change the access counters by the relative amounts dc[rwe]?"
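 *
 * A purely illustrative example: a consumer currently holding [r1w0e0]
 * that also wants write access would ask for the delta
 *
 *	g_access_rel(cp, 0, 1, 0);
 *
 * and, if the provider's access method agrees, end up holding [r1w1e0].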
 */

int
g_access_rel(struct g_consumer *cp, int dcr, int dcw, int dce)
{
	struct g_provider *pp;
	int pr, pw, pe;
	int error;

	pp = cp->provider;

	g_trace(G_T_ACCESS, "g_access_rel(%p(%s), %d, %d, %d)",
	    cp, pp->name, dcr, dcw, dce);

	g_topology_assert();
	KASSERT(cp->provider != NULL, ("access but not attached"));
	KASSERT(cp->acr + dcr >= 0, ("access resulting in negative acr"));
	KASSERT(cp->acw + dcw >= 0, ("access resulting in negative acw"));
	KASSERT(cp->ace + dce >= 0, ("access resulting in negative ace"));
	KASSERT(pp->geom->access != NULL, ("NULL geom->access"));

	/*
	 * If our class cares about being spoiled, and we have been, we
	 * are probably just ahead of the event telling us that.  Fail
	 * now rather than having to unravel this later.
	 */
	if (cp->geom->spoiled != NULL && cp->spoiled) {
		KASSERT(dcr >= 0, ("spoiled but dcr = %d", dcr));
		KASSERT(dcw >= 0, ("spoiled but dcw = %d", dcw));
		KASSERT(dce >= 0, ("spoiled but dce = %d", dce));
		KASSERT(cp->acr == 0, ("spoiled but cp->acr = %d", cp->acr));
		KASSERT(cp->acw == 0, ("spoiled but cp->acw = %d", cp->acw));
		KASSERT(cp->ace == 0, ("spoiled but cp->ace = %d", cp->ace));
		return (ENXIO);
	}

	/*
	 * Figure out what counts the provider would have had, if this
	 * consumer had (r0w0e0) at this time.
	 */
	pr = pp->acr - cp->acr;
	pw = pp->acw - cp->acw;
	pe = pp->ace - cp->ace;

	g_trace(G_T_ACCESS,
	    "open delta:[r%dw%de%d] old:[r%dw%de%d] provider:[r%dw%de%d] %p(%s)",
	    dcr, dcw, dce,
	    cp->acr, cp->acw, cp->ace,
	    pp->acr, pp->acw, pp->ace,
	    pp, pp->name);

	/* If foot-shooting is enabled, any open on rank#1 is OK */
	if ((g_debugflags & 16) && pp->geom->rank == 1)
		;
	/* If we try exclusive but already write: fail */
	else if (dce > 0 && pw > 0)
		return (EPERM);
	/* If we try write but already exclusive: fail */
	else if (dcw > 0 && pe > 0)
		return (EPERM);
	/* If we try to open more but provider is error'ed: fail */
	else if ((dcr > 0 || dcw > 0 || dce > 0) && pp->error != 0)
		return (pp->error);

	/* Ok then... */

	/*
	 * If we open first write, spoil any partner consumers.
	 * If we close last write, trigger re-taste.
	 */
	if (pp->acw == 0 && dcw != 0)
		g_spoil(pp, cp);
	else if (pp->acw != 0 && pp->acw == -dcw &&
	    !(pp->geom->flags & G_GEOM_WITHER))
		g_post_event(EV_NEW_PROVIDER, NULL, NULL, pp, NULL);

	error = pp->geom->access(pp, dcr, dcw, dce);
	if (!error) {
		pp->acr += dcr;
		pp->acw += dcw;
		pp->ace += dce;
		cp->acr += dcr;
		cp->acw += dcw;
		cp->ace += dce;
	}
	return (error);
}

int
g_handleattr_int(struct bio *bp, const char *attribute, int val)
{

	return (g_handleattr(bp, attribute, &val, sizeof val));
}

int
g_handleattr_off_t(struct bio *bp, const char *attribute, off_t val)
{

	return (g_handleattr(bp, attribute, &val, sizeof val));
}

int
g_handleattr(struct bio *bp, const char *attribute, void *val, int len)
{
	int error;

	if (strcmp(bp->bio_attribute, attribute))
		return (0);
	if (bp->bio_length != len) {
		printf("bio_length %jd len %d -> EFAULT\n",
		    (intmax_t)bp->bio_length, len);
		error = EFAULT;
	} else {
		error = 0;
		bcopy(val, bp->bio_data, len);
		bp->bio_completed = len;
	}
	g_io_deliver(bp, error);
	return (1);
}

int
g_std_access(struct g_provider *pp __unused,
    int dr __unused, int dw __unused, int de __unused)
{

	return (0);
}

void
g_std_done(struct bio *bp)
{
	struct bio *bp2;

	bp2 = bp->bio_parent;
	if (bp2->bio_error == 0)
		bp2->bio_error = bp->bio_error;
	bp2->bio_completed += bp->bio_completed;
	g_destroy_bio(bp);
	bp2->bio_inbed++;
	if (bp2->bio_children == bp2->bio_inbed)
		g_io_deliver(bp2, bp2->bio_error);
}

/* XXX: maybe this is only g_slice_spoiled */

void
g_std_spoiled(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_trace(G_T_TOPOLOGY, "g_std_spoiled(%p)", cp);
	g_topology_assert();
	g_detach(cp);
	gp = cp->geom;
	LIST_FOREACH(pp, &gp->provider, provider)
		g_orphan_provider(pp, ENXIO);
	g_destroy_consumer(cp);
	if (LIST_EMPTY(&gp->provider) && LIST_EMPTY(&gp->consumer))
		g_destroy_geom(gp);
	else
		gp->flags |= G_GEOM_WITHER;
}

/*
 * Spoiling happens when a provider is opened for writing, but consumers
 * which are configured by in-band data are attached (slicers for instance).
 * Since the write might potentially change the in-band data, such consumers
 * need to re-evaluate their existence after the writing session closes.
 * We do this by (offering to) tear them down when the open for write happens
 * in return for a re-taste when it closes again.
 * Together with the fact that such consumers grab an 'e' bit whenever they
 * are open, regardless of mode, this ends up DTRT.
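 *
 * In code terms the sequence is roughly: the first write-open makes
 * g_access_rel() call g_spoil(), which marks the sibling consumers as
 * spoiled and posts an EV_SPOILED event; the class's spoiled method
 * (g_std_spoiled() for many classes) then detaches and withers the geom,
 * and closing the last writer posts an EV_NEW_PROVIDER re-taste.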
 */

void
g_spoil(struct g_provider *pp, struct g_consumer *cp)
{
	struct g_consumer *cp2;

	g_topology_assert();

	if (!strcmp(pp->name, "geom.ctl"))
		return;
	LIST_FOREACH(cp2, &pp->consumers, consumers) {
		if (cp2 == cp)
			continue;
/*
		KASSERT(cp2->acr == 0, ("spoiling cp->acr = %d", cp2->acr));
		KASSERT(cp2->acw == 0, ("spoiling cp->acw = %d", cp2->acw));
*/
		KASSERT(cp2->ace == 0, ("spoiling cp->ace = %d", cp2->ace));
		cp2->spoiled++;
	}
	g_post_event(EV_SPOILED, NULL, NULL, pp, cp);
}

static struct g_class *
g_class_by_name(const char *name)
{
	struct g_class *mp;

	g_trace(G_T_TOPOLOGY, "g_class_by_name(%s)", name);
	g_topology_assert();
	LIST_FOREACH(mp, &g_classes, class)
		if (!strcmp(mp->name, name))
			return (mp);
	return (NULL);
}

struct g_geom *
g_insert_geom(const char *class, struct g_consumer *cp)
{
	struct g_class *mp;
	struct g_geom *gp;
	struct g_provider *pp, *pp2;
	struct g_consumer *cp2;
	int error;

	g_trace(G_T_TOPOLOGY, "g_insert_geom(%s, %p)", class, cp);
	g_topology_assert();
	KASSERT(cp->provider != NULL, ("g_insert_geom but not attached"));
	/* XXX: check for events ?? */
	mp = g_class_by_name(class);
	if (mp == NULL)
		return (NULL);
	if (mp->config == NULL)
		return (NULL);
	pp = cp->provider;
	gp = mp->taste(mp, pp, G_TF_TRANSPARENT);
	if (gp == NULL)
		return (NULL);
	pp2 = LIST_FIRST(&gp->provider);
	cp2 = LIST_FIRST(&gp->consumer);
	cp2->acr += pp->acr;
	cp2->acw += pp->acw;
	cp2->ace += pp->ace;
	pp2->acr += pp->acr;
	pp2->acw += pp->acw;
	pp2->ace += pp->ace;
	LIST_REMOVE(cp, consumers);
	LIST_INSERT_HEAD(&pp2->consumers, cp, consumers);
	cp->provider = pp2;
	error = redo_rank(gp);
	KASSERT(error == 0, ("redo_rank failed in g_insert_geom"));
	return (gp);
}

int
g_getattr__(const char *attr, struct g_consumer *cp, void *var, int len)
{
	int error, i;

	i = len;
	error = g_io_getattr(attr, cp, &i, var);
	if (error)
		return (error);
	if (i != len)
		return (EINVAL);
	return (0);
}

/*
 * Check if the given pointer is a live object
 */
void
g_sanity(void *ptr)
{
	struct g_class *mp;
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_provider *pp;

	if (!(g_debugflags & 0x8))
		return;
	LIST_FOREACH(mp, &g_classes, class) {
		KASSERT(mp != ptr, ("Ptr is live class"));
		KASSERT(mp->protect == 0x20016600,
		    ("corrupt class %p %x", mp, mp->protect));
		LIST_FOREACH(gp, &mp->geom, geom) {
			KASSERT(gp != ptr, ("Ptr is live geom"));
			KASSERT(gp->protect == 0x20016601,
			    ("corrupt geom, %p %x", gp, gp->protect));
			KASSERT(gp->name != ptr, ("Ptr is live geom's name"));
			LIST_FOREACH(cp, &gp->consumer, consumer) {
				KASSERT(cp != ptr, ("Ptr is live consumer"));
				KASSERT(cp->protect == 0x20016602,
				    ("corrupt consumer %p %x",
				    cp, cp->protect));
			}
			LIST_FOREACH(pp, &gp->provider, provider) {
				KASSERT(pp != ptr, ("Ptr is live provider"));
				KASSERT(pp->protect == 0x20016603,
				    ("corrupt provider %p %x",
				    pp, pp->protect));
			}
		}
	}
}

#ifdef _KERNEL
/*
 * The g_id*() functions below look an object up either by its numeric id
 * (p->len == 0) or by a name copied in from userland.
 */
struct g_class *
g_idclass(struct geomidorname *p)
{
	struct g_class *mp;
	char *n;

	if (p->len == 0) {
		LIST_FOREACH(mp, &g_classes, class)
			if ((uintptr_t)mp == p->u.id)
				return (mp);
		return (NULL);
	}
	n = g_malloc(p->len + 1, M_WAITOK);
	if (copyin(p->u.name, n, p->len) == 0) {
		n[p->len] = '\0';
		LIST_FOREACH(mp, &g_classes, class)
			if (!bcmp(n, mp->name, p->len + 1)) {
				g_free(n);
				return (mp);
			}
	}
	g_free(n);
	return (NULL);
}

struct g_geom *
g_idgeom(struct geomidorname *p)
{
	struct g_class *mp;
	struct g_geom *gp;
	char *n;

	if (p->len == 0) {
		LIST_FOREACH(mp, &g_classes, class)
			LIST_FOREACH(gp, &mp->geom, geom)
				if ((uintptr_t)gp == p->u.id)
					return (gp);
		return (NULL);
	}
	n = g_malloc(p->len + 1, M_WAITOK);
	if (copyin(p->u.name, n, p->len) == 0) {
		n[p->len] = '\0';
		LIST_FOREACH(mp, &g_classes, class)
			LIST_FOREACH(gp, &mp->geom, geom)
				if (!bcmp(n, gp->name, p->len + 1)) {
					g_free(n);
					return (gp);
				}
	}
	g_free(n);
	return (NULL);
}

struct g_provider *
g_idprovider(struct geomidorname *p)
{
	struct g_class *mp;
	struct g_geom *gp;
	struct g_provider *pp;
	char *n;

	if (p->len == 0) {
		LIST_FOREACH(mp, &g_classes, class)
			LIST_FOREACH(gp, &mp->geom, geom)
				LIST_FOREACH(pp, &gp->provider, provider)
					if ((uintptr_t)pp == p->u.id)
						return (pp);
		return (NULL);
	}
	n = g_malloc(p->len + 1, M_WAITOK);
	if (copyin(p->u.name, n, p->len) == 0) {
		n[p->len] = '\0';
		LIST_FOREACH(mp, &g_classes, class)
			LIST_FOREACH(gp, &mp->geom, geom)
				LIST_FOREACH(pp, &gp->provider, provider)
					if (!bcmp(n, pp->name, p->len + 1)) {
						g_free(n);
						return (pp);
					}
	}
	g_free(n);
	return (NULL);
}
#endif /* _KERNEL */