1 /*- 2 * Copyright (c) 2005 Pawel Jakub Dawidek <pjd@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/kernel.h> 33 #include <sys/module.h> 34 #include <sys/lock.h> 35 #include <sys/mutex.h> 36 #include <sys/bio.h> 37 #include <sys/sysctl.h> 38 #include <sys/malloc.h> 39 #include <vm/uma.h> 40 #include <geom/geom.h> 41 #include <geom/shsec/g_shsec.h> 42 43 44 static MALLOC_DEFINE(M_SHSEC, "shsec_data", "GEOM_SHSEC Data"); 45 46 static uma_zone_t g_shsec_zone; 47 48 static int g_shsec_destroy(struct g_shsec_softc *sc, boolean_t force); 49 static int g_shsec_destroy_geom(struct gctl_req *req, struct g_class *mp, 50 struct g_geom *gp); 51 52 static g_taste_t g_shsec_taste; 53 static g_ctl_req_t g_shsec_config; 54 static g_dumpconf_t g_shsec_dumpconf; 55 static g_init_t g_shsec_init; 56 static g_fini_t g_shsec_fini; 57 58 struct g_class g_shsec_class = { 59 .name = G_SHSEC_CLASS_NAME, 60 .version = G_VERSION, 61 .ctlreq = g_shsec_config, 62 .taste = g_shsec_taste, 63 .destroy_geom = g_shsec_destroy_geom, 64 .init = g_shsec_init, 65 .fini = g_shsec_fini 66 }; 67 68 SYSCTL_DECL(_kern_geom); 69 SYSCTL_NODE(_kern_geom, OID_AUTO, shsec, CTLFLAG_RW, 0, "GEOM_SHSEC stuff"); 70 static u_int g_shsec_debug = 0; 71 TUNABLE_INT("kern.geom.shsec.debug", &g_shsec_debug); 72 SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, debug, CTLFLAG_RW, &g_shsec_debug, 0, 73 "Debug level"); 74 static u_int g_shsec_maxmem = MAXPHYS * 100; 75 TUNABLE_INT("kern.geom.shsec.maxmem", &g_shsec_maxmem); 76 SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, maxmem, CTLFLAG_RD, &g_shsec_maxmem, 77 0, "Maximum memory that can be allocated for I/O (in bytes)"); 78 static u_int g_shsec_alloc_failed = 0; 79 SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, alloc_failed, CTLFLAG_RD, 80 &g_shsec_alloc_failed, 0, "How many times I/O allocation failed"); 81 82 /* 83 * Greatest Common Divisor. 84 */ 85 static u_int 86 gcd(u_int a, u_int b) 87 { 88 u_int c; 89 90 while (b != 0) { 91 c = a; 92 a = b; 93 b = (c % b); 94 } 95 return (a); 96 } 97 98 /* 99 * Least Common Multiple. 100 */ 101 static u_int 102 lcm(u_int a, u_int b) 103 { 104 105 return ((a * b) / gcd(a, b)); 106 } 107 108 static void 109 g_shsec_init(struct g_class *mp __unused) 110 { 111 112 g_shsec_zone = uma_zcreate("g_shsec_zone", MAXPHYS, NULL, NULL, NULL, 113 NULL, 0, 0); 114 g_shsec_maxmem -= g_shsec_maxmem % MAXPHYS; 115 uma_zone_set_max(g_shsec_zone, g_shsec_maxmem / MAXPHYS); 116 } 117 118 static void 119 g_shsec_fini(struct g_class *mp __unused) 120 { 121 122 uma_zdestroy(g_shsec_zone); 123 } 124 125 /* 126 * Return the number of valid disks. 127 */ 128 static u_int 129 g_shsec_nvalid(struct g_shsec_softc *sc) 130 { 131 u_int i, no; 132 133 no = 0; 134 for (i = 0; i < sc->sc_ndisks; i++) { 135 if (sc->sc_disks[i] != NULL) 136 no++; 137 } 138 139 return (no); 140 } 141 142 static void 143 g_shsec_remove_disk(struct g_consumer *cp) 144 { 145 struct g_shsec_softc *sc; 146 u_int no; 147 148 KASSERT(cp != NULL, ("Non-valid disk in %s.", __func__)); 149 sc = (struct g_shsec_softc *)cp->private; 150 KASSERT(sc != NULL, ("NULL sc in %s.", __func__)); 151 no = cp->index; 152 153 G_SHSEC_DEBUG(0, "Disk %s removed from %s.", cp->provider->name, 154 sc->sc_name); 155 156 sc->sc_disks[no] = NULL; 157 if (sc->sc_provider != NULL) { 158 g_orphan_provider(sc->sc_provider, ENXIO); 159 sc->sc_provider = NULL; 160 G_SHSEC_DEBUG(0, "Device %s removed.", sc->sc_name); 161 } 162 163 if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) 164 g_access(cp, -cp->acr, -cp->acw, -cp->ace); 165 g_detach(cp); 166 g_destroy_consumer(cp); 167 } 168 169 static void 170 g_shsec_orphan(struct g_consumer *cp) 171 { 172 struct g_shsec_softc *sc; 173 struct g_geom *gp; 174 175 g_topology_assert(); 176 gp = cp->geom; 177 sc = gp->softc; 178 if (sc == NULL) 179 return; 180 181 g_shsec_remove_disk(cp); 182 /* If there are no valid disks anymore, remove device. */ 183 if (g_shsec_nvalid(sc) == 0) 184 g_shsec_destroy(sc, 1); 185 } 186 187 static int 188 g_shsec_access(struct g_provider *pp, int dr, int dw, int de) 189 { 190 struct g_consumer *cp1, *cp2; 191 struct g_shsec_softc *sc; 192 struct g_geom *gp; 193 int error; 194 195 gp = pp->geom; 196 sc = gp->softc; 197 198 if (sc == NULL) { 199 /* 200 * It looks like geom is being withered. 201 * In that case we allow only negative requests. 202 */ 203 KASSERT(dr <= 0 && dw <= 0 && de <= 0, 204 ("Positive access request (device=%s).", pp->name)); 205 if ((pp->acr + dr) == 0 && (pp->acw + dw) == 0 && 206 (pp->ace + de) == 0) { 207 G_SHSEC_DEBUG(0, "Device %s definitely destroyed.", 208 gp->name); 209 } 210 return (0); 211 } 212 213 /* On first open, grab an extra "exclusive" bit */ 214 if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0) 215 de++; 216 /* ... and let go of it on last close */ 217 if ((pp->acr + dr) == 0 && (pp->acw + dw) == 0 && (pp->ace + de) == 0) 218 de--; 219 220 error = ENXIO; 221 LIST_FOREACH(cp1, &gp->consumer, consumer) { 222 error = g_access(cp1, dr, dw, de); 223 if (error == 0) 224 continue; 225 /* 226 * If we fail here, backout all previous changes. 227 */ 228 LIST_FOREACH(cp2, &gp->consumer, consumer) { 229 if (cp1 == cp2) 230 return (error); 231 g_access(cp2, -dr, -dw, -de); 232 } 233 /* NOTREACHED */ 234 } 235 236 return (error); 237 } 238 239 static void 240 g_shsec_xor1(uint32_t *src, uint32_t *dst, ssize_t len) 241 { 242 243 for (; len > 0; len -= sizeof(uint32_t), dst++) 244 *dst = *dst ^ *src++; 245 KASSERT(len == 0, ("len != 0 (len=%zd)", len)); 246 } 247 248 static void 249 g_shsec_done(struct bio *bp) 250 { 251 struct g_shsec_softc *sc; 252 struct bio *pbp; 253 254 pbp = bp->bio_parent; 255 sc = pbp->bio_to->geom->softc; 256 if (bp->bio_error == 0) 257 G_SHSEC_LOGREQ(2, bp, "Request done."); 258 else { 259 G_SHSEC_LOGREQ(0, bp, "Request failed (error=%d).", 260 bp->bio_error); 261 if (pbp->bio_error == 0) 262 pbp->bio_error = bp->bio_error; 263 } 264 if (pbp->bio_cmd == BIO_READ) { 265 if ((pbp->bio_pflags & G_SHSEC_BFLAG_FIRST) != 0) { 266 bcopy(bp->bio_data, pbp->bio_data, pbp->bio_length); 267 pbp->bio_pflags = 0; 268 } else { 269 g_shsec_xor1((uint32_t *)bp->bio_data, 270 (uint32_t *)pbp->bio_data, 271 (ssize_t)pbp->bio_length); 272 } 273 } 274 bzero(bp->bio_data, bp->bio_length); 275 uma_zfree(g_shsec_zone, bp->bio_data); 276 g_destroy_bio(bp); 277 pbp->bio_inbed++; 278 if (pbp->bio_children == pbp->bio_inbed) { 279 pbp->bio_completed = pbp->bio_length; 280 g_io_deliver(pbp, pbp->bio_error); 281 } 282 } 283 284 static void 285 g_shsec_xor2(uint32_t *rand, uint32_t *dst, ssize_t len) 286 { 287 288 for (; len > 0; len -= sizeof(uint32_t), dst++) { 289 *rand = arc4random(); 290 *dst = *dst ^ *rand++; 291 } 292 KASSERT(len == 0, ("len != 0 (len=%zd)", len)); 293 } 294 295 static void 296 g_shsec_start(struct bio *bp) 297 { 298 TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue); 299 struct g_shsec_softc *sc; 300 struct bio *cbp; 301 uint32_t *dst; 302 ssize_t len; 303 u_int no; 304 int error; 305 306 sc = bp->bio_to->geom->softc; 307 /* 308 * If sc == NULL, provider's error should be set and g_shsec_start() 309 * should not be called at all. 310 */ 311 KASSERT(sc != NULL, 312 ("Provider's error should be set (error=%d)(device=%s).", 313 bp->bio_to->error, bp->bio_to->name)); 314 315 G_SHSEC_LOGREQ(2, bp, "Request received."); 316 317 switch (bp->bio_cmd) { 318 case BIO_READ: 319 case BIO_WRITE: 320 case BIO_FLUSH: 321 /* 322 * Only those requests are supported. 323 */ 324 break; 325 case BIO_DELETE: 326 case BIO_GETATTR: 327 /* To which provider it should be delivered? */ 328 default: 329 g_io_deliver(bp, EOPNOTSUPP); 330 return; 331 } 332 333 /* 334 * Allocate all bios first and calculate XOR. 335 */ 336 dst = NULL; 337 len = bp->bio_length; 338 if (bp->bio_cmd == BIO_READ) 339 bp->bio_pflags = G_SHSEC_BFLAG_FIRST; 340 for (no = 0; no < sc->sc_ndisks; no++) { 341 cbp = g_clone_bio(bp); 342 if (cbp == NULL) { 343 error = ENOMEM; 344 goto failure; 345 } 346 TAILQ_INSERT_TAIL(&queue, cbp, bio_queue); 347 348 /* 349 * Fill in the component buf structure. 350 */ 351 cbp->bio_done = g_shsec_done; 352 cbp->bio_data = uma_zalloc(g_shsec_zone, M_NOWAIT); 353 if (cbp->bio_data == NULL) { 354 g_shsec_alloc_failed++; 355 error = ENOMEM; 356 goto failure; 357 } 358 cbp->bio_caller2 = sc->sc_disks[no]; 359 if (bp->bio_cmd == BIO_WRITE) { 360 if (no == 0) { 361 dst = (uint32_t *)cbp->bio_data; 362 bcopy(bp->bio_data, dst, len); 363 } else { 364 g_shsec_xor2((uint32_t *)cbp->bio_data, dst, 365 len); 366 } 367 } 368 } 369 /* 370 * Fire off all allocated requests! 371 */ 372 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 373 struct g_consumer *cp; 374 375 TAILQ_REMOVE(&queue, cbp, bio_queue); 376 cp = cbp->bio_caller2; 377 cbp->bio_caller2 = NULL; 378 cbp->bio_to = cp->provider; 379 G_SHSEC_LOGREQ(2, cbp, "Sending request."); 380 g_io_request(cbp, cp); 381 } 382 return; 383 failure: 384 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 385 TAILQ_REMOVE(&queue, cbp, bio_queue); 386 bp->bio_children--; 387 if (cbp->bio_data != NULL) { 388 bzero(cbp->bio_data, cbp->bio_length); 389 uma_zfree(g_shsec_zone, cbp->bio_data); 390 } 391 g_destroy_bio(cbp); 392 } 393 if (bp->bio_error == 0) 394 bp->bio_error = error; 395 g_io_deliver(bp, bp->bio_error); 396 } 397 398 static void 399 g_shsec_check_and_run(struct g_shsec_softc *sc) 400 { 401 off_t mediasize, ms; 402 u_int no, sectorsize = 0; 403 404 if (g_shsec_nvalid(sc) != sc->sc_ndisks) 405 return; 406 407 sc->sc_provider = g_new_providerf(sc->sc_geom, "shsec/%s", sc->sc_name); 408 /* 409 * Find the smallest disk. 410 */ 411 mediasize = sc->sc_disks[0]->provider->mediasize; 412 mediasize -= sc->sc_disks[0]->provider->sectorsize; 413 sectorsize = sc->sc_disks[0]->provider->sectorsize; 414 for (no = 1; no < sc->sc_ndisks; no++) { 415 ms = sc->sc_disks[no]->provider->mediasize; 416 ms -= sc->sc_disks[no]->provider->sectorsize; 417 if (ms < mediasize) 418 mediasize = ms; 419 sectorsize = lcm(sectorsize, 420 sc->sc_disks[no]->provider->sectorsize); 421 } 422 sc->sc_provider->sectorsize = sectorsize; 423 sc->sc_provider->mediasize = mediasize; 424 g_error_provider(sc->sc_provider, 0); 425 426 G_SHSEC_DEBUG(0, "Device %s activated.", sc->sc_name); 427 } 428 429 static int 430 g_shsec_read_metadata(struct g_consumer *cp, struct g_shsec_metadata *md) 431 { 432 struct g_provider *pp; 433 u_char *buf; 434 int error; 435 436 g_topology_assert(); 437 438 error = g_access(cp, 1, 0, 0); 439 if (error != 0) 440 return (error); 441 pp = cp->provider; 442 g_topology_unlock(); 443 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 444 &error); 445 g_topology_lock(); 446 g_access(cp, -1, 0, 0); 447 if (buf == NULL) 448 return (error); 449 450 /* Decode metadata. */ 451 shsec_metadata_decode(buf, md); 452 g_free(buf); 453 454 return (0); 455 } 456 457 /* 458 * Add disk to given device. 459 */ 460 static int 461 g_shsec_add_disk(struct g_shsec_softc *sc, struct g_provider *pp, u_int no) 462 { 463 struct g_consumer *cp, *fcp; 464 struct g_geom *gp; 465 struct g_shsec_metadata md; 466 int error; 467 468 /* Metadata corrupted? */ 469 if (no >= sc->sc_ndisks) 470 return (EINVAL); 471 472 /* Check if disk is not already attached. */ 473 if (sc->sc_disks[no] != NULL) 474 return (EEXIST); 475 476 gp = sc->sc_geom; 477 fcp = LIST_FIRST(&gp->consumer); 478 479 cp = g_new_consumer(gp); 480 error = g_attach(cp, pp); 481 if (error != 0) { 482 g_destroy_consumer(cp); 483 return (error); 484 } 485 486 if (fcp != NULL && (fcp->acr > 0 || fcp->acw > 0 || fcp->ace > 0)) { 487 error = g_access(cp, fcp->acr, fcp->acw, fcp->ace); 488 if (error != 0) { 489 g_detach(cp); 490 g_destroy_consumer(cp); 491 return (error); 492 } 493 } 494 495 /* Reread metadata. */ 496 error = g_shsec_read_metadata(cp, &md); 497 if (error != 0) 498 goto fail; 499 500 if (strcmp(md.md_magic, G_SHSEC_MAGIC) != 0 || 501 strcmp(md.md_name, sc->sc_name) != 0 || md.md_id != sc->sc_id) { 502 G_SHSEC_DEBUG(0, "Metadata on %s changed.", pp->name); 503 goto fail; 504 } 505 506 cp->private = sc; 507 cp->index = no; 508 sc->sc_disks[no] = cp; 509 510 G_SHSEC_DEBUG(0, "Disk %s attached to %s.", pp->name, sc->sc_name); 511 512 g_shsec_check_and_run(sc); 513 514 return (0); 515 fail: 516 if (fcp != NULL && (fcp->acr > 0 || fcp->acw > 0 || fcp->ace > 0)) 517 g_access(cp, -fcp->acr, -fcp->acw, -fcp->ace); 518 g_detach(cp); 519 g_destroy_consumer(cp); 520 return (error); 521 } 522 523 static struct g_geom * 524 g_shsec_create(struct g_class *mp, const struct g_shsec_metadata *md) 525 { 526 struct g_shsec_softc *sc; 527 struct g_geom *gp; 528 u_int no; 529 530 G_SHSEC_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id); 531 532 /* Two disks is minimum. */ 533 if (md->md_all < 2) { 534 G_SHSEC_DEBUG(0, "Too few disks defined for %s.", md->md_name); 535 return (NULL); 536 } 537 538 /* Check for duplicate unit */ 539 LIST_FOREACH(gp, &mp->geom, geom) { 540 sc = gp->softc; 541 if (sc != NULL && strcmp(sc->sc_name, md->md_name) == 0) { 542 G_SHSEC_DEBUG(0, "Device %s already configured.", 543 sc->sc_name); 544 return (NULL); 545 } 546 } 547 gp = g_new_geomf(mp, "%s", md->md_name); 548 gp->softc = NULL; /* for a moment */ 549 550 sc = malloc(sizeof(*sc), M_SHSEC, M_WAITOK | M_ZERO); 551 gp->start = g_shsec_start; 552 gp->spoiled = g_shsec_orphan; 553 gp->orphan = g_shsec_orphan; 554 gp->access = g_shsec_access; 555 gp->dumpconf = g_shsec_dumpconf; 556 557 sc->sc_id = md->md_id; 558 sc->sc_ndisks = md->md_all; 559 sc->sc_disks = malloc(sizeof(struct g_consumer *) * sc->sc_ndisks, 560 M_SHSEC, M_WAITOK | M_ZERO); 561 for (no = 0; no < sc->sc_ndisks; no++) 562 sc->sc_disks[no] = NULL; 563 564 gp->softc = sc; 565 sc->sc_geom = gp; 566 sc->sc_provider = NULL; 567 568 G_SHSEC_DEBUG(0, "Device %s created (id=%u).", sc->sc_name, sc->sc_id); 569 570 return (gp); 571 } 572 573 static int 574 g_shsec_destroy(struct g_shsec_softc *sc, boolean_t force) 575 { 576 struct g_provider *pp; 577 struct g_geom *gp; 578 u_int no; 579 580 g_topology_assert(); 581 582 if (sc == NULL) 583 return (ENXIO); 584 585 pp = sc->sc_provider; 586 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { 587 if (force) { 588 G_SHSEC_DEBUG(0, "Device %s is still open, so it " 589 "can't be definitely removed.", pp->name); 590 } else { 591 G_SHSEC_DEBUG(1, 592 "Device %s is still open (r%dw%de%d).", pp->name, 593 pp->acr, pp->acw, pp->ace); 594 return (EBUSY); 595 } 596 } 597 598 for (no = 0; no < sc->sc_ndisks; no++) { 599 if (sc->sc_disks[no] != NULL) 600 g_shsec_remove_disk(sc->sc_disks[no]); 601 } 602 603 gp = sc->sc_geom; 604 gp->softc = NULL; 605 KASSERT(sc->sc_provider == NULL, ("Provider still exists? (device=%s)", 606 gp->name)); 607 free(sc->sc_disks, M_SHSEC); 608 free(sc, M_SHSEC); 609 610 pp = LIST_FIRST(&gp->provider); 611 if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)) 612 G_SHSEC_DEBUG(0, "Device %s destroyed.", gp->name); 613 614 g_wither_geom(gp, ENXIO); 615 616 return (0); 617 } 618 619 static int 620 g_shsec_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused, 621 struct g_geom *gp) 622 { 623 struct g_shsec_softc *sc; 624 625 sc = gp->softc; 626 return (g_shsec_destroy(sc, 0)); 627 } 628 629 static struct g_geom * 630 g_shsec_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 631 { 632 struct g_shsec_metadata md; 633 struct g_shsec_softc *sc; 634 struct g_consumer *cp; 635 struct g_geom *gp; 636 int error; 637 638 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); 639 g_topology_assert(); 640 641 G_SHSEC_DEBUG(3, "Tasting %s.", pp->name); 642 643 gp = g_new_geomf(mp, "shsec:taste"); 644 gp->start = g_shsec_start; 645 gp->access = g_shsec_access; 646 gp->orphan = g_shsec_orphan; 647 cp = g_new_consumer(gp); 648 g_attach(cp, pp); 649 error = g_shsec_read_metadata(cp, &md); 650 g_detach(cp); 651 g_destroy_consumer(cp); 652 g_destroy_geom(gp); 653 if (error != 0) 654 return (NULL); 655 gp = NULL; 656 657 if (strcmp(md.md_magic, G_SHSEC_MAGIC) != 0) 658 return (NULL); 659 if (md.md_version > G_SHSEC_VERSION) { 660 G_SHSEC_DEBUG(0, "Kernel module is too old to handle %s.\n", 661 pp->name); 662 return (NULL); 663 } 664 /* 665 * Backward compatibility: 666 */ 667 /* There was no md_provsize field in earlier versions of metadata. */ 668 if (md.md_version < 1) 669 md.md_provsize = pp->mediasize; 670 671 if (md.md_provider[0] != '\0' && strcmp(md.md_provider, pp->name) != 0) 672 return (NULL); 673 if (md.md_provsize != pp->mediasize) 674 return (NULL); 675 676 /* 677 * Let's check if device already exists. 678 */ 679 sc = NULL; 680 LIST_FOREACH(gp, &mp->geom, geom) { 681 sc = gp->softc; 682 if (sc == NULL) 683 continue; 684 if (strcmp(md.md_name, sc->sc_name) != 0) 685 continue; 686 if (md.md_id != sc->sc_id) 687 continue; 688 break; 689 } 690 if (gp != NULL) { 691 G_SHSEC_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 692 error = g_shsec_add_disk(sc, pp, md.md_no); 693 if (error != 0) { 694 G_SHSEC_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 695 pp->name, gp->name, error); 696 return (NULL); 697 } 698 } else { 699 gp = g_shsec_create(mp, &md); 700 if (gp == NULL) { 701 G_SHSEC_DEBUG(0, "Cannot create device %s.", md.md_name); 702 return (NULL); 703 } 704 sc = gp->softc; 705 G_SHSEC_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 706 error = g_shsec_add_disk(sc, pp, md.md_no); 707 if (error != 0) { 708 G_SHSEC_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 709 pp->name, gp->name, error); 710 g_shsec_destroy(sc, 1); 711 return (NULL); 712 } 713 } 714 return (gp); 715 } 716 717 static struct g_shsec_softc * 718 g_shsec_find_device(struct g_class *mp, const char *name) 719 { 720 struct g_shsec_softc *sc; 721 struct g_geom *gp; 722 723 LIST_FOREACH(gp, &mp->geom, geom) { 724 sc = gp->softc; 725 if (sc == NULL) 726 continue; 727 if (strcmp(sc->sc_name, name) == 0) 728 return (sc); 729 } 730 return (NULL); 731 } 732 733 static void 734 g_shsec_ctl_destroy(struct gctl_req *req, struct g_class *mp) 735 { 736 struct g_shsec_softc *sc; 737 int *force, *nargs, error; 738 const char *name; 739 char param[16]; 740 u_int i; 741 742 g_topology_assert(); 743 744 nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs)); 745 if (nargs == NULL) { 746 gctl_error(req, "No '%s' argument.", "nargs"); 747 return; 748 } 749 if (*nargs <= 0) { 750 gctl_error(req, "Missing device(s)."); 751 return; 752 } 753 force = gctl_get_paraml(req, "force", sizeof(*force)); 754 if (force == NULL) { 755 gctl_error(req, "No '%s' argument.", "force"); 756 return; 757 } 758 759 for (i = 0; i < (u_int)*nargs; i++) { 760 snprintf(param, sizeof(param), "arg%u", i); 761 name = gctl_get_asciiparam(req, param); 762 if (name == NULL) { 763 gctl_error(req, "No 'arg%u' argument.", i); 764 return; 765 } 766 sc = g_shsec_find_device(mp, name); 767 if (sc == NULL) { 768 gctl_error(req, "No such device: %s.", name); 769 return; 770 } 771 error = g_shsec_destroy(sc, *force); 772 if (error != 0) { 773 gctl_error(req, "Cannot destroy device %s (error=%d).", 774 sc->sc_name, error); 775 return; 776 } 777 } 778 } 779 780 static void 781 g_shsec_config(struct gctl_req *req, struct g_class *mp, const char *verb) 782 { 783 uint32_t *version; 784 785 g_topology_assert(); 786 787 version = gctl_get_paraml(req, "version", sizeof(*version)); 788 if (version == NULL) { 789 gctl_error(req, "No '%s' argument.", "version"); 790 return; 791 } 792 if (*version != G_SHSEC_VERSION) { 793 gctl_error(req, "Userland and kernel parts are out of sync."); 794 return; 795 } 796 797 if (strcmp(verb, "stop") == 0) { 798 g_shsec_ctl_destroy(req, mp); 799 return; 800 } 801 802 gctl_error(req, "Unknown verb."); 803 } 804 805 static void 806 g_shsec_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 807 struct g_consumer *cp, struct g_provider *pp) 808 { 809 struct g_shsec_softc *sc; 810 811 sc = gp->softc; 812 if (sc == NULL) 813 return; 814 if (pp != NULL) { 815 /* Nothing here. */ 816 } else if (cp != NULL) { 817 sbuf_printf(sb, "%s<Number>%u</Number>\n", indent, 818 (u_int)cp->index); 819 } else { 820 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 821 sbuf_printf(sb, "%s<Status>Total=%u, Online=%u</Status>\n", 822 indent, sc->sc_ndisks, g_shsec_nvalid(sc)); 823 sbuf_printf(sb, "%s<State>", indent); 824 if (sc->sc_provider != NULL && sc->sc_provider->error == 0) 825 sbuf_printf(sb, "UP"); 826 else 827 sbuf_printf(sb, "DOWN"); 828 sbuf_printf(sb, "</State>\n"); 829 } 830 } 831 832 DECLARE_GEOM_CLASS(g_shsec_class, g_shsec); 833