1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2005 Pawel Jakub Dawidek <pjd@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/shsec/g_shsec.h>

FEATURE(geom_shsec, "GEOM shared secret device support");

/* Malloc type for softc and per-device consumer arrays. */
static MALLOC_DEFINE(M_SHSEC, "shsec_data", "GEOM_SHSEC Data");

/* UMA zone of maxphys-sized buffers used for per-component I/O data. */
static uma_zone_t g_shsec_zone;

static int g_shsec_destroy(struct g_shsec_softc *sc, boolean_t force);
static int g_shsec_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);

static g_taste_t g_shsec_taste;
static g_ctl_req_t g_shsec_config;
static g_dumpconf_t g_shsec_dumpconf;
static g_init_t g_shsec_init;
static g_fini_t g_shsec_fini;

/* GEOM class registration table; hooked up via DECLARE_GEOM_CLASS below. */
struct g_class g_shsec_class = {
	.name = G_SHSEC_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_shsec_config,
	.taste = g_shsec_taste,
	.destroy_geom = g_shsec_destroy_geom,
	.init = g_shsec_init,
	.fini = g_shsec_fini
};

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, shsec, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "GEOM_SHSEC stuff");
/* Debug verbosity; consumed by the G_SHSEC_DEBUG/G_SHSEC_LOGREQ macros. */
static u_int g_shsec_debug;
SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, debug, CTLFLAG_RWTUN, &g_shsec_debug, 0,
    "Debug level");
/*
 * Upper bound on memory used for in-flight I/O buffers.  Declared with
 * CTLFLAG_NOFETCH, so the loader tunable is fetched manually in
 * g_shsec_init().
 */
static u_long g_shsec_maxmem;
SYSCTL_ULONG(_kern_geom_shsec, OID_AUTO, maxmem,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &g_shsec_maxmem,
    0, "Maximum memory that can be allocated for I/O (in bytes)");
/* Counter of uma_zalloc() failures in g_shsec_start(). */
static u_int g_shsec_alloc_failed = 0;
SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, alloc_failed, CTLFLAG_RD,
    &g_shsec_alloc_failed, 0, "How many times I/O allocation failed");

/*
 * Greatest Common Divisor.
89 */ 90 static u_int 91 gcd(u_int a, u_int b) 92 { 93 u_int c; 94 95 while (b != 0) { 96 c = a; 97 a = b; 98 b = (c % b); 99 } 100 return (a); 101 } 102 103 /* 104 * Least Common Multiple. 105 */ 106 static u_int 107 lcm(u_int a, u_int b) 108 { 109 110 return ((a * b) / gcd(a, b)); 111 } 112 113 static void 114 g_shsec_init(struct g_class *mp __unused) 115 { 116 117 g_shsec_maxmem = maxphys * 100; 118 TUNABLE_ULONG_FETCH("kern.geom.shsec.maxmem,", &g_shsec_maxmem); 119 g_shsec_zone = uma_zcreate("g_shsec_zone", maxphys, NULL, NULL, NULL, 120 NULL, 0, 0); 121 g_shsec_maxmem -= g_shsec_maxmem % maxphys; 122 uma_zone_set_max(g_shsec_zone, g_shsec_maxmem / maxphys); 123 } 124 125 static void 126 g_shsec_fini(struct g_class *mp __unused) 127 { 128 129 uma_zdestroy(g_shsec_zone); 130 } 131 132 /* 133 * Return the number of valid disks. 134 */ 135 static u_int 136 g_shsec_nvalid(struct g_shsec_softc *sc) 137 { 138 u_int i, no; 139 140 no = 0; 141 for (i = 0; i < sc->sc_ndisks; i++) { 142 if (sc->sc_disks[i] != NULL) 143 no++; 144 } 145 146 return (no); 147 } 148 149 static void 150 g_shsec_remove_disk(struct g_consumer *cp) 151 { 152 struct g_shsec_softc *sc; 153 u_int no; 154 155 KASSERT(cp != NULL, ("Non-valid disk in %s.", __func__)); 156 sc = (struct g_shsec_softc *)cp->private; 157 KASSERT(sc != NULL, ("NULL sc in %s.", __func__)); 158 no = cp->index; 159 160 G_SHSEC_DEBUG(0, "Disk %s removed from %s.", cp->provider->name, 161 sc->sc_name); 162 163 sc->sc_disks[no] = NULL; 164 if (sc->sc_provider != NULL) { 165 g_wither_provider(sc->sc_provider, ENXIO); 166 sc->sc_provider = NULL; 167 G_SHSEC_DEBUG(0, "Device %s removed.", sc->sc_name); 168 } 169 170 if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) 171 return; 172 g_detach(cp); 173 g_destroy_consumer(cp); 174 } 175 176 static void 177 g_shsec_orphan(struct g_consumer *cp) 178 { 179 struct g_shsec_softc *sc; 180 struct g_geom *gp; 181 182 g_topology_assert(); 183 gp = cp->geom; 184 sc = gp->softc; 185 if (sc == NULL) 
186 return; 187 188 g_shsec_remove_disk(cp); 189 /* If there are no valid disks anymore, remove device. */ 190 if (LIST_EMPTY(&gp->consumer)) 191 g_shsec_destroy(sc, 1); 192 } 193 194 static int 195 g_shsec_access(struct g_provider *pp, int dr, int dw, int de) 196 { 197 struct g_consumer *cp1, *cp2, *tmp; 198 struct g_shsec_softc *sc; 199 struct g_geom *gp; 200 int error; 201 202 gp = pp->geom; 203 sc = gp->softc; 204 205 /* On first open, grab an extra "exclusive" bit */ 206 if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0) 207 de++; 208 /* ... and let go of it on last close */ 209 if ((pp->acr + dr) == 0 && (pp->acw + dw) == 0 && (pp->ace + de) == 0) 210 de--; 211 212 error = ENXIO; 213 LIST_FOREACH_SAFE(cp1, &gp->consumer, consumer, tmp) { 214 error = g_access(cp1, dr, dw, de); 215 if (error != 0) 216 goto fail; 217 if (cp1->acr == 0 && cp1->acw == 0 && cp1->ace == 0 && 218 cp1->flags & G_CF_ORPHAN) { 219 g_detach(cp1); 220 g_destroy_consumer(cp1); 221 } 222 } 223 224 /* If there are no valid disks anymore, remove device. */ 225 if (LIST_EMPTY(&gp->consumer)) 226 g_shsec_destroy(sc, 1); 227 228 return (error); 229 230 fail: 231 /* If we fail here, backout all previous changes. 
*/ 232 LIST_FOREACH(cp2, &gp->consumer, consumer) { 233 if (cp1 == cp2) 234 break; 235 g_access(cp2, -dr, -dw, -de); 236 } 237 return (error); 238 } 239 240 static void 241 g_shsec_xor1(uint32_t *src, uint32_t *dst, ssize_t len) 242 { 243 244 for (; len > 0; len -= sizeof(uint32_t), dst++) 245 *dst = *dst ^ *src++; 246 KASSERT(len == 0, ("len != 0 (len=%zd)", len)); 247 } 248 249 static void 250 g_shsec_done(struct bio *bp) 251 { 252 struct bio *pbp; 253 254 pbp = bp->bio_parent; 255 if (bp->bio_error == 0) 256 G_SHSEC_LOGREQ(2, bp, "Request done."); 257 else { 258 G_SHSEC_LOGREQ(0, bp, "Request failed (error=%d).", 259 bp->bio_error); 260 if (pbp->bio_error == 0) 261 pbp->bio_error = bp->bio_error; 262 } 263 if (pbp->bio_cmd == BIO_READ) { 264 if ((pbp->bio_pflags & G_SHSEC_BFLAG_FIRST) != 0) { 265 bcopy(bp->bio_data, pbp->bio_data, pbp->bio_length); 266 pbp->bio_pflags = 0; 267 } else { 268 g_shsec_xor1((uint32_t *)bp->bio_data, 269 (uint32_t *)pbp->bio_data, 270 (ssize_t)pbp->bio_length); 271 } 272 } 273 if (bp->bio_data != NULL) { 274 explicit_bzero(bp->bio_data, bp->bio_length); 275 uma_zfree(g_shsec_zone, bp->bio_data); 276 } 277 g_destroy_bio(bp); 278 pbp->bio_inbed++; 279 if (pbp->bio_children == pbp->bio_inbed) { 280 pbp->bio_completed = pbp->bio_length; 281 g_io_deliver(pbp, pbp->bio_error); 282 } 283 } 284 285 static void 286 g_shsec_xor2(uint32_t *rand, uint32_t *dst, ssize_t len) 287 { 288 289 for (; len > 0; len -= sizeof(uint32_t), dst++) { 290 *rand = arc4random(); 291 *dst = *dst ^ *rand++; 292 } 293 KASSERT(len == 0, ("len != 0 (len=%zd)", len)); 294 } 295 296 static void 297 g_shsec_start(struct bio *bp) 298 { 299 TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue); 300 struct g_shsec_softc *sc; 301 struct bio *cbp; 302 uint32_t *dst; 303 ssize_t len; 304 u_int no; 305 int error; 306 307 sc = bp->bio_to->geom->softc; 308 /* 309 * If sc == NULL, provider's error should be set and g_shsec_start() 310 * should not be called at all. 
311 */ 312 KASSERT(sc != NULL, 313 ("Provider's error should be set (error=%d)(device=%s).", 314 bp->bio_to->error, bp->bio_to->name)); 315 316 G_SHSEC_LOGREQ(2, bp, "Request received."); 317 318 switch (bp->bio_cmd) { 319 case BIO_READ: 320 case BIO_WRITE: 321 case BIO_FLUSH: 322 case BIO_SPEEDUP: 323 /* 324 * Only those requests are supported. 325 */ 326 break; 327 case BIO_DELETE: 328 case BIO_GETATTR: 329 /* To which provider it should be delivered? */ 330 default: 331 g_io_deliver(bp, EOPNOTSUPP); 332 return; 333 } 334 335 /* 336 * Allocate all bios first and calculate XOR. 337 */ 338 dst = NULL; 339 len = bp->bio_length; 340 if (bp->bio_cmd == BIO_READ) 341 bp->bio_pflags = G_SHSEC_BFLAG_FIRST; 342 for (no = 0; no < sc->sc_ndisks; no++) { 343 cbp = g_clone_bio(bp); 344 if (cbp == NULL) { 345 error = ENOMEM; 346 goto failure; 347 } 348 TAILQ_INSERT_TAIL(&queue, cbp, bio_queue); 349 350 /* 351 * Fill in the component buf structure. 352 */ 353 cbp->bio_done = g_shsec_done; 354 cbp->bio_caller2 = sc->sc_disks[no]; 355 if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) { 356 cbp->bio_data = uma_zalloc(g_shsec_zone, M_NOWAIT); 357 if (cbp->bio_data == NULL) { 358 g_shsec_alloc_failed++; 359 error = ENOMEM; 360 goto failure; 361 } 362 if (bp->bio_cmd == BIO_WRITE) { 363 if (no == 0) { 364 dst = (uint32_t *)cbp->bio_data; 365 bcopy(bp->bio_data, dst, len); 366 } else { 367 g_shsec_xor2((uint32_t *)cbp->bio_data, 368 dst, len); 369 } 370 } 371 } 372 } 373 /* 374 * Fire off all allocated requests! 
375 */ 376 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 377 struct g_consumer *cp; 378 379 TAILQ_REMOVE(&queue, cbp, bio_queue); 380 cp = cbp->bio_caller2; 381 cbp->bio_caller2 = NULL; 382 cbp->bio_to = cp->provider; 383 G_SHSEC_LOGREQ(2, cbp, "Sending request."); 384 g_io_request(cbp, cp); 385 } 386 return; 387 failure: 388 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 389 TAILQ_REMOVE(&queue, cbp, bio_queue); 390 bp->bio_children--; 391 if (cbp->bio_data != NULL) { 392 explicit_bzero(cbp->bio_data, cbp->bio_length); 393 uma_zfree(g_shsec_zone, cbp->bio_data); 394 } 395 g_destroy_bio(cbp); 396 } 397 if (bp->bio_error == 0) 398 bp->bio_error = error; 399 g_io_deliver(bp, bp->bio_error); 400 } 401 402 static void 403 g_shsec_check_and_run(struct g_shsec_softc *sc) 404 { 405 off_t mediasize, ms; 406 u_int no, sectorsize = 0; 407 408 if (g_shsec_nvalid(sc) != sc->sc_ndisks) 409 return; 410 411 sc->sc_provider = g_new_providerf(sc->sc_geom, "shsec/%s", sc->sc_name); 412 /* 413 * Find the smallest disk. 
414 */ 415 mediasize = sc->sc_disks[0]->provider->mediasize; 416 mediasize -= sc->sc_disks[0]->provider->sectorsize; 417 sectorsize = sc->sc_disks[0]->provider->sectorsize; 418 for (no = 1; no < sc->sc_ndisks; no++) { 419 ms = sc->sc_disks[no]->provider->mediasize; 420 ms -= sc->sc_disks[no]->provider->sectorsize; 421 if (ms < mediasize) 422 mediasize = ms; 423 sectorsize = lcm(sectorsize, 424 sc->sc_disks[no]->provider->sectorsize); 425 } 426 sc->sc_provider->sectorsize = sectorsize; 427 sc->sc_provider->mediasize = mediasize; 428 g_error_provider(sc->sc_provider, 0); 429 430 G_SHSEC_DEBUG(0, "Device %s activated.", sc->sc_name); 431 } 432 433 static int 434 g_shsec_read_metadata(struct g_consumer *cp, struct g_shsec_metadata *md) 435 { 436 struct g_provider *pp; 437 u_char *buf; 438 int error; 439 440 g_topology_assert(); 441 442 error = g_access(cp, 1, 0, 0); 443 if (error != 0) 444 return (error); 445 pp = cp->provider; 446 g_topology_unlock(); 447 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 448 &error); 449 g_topology_lock(); 450 g_access(cp, -1, 0, 0); 451 if (buf == NULL) 452 return (error); 453 454 /* Decode metadata. */ 455 shsec_metadata_decode(buf, md); 456 g_free(buf); 457 458 return (0); 459 } 460 461 /* 462 * Add disk to given device. 463 */ 464 static int 465 g_shsec_add_disk(struct g_shsec_softc *sc, struct g_provider *pp, u_int no) 466 { 467 struct g_consumer *cp, *fcp; 468 struct g_geom *gp; 469 struct g_shsec_metadata md; 470 int error; 471 472 /* Metadata corrupted? */ 473 if (no >= sc->sc_ndisks) 474 return (EINVAL); 475 476 /* Check if disk is not already attached. 
*/ 477 if (sc->sc_disks[no] != NULL) 478 return (EEXIST); 479 480 gp = sc->sc_geom; 481 fcp = LIST_FIRST(&gp->consumer); 482 483 cp = g_new_consumer(gp); 484 error = g_attach(cp, pp); 485 if (error != 0) { 486 g_destroy_consumer(cp); 487 return (error); 488 } 489 490 if (fcp != NULL && (fcp->acr > 0 || fcp->acw > 0 || fcp->ace > 0)) { 491 error = g_access(cp, fcp->acr, fcp->acw, fcp->ace); 492 if (error != 0) { 493 g_detach(cp); 494 g_destroy_consumer(cp); 495 return (error); 496 } 497 } 498 499 /* Reread metadata. */ 500 error = g_shsec_read_metadata(cp, &md); 501 if (error != 0) 502 goto fail; 503 504 if (strcmp(md.md_magic, G_SHSEC_MAGIC) != 0 || 505 strcmp(md.md_name, sc->sc_name) != 0 || md.md_id != sc->sc_id) { 506 G_SHSEC_DEBUG(0, "Metadata on %s changed.", pp->name); 507 goto fail; 508 } 509 510 cp->private = sc; 511 cp->index = no; 512 sc->sc_disks[no] = cp; 513 514 G_SHSEC_DEBUG(0, "Disk %s attached to %s.", pp->name, sc->sc_name); 515 516 g_shsec_check_and_run(sc); 517 518 return (0); 519 fail: 520 if (fcp != NULL && (fcp->acr > 0 || fcp->acw > 0 || fcp->ace > 0)) 521 g_access(cp, -fcp->acr, -fcp->acw, -fcp->ace); 522 g_detach(cp); 523 g_destroy_consumer(cp); 524 return (error); 525 } 526 527 static struct g_geom * 528 g_shsec_create(struct g_class *mp, const struct g_shsec_metadata *md) 529 { 530 struct g_shsec_softc *sc; 531 struct g_geom *gp; 532 u_int no; 533 534 G_SHSEC_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id); 535 536 /* Two disks is minimum. 
*/ 537 if (md->md_all < 2) { 538 G_SHSEC_DEBUG(0, "Too few disks defined for %s.", md->md_name); 539 return (NULL); 540 } 541 542 /* Check for duplicate unit */ 543 LIST_FOREACH(gp, &mp->geom, geom) { 544 sc = gp->softc; 545 if (sc != NULL && strcmp(sc->sc_name, md->md_name) == 0) { 546 G_SHSEC_DEBUG(0, "Device %s already configured.", 547 sc->sc_name); 548 return (NULL); 549 } 550 } 551 gp = g_new_geomf(mp, "%s", md->md_name); 552 sc = malloc(sizeof(*sc), M_SHSEC, M_WAITOK | M_ZERO); 553 gp->start = g_shsec_start; 554 gp->spoiled = g_shsec_orphan; 555 gp->orphan = g_shsec_orphan; 556 gp->access = g_shsec_access; 557 gp->dumpconf = g_shsec_dumpconf; 558 559 sc->sc_id = md->md_id; 560 sc->sc_ndisks = md->md_all; 561 sc->sc_disks = malloc(sizeof(struct g_consumer *) * sc->sc_ndisks, 562 M_SHSEC, M_WAITOK | M_ZERO); 563 for (no = 0; no < sc->sc_ndisks; no++) 564 sc->sc_disks[no] = NULL; 565 566 gp->softc = sc; 567 sc->sc_geom = gp; 568 sc->sc_provider = NULL; 569 570 G_SHSEC_DEBUG(0, "Device %s created (id=%u).", sc->sc_name, sc->sc_id); 571 572 return (gp); 573 } 574 575 static int 576 g_shsec_destroy(struct g_shsec_softc *sc, boolean_t force) 577 { 578 struct g_provider *pp; 579 struct g_geom *gp; 580 u_int no; 581 582 g_topology_assert(); 583 584 if (sc == NULL) 585 return (ENXIO); 586 587 pp = sc->sc_provider; 588 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { 589 if (force) { 590 G_SHSEC_DEBUG(0, "Device %s is still open, so it " 591 "can't be definitely removed.", pp->name); 592 } else { 593 G_SHSEC_DEBUG(1, 594 "Device %s is still open (r%dw%de%d).", pp->name, 595 pp->acr, pp->acw, pp->ace); 596 return (EBUSY); 597 } 598 } 599 600 for (no = 0; no < sc->sc_ndisks; no++) { 601 if (sc->sc_disks[no] != NULL) 602 g_shsec_remove_disk(sc->sc_disks[no]); 603 } 604 605 gp = sc->sc_geom; 606 gp->softc = NULL; 607 KASSERT(sc->sc_provider == NULL, ("Provider still exists? 
(device=%s)", 608 gp->name)); 609 free(sc->sc_disks, M_SHSEC); 610 free(sc, M_SHSEC); 611 612 pp = LIST_FIRST(&gp->provider); 613 if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)) 614 G_SHSEC_DEBUG(0, "Device %s destroyed.", gp->name); 615 616 g_wither_geom(gp, ENXIO); 617 618 return (0); 619 } 620 621 static int 622 g_shsec_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused, 623 struct g_geom *gp) 624 { 625 struct g_shsec_softc *sc; 626 627 sc = gp->softc; 628 return (g_shsec_destroy(sc, 0)); 629 } 630 631 static struct g_geom * 632 g_shsec_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 633 { 634 struct g_shsec_metadata md; 635 struct g_shsec_softc *sc; 636 struct g_consumer *cp; 637 struct g_geom *gp; 638 int error; 639 640 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); 641 g_topology_assert(); 642 643 /* Skip providers that are already open for writing. */ 644 if (pp->acw > 0) 645 return (NULL); 646 647 G_SHSEC_DEBUG(3, "Tasting %s.", pp->name); 648 649 gp = g_new_geomf(mp, "shsec:taste"); 650 gp->start = g_shsec_start; 651 gp->access = g_shsec_access; 652 gp->orphan = g_shsec_orphan; 653 cp = g_new_consumer(gp); 654 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 655 error = g_attach(cp, pp); 656 if (error == 0) { 657 error = g_shsec_read_metadata(cp, &md); 658 g_detach(cp); 659 } 660 g_destroy_consumer(cp); 661 g_destroy_geom(gp); 662 if (error != 0) 663 return (NULL); 664 gp = NULL; 665 666 if (strcmp(md.md_magic, G_SHSEC_MAGIC) != 0) 667 return (NULL); 668 if (md.md_version > G_SHSEC_VERSION) { 669 G_SHSEC_DEBUG(0, "Kernel module is too old to handle %s.\n", 670 pp->name); 671 return (NULL); 672 } 673 /* 674 * Backward compatibility: 675 */ 676 /* There was no md_provsize field in earlier versions of metadata. 
*/ 677 if (md.md_version < 1) 678 md.md_provsize = pp->mediasize; 679 680 if (md.md_provider[0] != '\0' && 681 !g_compare_names(md.md_provider, pp->name)) 682 return (NULL); 683 if (md.md_provsize != pp->mediasize) 684 return (NULL); 685 686 /* 687 * Let's check if device already exists. 688 */ 689 sc = NULL; 690 LIST_FOREACH(gp, &mp->geom, geom) { 691 sc = gp->softc; 692 if (sc == NULL) 693 continue; 694 if (strcmp(md.md_name, sc->sc_name) != 0) 695 continue; 696 if (md.md_id != sc->sc_id) 697 continue; 698 break; 699 } 700 if (gp != NULL) { 701 G_SHSEC_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 702 error = g_shsec_add_disk(sc, pp, md.md_no); 703 if (error != 0) { 704 G_SHSEC_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 705 pp->name, gp->name, error); 706 return (NULL); 707 } 708 } else { 709 gp = g_shsec_create(mp, &md); 710 if (gp == NULL) { 711 G_SHSEC_DEBUG(0, "Cannot create device %s.", md.md_name); 712 return (NULL); 713 } 714 sc = gp->softc; 715 G_SHSEC_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 716 error = g_shsec_add_disk(sc, pp, md.md_no); 717 if (error != 0) { 718 G_SHSEC_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 719 pp->name, gp->name, error); 720 g_shsec_destroy(sc, 1); 721 return (NULL); 722 } 723 } 724 return (gp); 725 } 726 727 static struct g_shsec_softc * 728 g_shsec_find_device(struct g_class *mp, const char *name) 729 { 730 struct g_shsec_softc *sc; 731 struct g_geom *gp; 732 733 LIST_FOREACH(gp, &mp->geom, geom) { 734 sc = gp->softc; 735 if (sc == NULL) 736 continue; 737 if (strcmp(sc->sc_name, name) == 0) 738 return (sc); 739 } 740 return (NULL); 741 } 742 743 static void 744 g_shsec_ctl_destroy(struct gctl_req *req, struct g_class *mp) 745 { 746 struct g_shsec_softc *sc; 747 int *force, *nargs, error; 748 const char *name; 749 char param[16]; 750 u_int i; 751 752 g_topology_assert(); 753 754 nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs)); 755 if (nargs == NULL) { 756 gctl_error(req, "No '%s' 
argument.", "nargs"); 757 return; 758 } 759 if (*nargs <= 0) { 760 gctl_error(req, "Missing device(s)."); 761 return; 762 } 763 force = gctl_get_paraml(req, "force", sizeof(*force)); 764 if (force == NULL) { 765 gctl_error(req, "No '%s' argument.", "force"); 766 return; 767 } 768 769 for (i = 0; i < (u_int)*nargs; i++) { 770 snprintf(param, sizeof(param), "arg%u", i); 771 name = gctl_get_asciiparam(req, param); 772 if (name == NULL) { 773 gctl_error(req, "No 'arg%u' argument.", i); 774 return; 775 } 776 sc = g_shsec_find_device(mp, name); 777 if (sc == NULL) { 778 gctl_error(req, "No such device: %s.", name); 779 return; 780 } 781 error = g_shsec_destroy(sc, *force); 782 if (error != 0) { 783 gctl_error(req, "Cannot destroy device %s (error=%d).", 784 sc->sc_name, error); 785 return; 786 } 787 } 788 } 789 790 static void 791 g_shsec_config(struct gctl_req *req, struct g_class *mp, const char *verb) 792 { 793 uint32_t *version; 794 795 g_topology_assert(); 796 797 version = gctl_get_paraml(req, "version", sizeof(*version)); 798 if (version == NULL) { 799 gctl_error(req, "No '%s' argument.", "version"); 800 return; 801 } 802 if (*version != G_SHSEC_VERSION) { 803 gctl_error(req, "Userland and kernel parts are out of sync."); 804 return; 805 } 806 807 if (strcmp(verb, "stop") == 0) { 808 g_shsec_ctl_destroy(req, mp); 809 return; 810 } 811 812 gctl_error(req, "Unknown verb."); 813 } 814 815 static void 816 g_shsec_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 817 struct g_consumer *cp, struct g_provider *pp) 818 { 819 struct g_shsec_softc *sc; 820 821 sc = gp->softc; 822 if (sc == NULL) 823 return; 824 if (pp != NULL) { 825 /* Nothing here. 
*/ 826 } else if (cp != NULL) { 827 sbuf_printf(sb, "%s<Number>%u</Number>\n", indent, 828 (u_int)cp->index); 829 } else { 830 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 831 sbuf_printf(sb, "%s<Status>Total=%u, Online=%u</Status>\n", 832 indent, sc->sc_ndisks, g_shsec_nvalid(sc)); 833 sbuf_printf(sb, "%s<State>", indent); 834 if (sc->sc_provider != NULL && sc->sc_provider->error == 0) 835 sbuf_printf(sb, "UP"); 836 else 837 sbuf_printf(sb, "DOWN"); 838 sbuf_printf(sb, "</State>\n"); 839 } 840 } 841 842 DECLARE_GEOM_CLASS(g_shsec_class, g_shsec); 843 MODULE_VERSION(geom_shsec, 0); 844