/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cons.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/uio.h>
#include <sys/vnode.h>

#include <vm/uma.h>

#include <geom/geom.h>
#include <geom/eli/g_eli.h>
#include <geom/eli/pkcs5v2.h>

#include <crypto/intake.h>

FEATURE(geom_eli, "GEOM crypto module");

MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
static int g_eli_version = G_ELI_VERSION;
SYSCTL_INT(_kern_geom_eli, OID_AUTO, version, CTLFLAG_RD, &g_eli_version, 0,
    "GELI version");
int g_eli_debug = 0;
SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RWTUN, &g_eli_debug, 0,
    "Debug level");
static u_int g_eli_tries = 3;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RWTUN, &g_eli_tries, 0,
    "Number of tries for entering the passphrase");
static u_int g_eli_visible_passphrase = GETS_NOECHO;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RWTUN,
    &g_eli_visible_passphrase, 0,
    "Visibility of passphrase prompt (0 = invisible, 1 = visible, 2 = asterisk)");
u_int g_eli_overwrites = G_ELI_OVERWRITES;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RWTUN, &g_eli_overwrites,
    0, "Number of times on-disk keys should be overwritten when destroying them");
static u_int g_eli_threads = 0;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RWTUN, &g_eli_threads, 0,
    "Number of threads doing crypto work");
u_int g_eli_batch = 0;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RWTUN, &g_eli_batch, 0,
    "Use crypto operations batching");
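
/*
 * Illustrative only: the CTLFLAG_RWTUN knobs above may also be set as loader
 * tunables, e.g. in /boot/loader.conf (values below are just examples):
 *
 *	kern.geom.eli.threads=2
 *	kern.geom.eli.batch=1
 *	kern.geom.eli.visible_passphrase=2
 */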

/*
 * Passphrase cached during boot, in order to be more user-friendly if
 * there are multiple providers using the same passphrase.
 */
static char cached_passphrase[256];
static u_int g_eli_boot_passcache = 1;
TUNABLE_INT("kern.geom.eli.boot_passcache", &g_eli_boot_passcache);
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, boot_passcache, CTLFLAG_RD,
    &g_eli_boot_passcache, 0,
    "Passphrases are cached during boot process for possible reuse");
static void
fetch_loader_passphrase(void * dummy)
{
	char * env_passphrase;

	KASSERT(dynamic_kenv, ("need dynamic kenv"));

	if ((env_passphrase = kern_getenv("kern.geom.eli.passphrase")) != NULL) {
		/* Extract passphrase from the environment. */
		strlcpy(cached_passphrase, env_passphrase,
		    sizeof(cached_passphrase));
		freeenv(env_passphrase);

		/* Wipe the passphrase from the environment. */
		kern_unsetenv("kern.geom.eli.passphrase");
	}
}
SYSINIT(geli_fetch_loader_passphrase, SI_SUB_KMEM + 1, SI_ORDER_ANY,
    fetch_loader_passphrase, NULL);

static void
zero_boot_passcache(void)
{

	explicit_bzero(cached_passphrase, sizeof(cached_passphrase));
}

static void
zero_geli_intake_keys(void)
{
	struct keybuf *keybuf;
	int i;

	if ((keybuf = get_keybuf()) != NULL) {
		/* Scan the key buffer, clear all GELI keys. */
		for (i = 0; i < keybuf->kb_nents; i++) {
			if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) {
				explicit_bzero(keybuf->kb_ents[i].ke_data,
				    sizeof(keybuf->kb_ents[i].ke_data));
				keybuf->kb_ents[i].ke_type = KEYBUF_TYPE_NONE;
			}
		}
	}
}

static void
zero_intake_passcache(void *dummy)
{
	zero_boot_passcache();
	zero_geli_intake_keys();
}
EVENTHANDLER_DEFINE(mountroot, zero_intake_passcache, NULL, 0);

static eventhandler_tag g_eli_pre_sync = NULL;

static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static void g_eli_init(struct g_class *mp);
static void g_eli_fini(struct g_class *mp);

static g_taste_t g_eli_taste;
static g_dumpconf_t g_eli_dumpconf;

struct g_class g_eli_class = {
	.name = G_ELI_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_eli_config,
	.taste = g_eli_taste,
	.destroy_geom = g_eli_destroy_geom,
	.init = g_eli_init,
	.fini = g_eli_fini
};


/*
 * Code paths:
 * BIO_READ:
 *	g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */


/*
 * EAGAIN from crypto(9) means that the request was probably rebalanced to
 * another crypto accelerator or something along those lines.
 * The function updates the SID and reruns the operation.
 */
int
g_eli_crypto_rerun(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct bio *bp;
	int error;

	bp = (struct bio *)crp->crp_opaque;
	sc = bp->bio_to->geom->softc;
	LIST_FOREACH(wr, &sc->sc_workers, w_next) {
		if (wr->w_number == bp->bio_pflags)
			break;
	}
	KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
	G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).",
	    bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid,
	    (uintmax_t)crp->crp_sid);
	wr->w_sid = crp->crp_sid;
	crp->crp_etype = 0;
	error = crypto_dispatch(crp);
	if (error == 0)
		return (0);
	G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
	crp->crp_etype = error;
	return (error);
}

/*
 * The function is called after reading encrypted data from the provider.
 *
 * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 */
void
g_eli_read_done(struct bio *bp)
{
	struct g_eli_softc *sc;
	struct bio *pbp;

	G_ELI_LOGREQ(2, bp, "Request done.");
	pbp = bp->bio_parent;
	if (pbp->bio_error == 0 && bp->bio_error != 0)
		pbp->bio_error = bp->bio_error;
	g_destroy_bio(bp);
	/*
	 * Do we have all sectors already?
	 */
	pbp->bio_inbed++;
	if (pbp->bio_inbed < pbp->bio_children)
		return;
	sc = pbp->bio_to->geom->softc;
	if (pbp->bio_error != 0) {
		G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__,
		    pbp->bio_error);
		pbp->bio_completed = 0;
		if (pbp->bio_driver2 != NULL) {
			free(pbp->bio_driver2, M_ELI);
			pbp->bio_driver2 = NULL;
		}
		g_io_deliver(pbp, pbp->bio_error);
		atomic_subtract_int(&sc->sc_inflight, 1);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, pbp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

/*
 * The function is called after we encrypt and write data.
 *
 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
 */
void
g_eli_write_done(struct bio *bp)
{
	struct g_eli_softc *sc;
	struct bio *pbp;

	G_ELI_LOGREQ(2, bp, "Request done.");
	pbp = bp->bio_parent;
	if (pbp->bio_error == 0 && bp->bio_error != 0)
		pbp->bio_error = bp->bio_error;
	g_destroy_bio(bp);
	/*
	 * Do we have all sectors already?
	 */
	pbp->bio_inbed++;
	if (pbp->bio_inbed < pbp->bio_children)
		return;
	free(pbp->bio_driver2, M_ELI);
	pbp->bio_driver2 = NULL;
	if (pbp->bio_error != 0) {
		G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__,
		    pbp->bio_error);
		pbp->bio_completed = 0;
	} else
		pbp->bio_completed = pbp->bio_length;

	/*
	 * Write is finished, send it up.
	 */
	sc = pbp->bio_to->geom->softc;
	g_io_deliver(pbp, pbp->bio_error);
	atomic_subtract_int(&sc->sc_inflight, 1);
}

/*
 * This function should never be called, but GEOM requires an ->orphan()
 * method to be set for every geom.
 */
static void
g_eli_orphan_spoil_assert(struct g_consumer *cp)
{

	panic("Function %s() called for %s.", __func__, cp->geom->name);
}

static void
g_eli_orphan(struct g_consumer *cp)
{
	struct g_eli_softc *sc;

	g_topology_assert();
	sc = cp->geom->softc;
	if (sc == NULL)
		return;
	g_eli_destroy(sc, TRUE);
}

/*
 * BIO_READ:
 *	G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 * BIO_WRITE:
 *	G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
static void
g_eli_start(struct bio *bp)
{
	struct g_eli_softc *sc;
	struct g_consumer *cp;
	struct bio *cbp;

	sc = bp->bio_to->geom->softc;
	KASSERT(sc != NULL,
	    ("Provider's error should be set (error=%d)(device=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_ELI_LOGREQ(2, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_GETATTR:
	case BIO_FLUSH:
	case BIO_ZONE:
		break;
	case BIO_DELETE:
		/*
		 * If the user hasn't set the NODELETE flag, we just pass
		 * it down the stack and let the layers beneath us do (or
		 * not) whatever they do with it. If they have, we
		 * reject it. A possible extension would be an
		 * additional flag to take it as a hint to shred the data
		 * with [multiple?] overwrites.
		 */
		if (!(sc->sc_flags & G_ELI_FLAG_NODELETE))
			break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp->bio_driver1 = cbp;
	bp->bio_pflags = G_ELI_NEW_BIO;
	switch (bp->bio_cmd) {
	case BIO_READ:
		if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
			g_eli_crypto_read(sc, bp, 0);
			break;
		}
		/* FALLTHROUGH */
	case BIO_WRITE:
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_tail(&sc->sc_queue, bp);
		mtx_unlock(&sc->sc_queue_mtx);
		wakeup(sc);
		break;
	case BIO_GETATTR:
	case BIO_FLUSH:
	case BIO_DELETE:
	case BIO_ZONE:
		cbp->bio_done = g_std_done;
		cp = LIST_FIRST(&sc->sc_geom->consumer);
		cbp->bio_to = cp->provider;
		G_ELI_LOGREQ(2, cbp, "Sending request.");
		g_io_request(cbp, cp);
		break;
	}
}

static int
g_eli_newsession(struct g_eli_worker *wr)
{
	struct g_eli_softc *sc;
	struct cryptoini crie, cria;
	int error;

	sc = wr->w_softc;

	bzero(&crie, sizeof(crie));
	crie.cri_alg = sc->sc_ealgo;
	crie.cri_klen = sc->sc_ekeylen;
	if (sc->sc_ealgo == CRYPTO_AES_XTS)
		crie.cri_klen <<= 1;
	if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) {
		crie.cri_key = g_eli_key_hold(sc, 0,
		    LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize);
	} else {
		crie.cri_key = sc->sc_ekey;
	}
	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
		bzero(&cria, sizeof(cria));
		cria.cri_alg = sc->sc_aalgo;
		cria.cri_klen = sc->sc_akeylen;
		cria.cri_key = sc->sc_akey;
		crie.cri_next = &cria;
	}
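
	/*
	 * Select the crypto backend: software or hardware when already
	 * known; otherwise try hardware first and fall back to software,
	 * remembering the result in sc_crypto.
	 */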
	switch (sc->sc_crypto) {
	case G_ELI_CRYPTO_SW:
		error = crypto_newsession(&wr->w_sid, &crie,
		    CRYPTOCAP_F_SOFTWARE);
		break;
	case G_ELI_CRYPTO_HW:
		error = crypto_newsession(&wr->w_sid, &crie,
		    CRYPTOCAP_F_HARDWARE);
		break;
	case G_ELI_CRYPTO_UNKNOWN:
		error = crypto_newsession(&wr->w_sid, &crie,
		    CRYPTOCAP_F_HARDWARE);
		if (error == 0) {
			mtx_lock(&sc->sc_queue_mtx);
			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
				sc->sc_crypto = G_ELI_CRYPTO_HW;
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			error = crypto_newsession(&wr->w_sid, &crie,
			    CRYPTOCAP_F_SOFTWARE);
			mtx_lock(&sc->sc_queue_mtx);
			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
				sc->sc_crypto = G_ELI_CRYPTO_SW;
			mtx_unlock(&sc->sc_queue_mtx);
		}
		break;
	default:
		panic("%s: invalid condition", __func__);
	}

	if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0)
		g_eli_key_drop(sc, crie.cri_key);

	return (error);
}

static void
g_eli_freesession(struct g_eli_worker *wr)
{

	crypto_freesession(wr->w_sid);
}

static void
g_eli_cancel(struct g_eli_softc *sc)
{
	struct bio *bp;

	mtx_assert(&sc->sc_queue_mtx, MA_OWNED);

	while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
		KASSERT(bp->bio_pflags == G_ELI_NEW_BIO,
		    ("Not new bio when canceling (bp=%p).", bp));
		g_io_deliver(bp, ENXIO);
	}
}

static struct bio *
g_eli_takefirst(struct g_eli_softc *sc)
{
	struct bio *bp;

	mtx_assert(&sc->sc_queue_mtx, MA_OWNED);

	if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND))
		return (bioq_takefirst(&sc->sc_queue));
	/*
	 * Device suspended, so we skip new I/O requests.
	 */
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_pflags != G_ELI_NEW_BIO)
			break;
	}
	if (bp != NULL)
		bioq_remove(&sc->sc_queue, bp);
	return (bp);
}

/*
 * This is the main function of the kernel worker thread when we don't have
 * hardware acceleration and have to do the cryptography in software.
 * A dedicated thread is needed so we don't slow down the g_up/g_down GEOM
 * threads with crypto work.
 */
static void
g_eli_worker(void *arg)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct bio *bp;
	int error;

	wr = arg;
	sc = wr->w_softc;
#ifdef EARLY_AP_STARTUP
	MPASS(!sc->sc_cpubind || smp_started);
#elif defined(SMP)
	/* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
	if (sc->sc_cpubind) {
		while (!smp_started)
			tsleep(wr, 0, "geli:smp", hz / 4);
	}
#endif
	thread_lock(curthread);
	sched_prio(curthread, PUSER);
	if (sc->sc_cpubind)
		sched_bind(curthread, wr->w_number % mp_ncpus);
	thread_unlock(curthread);

	G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);

	for (;;) {
		mtx_lock(&sc->sc_queue_mtx);
again:
		bp = g_eli_takefirst(sc);
		if (bp == NULL) {
			if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
				g_eli_cancel(sc);
				LIST_REMOVE(wr, w_next);
				g_eli_freesession(wr);
				free(wr, M_ELI);
				G_ELI_DEBUG(1, "Thread %s exiting.",
				    curthread->td_proc->p_comm);
				wakeup(&sc->sc_workers);
				mtx_unlock(&sc->sc_queue_mtx);
				kproc_exit(0);
			}
			while (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
				if (sc->sc_inflight > 0) {
					G_ELI_DEBUG(0, "inflight=%d",
					    sc->sc_inflight);
					/*
					 * We still have inflight BIOs, so
					 * sleep and retry.
					 */
					msleep(sc, &sc->sc_queue_mtx, PRIBIO,
					    "geli:inf", hz / 5);
					goto again;
				}
				/*
				 * Suspend requested, mark the worker as
				 * suspended and go to sleep.
				 */
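				/*
				 * Note: the crypto session is dropped below
				 * while we are suspended and re-created via
				 * g_eli_newsession() once the SUSPEND flag
				 * is cleared.
				 */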
				if (wr->w_active) {
					g_eli_freesession(wr);
					wr->w_active = FALSE;
				}
				wakeup(&sc->sc_workers);
				msleep(sc, &sc->sc_queue_mtx, PRIBIO,
				    "geli:suspend", 0);
				if (!wr->w_active &&
				    !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) {
					error = g_eli_newsession(wr);
					KASSERT(error == 0,
					    ("g_eli_newsession() failed on resume (error=%d)",
					    error));
					wr->w_active = TRUE;
				}
				goto again;
			}
			msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0);
			continue;
		}
		if (bp->bio_pflags == G_ELI_NEW_BIO)
			atomic_add_int(&sc->sc_inflight, 1);
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp->bio_pflags == G_ELI_NEW_BIO) {
			bp->bio_pflags = 0;
			if (sc->sc_flags & G_ELI_FLAG_AUTH) {
				if (bp->bio_cmd == BIO_READ)
					g_eli_auth_read(sc, bp);
				else
					g_eli_auth_run(wr, bp);
			} else {
				if (bp->bio_cmd == BIO_READ)
					g_eli_crypto_read(sc, bp, 1);
				else
					g_eli_crypto_run(wr, bp);
			}
		} else {
			if (sc->sc_flags & G_ELI_FLAG_AUTH)
				g_eli_auth_run(wr, bp);
			else
				g_eli_crypto_run(wr, bp);
		}
	}
}

int
g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
    struct g_eli_metadata *md)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	u_char *buf = NULL;
	int error;

	g_topology_assert();

	gp = g_new_geomf(mp, "eli:taste");
	gp->start = g_eli_start;
	gp->access = g_std_access;
	/*
	 * g_eli_read_metadata() is always called from the event thread.
	 * Our geom is created and destroyed in the same event, so there
	 * can be no orphan or spoil event in the meantime.
	 */
	gp->orphan = g_eli_orphan_spoil_assert;
	gp->spoiled = g_eli_orphan_spoil_assert;
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error != 0)
		goto end;
	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		goto end;
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	if (buf == NULL)
		goto end;
	error = eli_metadata_decode(buf, md);
	if (error != 0)
		goto end;
	/* Metadata was read and decoded successfully. */
end:
	if (buf != NULL)
		g_free(buf);
	if (cp->provider != NULL) {
		if (cp->acr == 1)
			g_access(cp, -1, 0, 0);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (error);
}

/*
 * This function is called on the last close of the provider, when the user
 * requested that the device be detached in that situation.
 */
static void
g_eli_last_close(void *arg, int flags __unused)
{
	struct g_geom *gp;
	char gpname[64];
	int error;

	g_topology_assert();
	gp = arg;
	strlcpy(gpname, gp->name, sizeof(gpname));
	error = g_eli_destroy(gp->softc, TRUE);
	KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
	    gpname, error));
	G_ELI_DEBUG(0, "Detached %s on last close.", gpname);
}

int
g_eli_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_eli_softc *sc;
	struct g_geom *gp;

	gp = pp->geom;
	sc = gp->softc;

	if (dw > 0) {
		if (sc->sc_flags & G_ELI_FLAG_RO) {
			/* Deny write attempts. */
			return (EROFS);
		}
		/* Someone is opening us for write, we need to remember that. */
		sc->sc_flags |= G_ELI_FLAG_WOPEN;
		return (0);
	}
	/* Is this the last close? */
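	/*
	 * dr/dw/de are deltas; the provider's acr/acw/ace still hold the
	 * pre-request counts, so the sums below are the access counts that
	 * will result once this request is granted.
	 */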
	if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
		return (0);

	/*
	 * Automatically detach on last close if requested.
	 */
	if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
	    (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
		g_post_event(g_eli_last_close, gp, M_WAITOK, NULL);
	}
	return (0);
}

static int
g_eli_cpu_is_disabled(int cpu)
{
#ifdef SMP
	return (CPU_ISSET(cpu, &hlt_cpus_mask));
#else
	return (0);
#endif
}

struct g_geom *
g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
    const struct g_eli_metadata *md, const u_char *mkey, int nkey)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp;
	u_int i, threads;
	int error;

	G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);

	gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
	sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
	gp->start = g_eli_start;
	/*
	 * Spoiling can happen even though we have the provider open
	 * exclusively, e.g. through media change events.
	 */
	gp->spoiled = g_eli_orphan;
	gp->orphan = g_eli_orphan;
	gp->dumpconf = g_eli_dumpconf;
	/*
	 * If the detach-on-last-close feature is not enabled and we don't
	 * operate on a read-only provider, we can simply use g_std_access().
	 */
	if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
		gp->access = g_eli_access;
	else
		gp->access = g_std_access;

	eli_metadata_softc(sc, md, bpp->sectorsize, bpp->mediasize);
	sc->sc_nkey = nkey;

	gp->softc = sc;
	sc->sc_geom = gp;

	bioq_init(&sc->sc_queue);
	mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);
	mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF);

	pp = NULL;
	cp = g_new_consumer(gp);
	error = g_attach(cp, bpp);
	if (error != 0) {
		if (req != NULL) {
			gctl_error(req, "Cannot attach to %s (error=%d).",
			    bpp->name, error);
		} else {
			G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
			    bpp->name, error);
		}
		goto failed;
	}
	/*
	 * Keep the provider open all the time, so we can run critical tasks,
	 * like Master Key deletion, without wondering whether we can open
	 * the provider or not.
	 * We don't open the provider for writing only when the user requested
	 * read-only access.
	 */
	if (sc->sc_flags & G_ELI_FLAG_RO)
		error = g_access(cp, 1, 0, 1);
	else
		error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		if (req != NULL) {
			gctl_error(req, "Cannot access %s (error=%d).",
			    bpp->name, error);
		} else {
			G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
			    bpp->name, error);
		}
		goto failed;
	}

	/*
	 * Remember the keys in our softc structure.
	 */
	g_eli_mkey_propagate(sc, mkey);

	LIST_INIT(&sc->sc_workers);
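
	/*
	 * Start one worker thread per CPU unless kern.geom.eli.threads says
	 * otherwise; workers are bound to CPUs only when the thread count
	 * equals the number of CPUs.
	 */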
	threads = g_eli_threads;
	if (threads == 0)
		threads = mp_ncpus;
	sc->sc_cpubind = (mp_ncpus > 1 && threads == mp_ncpus);
	for (i = 0; i < threads; i++) {
		if (g_eli_cpu_is_disabled(i)) {
			G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
			    bpp->name, i);
			continue;
		}
		wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
		wr->w_softc = sc;
		wr->w_number = i;
		wr->w_active = TRUE;

		error = g_eli_newsession(wr);
		if (error != 0) {
			free(wr, M_ELI);
			if (req != NULL) {
				gctl_error(req, "Cannot set up crypto session "
				    "for %s (error=%d).", bpp->name, error);
			} else {
				G_ELI_DEBUG(1, "Cannot set up crypto session "
				    "for %s (error=%d).", bpp->name, error);
			}
			goto failed;
		}

		error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
		    "g_eli[%u] %s", i, bpp->name);
		if (error != 0) {
			g_eli_freesession(wr);
			free(wr, M_ELI);
			if (req != NULL) {
				gctl_error(req, "Cannot create kernel thread "
				    "for %s (error=%d).", bpp->name, error);
			} else {
				G_ELI_DEBUG(1, "Cannot create kernel thread "
				    "for %s (error=%d).", bpp->name, error);
			}
			goto failed;
		}
		LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
	}

	/*
	 * Create decrypted provider.
	 */
	pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
	pp->mediasize = sc->sc_mediasize;
	pp->sectorsize = sc->sc_sectorsize;

	g_error_provider(pp, 0);

	G_ELI_DEBUG(0, "Device %s created.", pp->name);
	G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
	    sc->sc_ekeylen);
	if (sc->sc_flags & G_ELI_FLAG_AUTH)
		G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
	G_ELI_DEBUG(0, " Crypto: %s",
	    sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
	return (gp);
failed:
	mtx_lock(&sc->sc_queue_mtx);
	sc->sc_flags |= G_ELI_FLAG_DESTROY;
	wakeup(sc);
	/*
	 * Wait for the kernel threads' self-destruction.
	 */
	while (!LIST_EMPTY(&sc->sc_workers)) {
		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
		    "geli:destroy", 0);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	if (cp->provider != NULL) {
		if (cp->acr == 1)
			g_access(cp, -1, -1, -1);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	g_eli_key_destroy(sc);
	bzero(sc, sizeof(*sc));
	free(sc, M_ELI);
	return (NULL);
}

int
g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_ELI_DEBUG(1, "Device %s is still open, so it "
			    "cannot be definitely removed.", pp->name);
			sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
			gp->access = g_eli_access;
			g_wither_provider(pp, ENXIO);
			return (EBUSY);
		} else {
			G_ELI_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	}

	mtx_lock(&sc->sc_queue_mtx);
	sc->sc_flags |= G_ELI_FLAG_DESTROY;
	wakeup(sc);
	while (!LIST_EMPTY(&sc->sc_workers)) {
		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
		    "geli:destroy", 0);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	gp->softc = NULL;
	g_eli_key_destroy(sc);
	bzero(sc, sizeof(*sc));
	free(sc, M_ELI);

	if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
		G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom_close(gp, ENXIO);

	return (0);
}

static int
g_eli_destroy_geom(struct gctl_req *req __unused,
    struct g_class *mp __unused, struct g_geom *gp)
{
	struct g_eli_softc *sc;

	sc = gp->softc;
	return (g_eli_destroy(sc, FALSE));
}
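
/*
 * Key files are preloaded by the loader as modules of type
 * "<provider>:geli_keyfile<N>" (or just "<provider>:geli_keyfile" when there
 * is only one).  An illustrative loader.conf(5) fragment (names below are
 * examples only):
 *
 *	geli_da0p2_keyfile0_load="YES"
 *	geli_da0p2_keyfile0_type="da0p2:geli_keyfile0"
 *	geli_da0p2_keyfile0_name="/boot/keys/da0p2.key"
 */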
static int
g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
{
	u_char *keyfile, *data;
	char *file, name[64];
	size_t size;
	int i;

	for (i = 0; ; i++) {
		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
		keyfile = preload_search_by_type(name);
		if (keyfile == NULL && i == 0) {
			/*
			 * If there is only one keyfile, allow simpler name.
			 */
			snprintf(name, sizeof(name), "%s:geli_keyfile", provider);
			keyfile = preload_search_by_type(name);
		}
		if (keyfile == NULL)
			return (i);	/* Return number of loaded keyfiles. */
		data = preload_fetch_addr(keyfile);
		if (data == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file data for %s.",
			    name);
			return (0);
		}
		size = preload_fetch_size(keyfile);
		if (size == 0) {
			G_ELI_DEBUG(0, "Cannot find key file size for %s.",
			    name);
			return (0);
		}
		file = preload_search_info(keyfile, MODINFO_NAME);
		if (file == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file name for %s.",
			    name);
			return (0);
		}
		G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
		    provider, name);
		g_eli_crypto_hmac_update(ctx, data, size);
	}
}

static void
g_eli_keyfiles_clear(const char *provider)
{
	u_char *keyfile, *data;
	char name[64];
	size_t size;
	int i;

	for (i = 0; ; i++) {
		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
		keyfile = preload_search_by_type(name);
		if (keyfile == NULL)
			return;
		data = preload_fetch_addr(keyfile);
		size = preload_fetch_size(keyfile);
		if (data != NULL && size != 0)
			bzero(data, size);
	}
}

/*
 * Tasting is done only during boot.
 * We detect providers which should be attached before root is mounted.
 */
static struct g_geom *
g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_eli_metadata md;
	struct g_geom *gp;
	struct hmac_ctx ctx;
	char passphrase[256];
	u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
	u_int i, nkey, nkeyfiles, tries, showpass;
	int error;
	struct keybuf *keybuf;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	if (root_mounted() || g_eli_tries == 0)
		return (NULL);

	G_ELI_DEBUG(3, "Tasting %s.", pp->name);

	error = g_eli_read_metadata(mp, pp, &md);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_ELI_VERSION) {
		printf("geom_eli.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if (md.md_provsize != pp->mediasize)
		return (NULL);
	/* Should we attach it on boot? */
	if (!(md.md_flags & G_ELI_FLAG_BOOT))
		return (NULL);
	if (md.md_keys == 0x00) {
		G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
		return (NULL);
	}
	if (md.md_iterations == -1) {
		/* If there is no passphrase, we try only once. */
		tries = 1;
	} else {
		/* Ask for the passphrase no more than g_eli_tries times. */
		tries = g_eli_tries;
	}

	if ((keybuf = get_keybuf()) != NULL) {
		/* Scan the key buffer, try all GELI keys. */
		for (i = 0; i < keybuf->kb_nents; i++) {
			if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) {
				memcpy(key, keybuf->kb_ents[i].ke_data,
				    sizeof(key));

				if (g_eli_mkey_decrypt(&md, key,
				    mkey, &nkey) == 0 ) {
					explicit_bzero(key, sizeof(key));
					goto have_key;
				}
			}
		}
	}
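
	/*
	 * Each attempt below builds the user key as an HMAC over any
	 * preloaded key files plus either the salt and the raw passphrase
	 * (md_iterations == 0) or a PKCS#5v2-derived key (md_iterations > 0),
	 * and then tries to decrypt the Master Key with it.
	 */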
	for (i = 0; i <= tries; i++) {
		g_eli_crypto_hmac_init(&ctx, NULL, 0);

		/*
		 * Load all key files.
		 */
		nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);

		if (nkeyfiles == 0 && md.md_iterations == -1) {
			/*
			 * No key files and no passphrase, something is
			 * definitely wrong here.
			 * geli(8) doesn't allow such a configuration, so
			 * assume that there really was no passphrase and
			 * that the key files are not properly defined in
			 * loader.conf.
			 */
			G_ELI_DEBUG(0,
			    "Found no key files in loader.conf for %s.",
			    pp->name);
			return (NULL);
		}

		/* Ask for the passphrase if defined. */
		if (md.md_iterations >= 0) {
			/* Try first with cached passphrase. */
			if (i == 0) {
				if (!g_eli_boot_passcache)
					continue;
				memcpy(passphrase, cached_passphrase,
				    sizeof(passphrase));
			} else {
				printf("Enter passphrase for %s: ", pp->name);
				showpass = g_eli_visible_passphrase;
				if ((md.md_flags & G_ELI_FLAG_GELIDISPLAYPASS) != 0)
					showpass = GETS_ECHOPASS;
				cngets(passphrase, sizeof(passphrase),
				    showpass);
				memcpy(cached_passphrase, passphrase,
				    sizeof(passphrase));
			}
		}

		/*
		 * Prepare Derived-Key from the user passphrase.
		 */
		if (md.md_iterations == 0) {
			g_eli_crypto_hmac_update(&ctx, md.md_salt,
			    sizeof(md.md_salt));
			g_eli_crypto_hmac_update(&ctx, passphrase,
			    strlen(passphrase));
			explicit_bzero(passphrase, sizeof(passphrase));
		} else if (md.md_iterations > 0) {
			u_char dkey[G_ELI_USERKEYLEN];

			pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
			    sizeof(md.md_salt), passphrase, md.md_iterations);
			bzero(passphrase, sizeof(passphrase));
			g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
			explicit_bzero(dkey, sizeof(dkey));
		}

		g_eli_crypto_hmac_final(&ctx, key, 0);

		/*
		 * Decrypt Master-Key.
		 */
		error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
		bzero(key, sizeof(key));
		if (error == -1) {
			if (i == tries) {
				G_ELI_DEBUG(0,
				    "Wrong key for %s. No tries left.",
				    pp->name);
				g_eli_keyfiles_clear(pp->name);
				return (NULL);
			}
			if (i > 0) {
				G_ELI_DEBUG(0,
				    "Wrong key for %s. Tries left: %u.",
				    pp->name, tries - i);
			}
			/* Try again. */
			continue;
		} else if (error > 0) {
			G_ELI_DEBUG(0,
			    "Cannot decrypt Master Key for %s (error=%d).",
			    pp->name, error);
			g_eli_keyfiles_clear(pp->name);
			return (NULL);
		}
		g_eli_keyfiles_clear(pp->name);
		G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
		break;
	}
have_key:

	/*
	 * We have the correct key, let's attach the provider.
	 */
	gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
	bzero(mkey, sizeof(mkey));
	bzero(&md, sizeof(md));
	if (gp == NULL) {
		G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
		    G_ELI_SUFFIX);
		return (NULL);
	}
	return (gp);
}

static void
g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_eli_softc *sc;

	g_topology_assert();
	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL || cp != NULL)
		return;	/* Nothing here. */

	sbuf_printf(sb, "%s<KeysTotal>%ju</KeysTotal>\n", indent,
	    (uintmax_t)sc->sc_ekeys_total);
	sbuf_printf(sb, "%s<KeysAllocated>%ju</KeysAllocated>\n", indent,
	    (uintmax_t)sc->sc_ekeys_allocated);
	sbuf_printf(sb, "%s<Flags>", indent);
	if (sc->sc_flags == 0)
		sbuf_printf(sb, "NONE");
	else {
		int first = 1;

#define ADD_FLAG(flag, name)	do {					\
	if (sc->sc_flags & (flag)) {					\
		if (!first)						\
			sbuf_printf(sb, ", ");				\
		else							\
			first = 0;					\
		sbuf_printf(sb, name);					\
	}								\
} while (0)
		ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND");
		ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY");
		ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
		ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
		ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
		ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
		ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
		ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
		ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
		ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
		ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
		ADD_FLAG(G_ELI_FLAG_NODELETE, "NODELETE");
		ADD_FLAG(G_ELI_FLAG_GELIBOOT, "GELIBOOT");
		ADD_FLAG(G_ELI_FLAG_GELIDISPLAYPASS, "GELIDISPLAYPASS");
#undef ADD_FLAG
	}
	sbuf_printf(sb, "</Flags>\n");

	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
		sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
		    sc->sc_nkey);
	}
	sbuf_printf(sb, "%s<Version>%u</Version>\n", indent, sc->sc_version);
	sbuf_printf(sb, "%s<Crypto>", indent);
	switch (sc->sc_crypto) {
	case G_ELI_CRYPTO_HW:
		sbuf_printf(sb, "hardware");
		break;
	case G_ELI_CRYPTO_SW:
		sbuf_printf(sb, "software");
		break;
	default:
		sbuf_printf(sb, "UNKNOWN");
		break;
	}
	sbuf_printf(sb, "</Crypto>\n");
	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
		sbuf_printf(sb,
		    "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
		    indent, g_eli_algo2str(sc->sc_aalgo));
	}
	sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
	    sc->sc_ekeylen);
	sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n",
	    indent, g_eli_algo2str(sc->sc_ealgo));
	sbuf_printf(sb, "%s<State>%s</State>\n", indent,
	    (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE");
}

static void
g_eli_shutdown_pre_sync(void *arg, int howto)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;
	struct g_provider *pp;
	struct g_eli_softc *sc;
	int error;

	mp = arg;
	g_topology_lock();
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		pp = LIST_FIRST(&gp->provider);
		KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
		if (pp->acr + pp->acw + pp->ace == 0)
			error = g_eli_destroy(sc, TRUE);
		else {
			sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
			gp->access = g_eli_access;
		}
	}
	g_topology_unlock();
}
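
/*
 * The shutdown_pre_sync hook registered below makes sure that, before the
 * final sync on shutdown, every GELI provider is either destroyed (when not
 * open) or marked to detach on its last close.
 */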
static void
g_eli_init(struct g_class *mp)
{

	g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
	    g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
	if (g_eli_pre_sync == NULL)
		G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
}

static void
g_eli_fini(struct g_class *mp)
{

	if (g_eli_pre_sync != NULL)
		EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
}

DECLARE_GEOM_CLASS(g_eli_class, g_eli);
MODULE_DEPEND(g_eli, crypto, 1, 1, 1);