/*-
 * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cons.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/uio.h>
#include <sys/vnode.h>

#include <vm/uma.h>

#include <geom/geom.h>
#include <geom/eli/g_eli.h>
#include <geom/eli/pkcs5v2.h>

#include <crypto/intake.h>

FEATURE(geom_eli, "GEOM crypto module");

MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
static int g_eli_version = G_ELI_VERSION;
SYSCTL_INT(_kern_geom_eli, OID_AUTO, version, CTLFLAG_RD, &g_eli_version, 0,
    "GELI version");
int g_eli_debug = 0;
SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RWTUN, &g_eli_debug, 0,
    "Debug level");
static u_int g_eli_tries = 3;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RWTUN, &g_eli_tries, 0,
    "Number of tries for entering the passphrase");
static u_int g_eli_visible_passphrase = GETS_NOECHO;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RWTUN,
    &g_eli_visible_passphrase, 0,
    "Visibility of passphrase prompt (0 = invisible, 1 = visible, 2 = asterisk)");
u_int g_eli_overwrites = G_ELI_OVERWRITES;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RWTUN, &g_eli_overwrites,
    0, "Number of times on-disk keys should be overwritten when destroying them");
static u_int g_eli_threads = 0;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RWTUN, &g_eli_threads, 0,
    "Number of threads doing crypto work");
u_int g_eli_batch = 0;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RWTUN, &g_eli_batch, 0,
    "Use crypto operations batching");

/*
 * Passphrase cached during boot, in order to be more user-friendly if
 * there are multiple providers using the same passphrase.
 */
static char cached_passphrase[256];
static u_int g_eli_boot_passcache = 1;
TUNABLE_INT("kern.geom.eli.boot_passcache", &g_eli_boot_passcache);
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, boot_passcache, CTLFLAG_RD,
    &g_eli_boot_passcache, 0,
    "Passphrases are cached during boot process for possible reuse");
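
/*
 * Copy a passphrase that the boot loader may have placed in the kernel
 * environment (kern.geom.eli.passphrase) into the boot-time cache and
 * wipe it from the environment afterwards.
 */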
static void
fetch_loader_passphrase(void * dummy)
{
	char * env_passphrase;

	KASSERT(dynamic_kenv, ("need dynamic kenv"));

	if ((env_passphrase = kern_getenv("kern.geom.eli.passphrase")) != NULL) {
		/* Extract passphrase from the environment. */
		strlcpy(cached_passphrase, env_passphrase,
		    sizeof(cached_passphrase));
		freeenv(env_passphrase);

		/* Wipe the passphrase from the environment. */
		kern_unsetenv("kern.geom.eli.passphrase");
	}
}
SYSINIT(geli_fetch_loader_passphrase, SI_SUB_KMEM + 1, SI_ORDER_ANY,
    fetch_loader_passphrase, NULL);

static void
zero_boot_passcache(void)
{

	explicit_bzero(cached_passphrase, sizeof(cached_passphrase));
}

static void
zero_geli_intake_keys(void)
{
	struct keybuf *keybuf;
	int i;

	if ((keybuf = get_keybuf()) != NULL) {
		/* Scan the key buffer, clear all GELI keys. */
		for (i = 0; i < keybuf->kb_nents; i++) {
			if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) {
				explicit_bzero(keybuf->kb_ents[i].ke_data,
				    sizeof(keybuf->kb_ents[i].ke_data));
				keybuf->kb_ents[i].ke_type = KEYBUF_TYPE_NONE;
			}
		}
	}
}

static void
zero_intake_passcache(void *dummy)
{
	zero_boot_passcache();
	zero_geli_intake_keys();
}
EVENTHANDLER_DEFINE(mountroot, zero_intake_passcache, NULL, 0);

static eventhandler_tag g_eli_pre_sync = NULL;

static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static void g_eli_init(struct g_class *mp);
static void g_eli_fini(struct g_class *mp);

static g_taste_t g_eli_taste;
static g_dumpconf_t g_eli_dumpconf;

struct g_class g_eli_class = {
	.name = G_ELI_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_eli_config,
	.taste = g_eli_taste,
	.destroy_geom = g_eli_destroy_geom,
	.init = g_eli_init,
	.fini = g_eli_fini
};


/*
 * Code paths:
 * BIO_READ:
 *	g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */


/*
 * EAGAIN from crypto(9) means that the request was probably rescheduled to a
 * different crypto accelerator or something along those lines.
 * The function updates the SID and reruns the operation.
 */
int
g_eli_crypto_rerun(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct bio *bp;
	int error;

	bp = (struct bio *)crp->crp_opaque;
	sc = bp->bio_to->geom->softc;
	LIST_FOREACH(wr, &sc->sc_workers, w_next) {
		if (wr->w_number == bp->bio_pflags)
			break;
	}
	KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
	G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).",
	    bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid,
	    (uintmax_t)crp->crp_sid);
	wr->w_sid = crp->crp_sid;
	crp->crp_etype = 0;
	error = crypto_dispatch(crp);
	if (error == 0)
		return (0);
	G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
	crp->crp_etype = error;
	return (error);
}

/*
 * The function is called after reading encrypted data from the provider.
 *
 * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 */
void
g_eli_read_done(struct bio *bp)
{
	struct g_eli_softc *sc;
	struct bio *pbp;

	G_ELI_LOGREQ(2, bp, "Request done.");
	pbp = bp->bio_parent;
	if (pbp->bio_error == 0 && bp->bio_error != 0)
		pbp->bio_error = bp->bio_error;
	g_destroy_bio(bp);
	/*
	 * Do we have all sectors already?
	 */
	pbp->bio_inbed++;
	if (pbp->bio_inbed < pbp->bio_children)
		return;
	sc = pbp->bio_to->geom->softc;
	if (pbp->bio_error != 0) {
		G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__,
		    pbp->bio_error);
		pbp->bio_completed = 0;
		if (pbp->bio_driver2 != NULL) {
			free(pbp->bio_driver2, M_ELI);
			pbp->bio_driver2 = NULL;
		}
		g_io_deliver(pbp, pbp->bio_error);
		atomic_subtract_int(&sc->sc_inflight, 1);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, pbp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

/*
 * The function is called after we encrypt and write data.
 *
 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
 */
void
g_eli_write_done(struct bio *bp)
{
	struct g_eli_softc *sc;
	struct bio *pbp;

	G_ELI_LOGREQ(2, bp, "Request done.");
	pbp = bp->bio_parent;
	if (pbp->bio_error == 0 && bp->bio_error != 0)
		pbp->bio_error = bp->bio_error;
	g_destroy_bio(bp);
	/*
	 * Do we have all sectors already?
	 */
	pbp->bio_inbed++;
	if (pbp->bio_inbed < pbp->bio_children)
		return;
	free(pbp->bio_driver2, M_ELI);
	pbp->bio_driver2 = NULL;
	if (pbp->bio_error != 0) {
		G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__,
		    pbp->bio_error);
		pbp->bio_completed = 0;
	} else
		pbp->bio_completed = pbp->bio_length;

	/*
	 * Write is finished, send it up.
	 */
	sc = pbp->bio_to->geom->softc;
	g_io_deliver(pbp, pbp->bio_error);
	atomic_subtract_int(&sc->sc_inflight, 1);
}

/*
 * This function should never be called, but GEOM requires that an ->orphan()
 * method is set for every geom.
 */
static void
g_eli_orphan_spoil_assert(struct g_consumer *cp)
{

	panic("Function %s() called for %s.", __func__, cp->geom->name);
}

static void
g_eli_orphan(struct g_consumer *cp)
{
	struct g_eli_softc *sc;

	g_topology_assert();
	sc = cp->geom->softc;
	if (sc == NULL)
		return;
	g_eli_destroy(sc, TRUE);
}

/*
 * BIO_READ:
 *	G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 * BIO_WRITE:
 *	G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
static void
g_eli_start(struct bio *bp)
{
	struct g_eli_softc *sc;
	struct g_consumer *cp;
	struct bio *cbp;

	sc = bp->bio_to->geom->softc;
	KASSERT(sc != NULL,
	    ("Provider's error should be set (error=%d)(device=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_ELI_LOGREQ(2, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_GETATTR:
	case BIO_FLUSH:
	case BIO_ZONE:
		break;
	case BIO_DELETE:
		/*
		 * If the user hasn't set the NODELETE flag, we just pass
		 * it down the stack and let the layers beneath us do (or
		 * not) whatever they do with it.  If they have, we
		 * reject it.  A possible extension would be an
		 * additional flag to take it as a hint to shred the data
		 * with [multiple?] overwrites.
		 */
		if (!(sc->sc_flags & G_ELI_FLAG_NODELETE))
			break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp->bio_driver1 = cbp;
	bp->bio_pflags = G_ELI_NEW_BIO;
	switch (bp->bio_cmd) {
	case BIO_READ:
		if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
			g_eli_crypto_read(sc, bp, 0);
			break;
		}
		/* FALLTHROUGH */
	case BIO_WRITE:
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_tail(&sc->sc_queue, bp);
		mtx_unlock(&sc->sc_queue_mtx);
		wakeup(sc);
		break;
	case BIO_GETATTR:
	case BIO_FLUSH:
	case BIO_DELETE:
	case BIO_ZONE:
		cbp->bio_done = g_std_done;
		cp = LIST_FIRST(&sc->sc_geom->consumer);
		cbp->bio_to = cp->provider;
		G_ELI_LOGREQ(2, cbp, "Sending request.");
		g_io_request(cbp, cp);
		break;
	}
}

static int
g_eli_newsession(struct g_eli_worker *wr)
{
	struct g_eli_softc *sc;
	struct cryptoini crie, cria;
	int error;

	sc = wr->w_softc;

	bzero(&crie, sizeof(crie));
	crie.cri_alg = sc->sc_ealgo;
	crie.cri_klen = sc->sc_ekeylen;
	if (sc->sc_ealgo == CRYPTO_AES_XTS)
		crie.cri_klen <<= 1;
	if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) {
		crie.cri_key = g_eli_key_hold(sc, 0,
		    LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize);
	} else {
		crie.cri_key = sc->sc_ekey;
	}
	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
		bzero(&cria, sizeof(cria));
		cria.cri_alg = sc->sc_aalgo;
		cria.cri_klen = sc->sc_akeylen;
		cria.cri_key = sc->sc_akey;
		crie.cri_next = &cria;
	}

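	/*
	 * If the crypto backend is not known yet, try to create a hardware
	 * session first and fall back to software; remember the result in
	 * sc_crypto so that the remaining workers use the same backend.
	 */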
	switch (sc->sc_crypto) {
	case G_ELI_CRYPTO_SW:
		error = crypto_newsession(&wr->w_sid, &crie,
		    CRYPTOCAP_F_SOFTWARE);
		break;
	case G_ELI_CRYPTO_HW:
		error = crypto_newsession(&wr->w_sid, &crie,
		    CRYPTOCAP_F_HARDWARE);
		break;
	case G_ELI_CRYPTO_UNKNOWN:
		error = crypto_newsession(&wr->w_sid, &crie,
		    CRYPTOCAP_F_HARDWARE);
		if (error == 0) {
			mtx_lock(&sc->sc_queue_mtx);
			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
				sc->sc_crypto = G_ELI_CRYPTO_HW;
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			error = crypto_newsession(&wr->w_sid, &crie,
			    CRYPTOCAP_F_SOFTWARE);
			mtx_lock(&sc->sc_queue_mtx);
			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
				sc->sc_crypto = G_ELI_CRYPTO_SW;
			mtx_unlock(&sc->sc_queue_mtx);
		}
		break;
	default:
		panic("%s: invalid condition", __func__);
	}

	if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0)
		g_eli_key_drop(sc, crie.cri_key);

	return (error);
}

static void
g_eli_freesession(struct g_eli_worker *wr)
{

	crypto_freesession(wr->w_sid);
}

static void
g_eli_cancel(struct g_eli_softc *sc)
{
	struct bio *bp;

	mtx_assert(&sc->sc_queue_mtx, MA_OWNED);

	while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
		KASSERT(bp->bio_pflags == G_ELI_NEW_BIO,
		    ("Not new bio when canceling (bp=%p).", bp));
		g_io_deliver(bp, ENXIO);
	}
}

static struct bio *
g_eli_takefirst(struct g_eli_softc *sc)
{
	struct bio *bp;

	mtx_assert(&sc->sc_queue_mtx, MA_OWNED);

	if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND))
		return (bioq_takefirst(&sc->sc_queue));
	/*
	 * Device suspended, so we skip new I/O requests.
	 */
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_pflags != G_ELI_NEW_BIO)
			break;
	}
	if (bp != NULL)
		bioq_remove(&sc->sc_queue, bp);
	return (bp);
}

/*
 * This is the main function for the kernel worker threads when we don't have
 * hardware acceleration and have to do cryptography in software.
 * A dedicated thread is needed so we don't slow down the g_up/g_down GEOM
 * threads with crypto work.
 */
static void
g_eli_worker(void *arg)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct bio *bp;
	int error;

	wr = arg;
	sc = wr->w_softc;
#ifdef EARLY_AP_STARTUP
	MPASS(!sc->sc_cpubind || smp_started);
#elif defined(SMP)
	/* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
	if (sc->sc_cpubind) {
		while (!smp_started)
			tsleep(wr, 0, "geli:smp", hz / 4);
	}
#endif
	thread_lock(curthread);
	sched_prio(curthread, PUSER);
	if (sc->sc_cpubind)
		sched_bind(curthread, wr->w_number % mp_ncpus);
	thread_unlock(curthread);

	G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);

	for (;;) {
		mtx_lock(&sc->sc_queue_mtx);
again:
		bp = g_eli_takefirst(sc);
		if (bp == NULL) {
			if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
				g_eli_cancel(sc);
				LIST_REMOVE(wr, w_next);
				g_eli_freesession(wr);
				free(wr, M_ELI);
				G_ELI_DEBUG(1, "Thread %s exiting.",
				    curthread->td_proc->p_comm);
				wakeup(&sc->sc_workers);
				mtx_unlock(&sc->sc_queue_mtx);
				kproc_exit(0);
			}
			while (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
				if (sc->sc_inflight > 0) {
					G_ELI_DEBUG(0, "inflight=%d",
					    sc->sc_inflight);
					/*
					 * We still have inflight BIOs, so
					 * sleep and retry.
					 */
					msleep(sc, &sc->sc_queue_mtx, PRIBIO,
					    "geli:inf", hz / 5);
					goto again;
				}
				/*
				 * Suspend requested, mark the worker as
				 * suspended and go to sleep.
				 */
				if (wr->w_active) {
					g_eli_freesession(wr);
					wr->w_active = FALSE;
				}
				wakeup(&sc->sc_workers);
				msleep(sc, &sc->sc_queue_mtx, PRIBIO,
				    "geli:suspend", 0);
				if (!wr->w_active &&
				    !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) {
					error = g_eli_newsession(wr);
					KASSERT(error == 0,
					    ("g_eli_newsession() failed on resume (error=%d)",
					    error));
					wr->w_active = TRUE;
				}
				goto again;
			}
			msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0);
			continue;
		}
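		/*
		 * A bio still marked G_ELI_NEW_BIO came straight from
		 * g_eli_start() and no crypto work has been started for it
		 * yet; anything else is a completed read that now needs to
		 * be decrypted (and verified, for authenticated providers).
		 */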
		if (bp->bio_pflags == G_ELI_NEW_BIO)
			atomic_add_int(&sc->sc_inflight, 1);
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp->bio_pflags == G_ELI_NEW_BIO) {
			bp->bio_pflags = 0;
			if (sc->sc_flags & G_ELI_FLAG_AUTH) {
				if (bp->bio_cmd == BIO_READ)
					g_eli_auth_read(sc, bp);
				else
					g_eli_auth_run(wr, bp);
			} else {
				if (bp->bio_cmd == BIO_READ)
					g_eli_crypto_read(sc, bp, 1);
				else
					g_eli_crypto_run(wr, bp);
			}
		} else {
			if (sc->sc_flags & G_ELI_FLAG_AUTH)
				g_eli_auth_run(wr, bp);
			else
				g_eli_crypto_run(wr, bp);
		}
	}
}

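/*
 * Read the GELI metadata from the last sector of the given provider and
 * decode it into 'md'.  A temporary geom and consumer are created just to
 * access the provider and are destroyed before returning.
 */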
int
g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
    struct g_eli_metadata *md)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	u_char *buf = NULL;
	int error;

	g_topology_assert();

	gp = g_new_geomf(mp, "eli:taste");
	gp->start = g_eli_start;
	gp->access = g_std_access;
	/*
	 * g_eli_read_metadata() is always called from the event thread.
	 * Our geom is created and destroyed in the same event, so no
	 * orphan or spoil event can occur in the meantime.
	 */
	gp->orphan = g_eli_orphan_spoil_assert;
	gp->spoiled = g_eli_orphan_spoil_assert;
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error != 0)
		goto end;
	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		goto end;
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	if (buf == NULL)
		goto end;
	error = eli_metadata_decode(buf, md);
	if (error != 0)
		goto end;
	/* Metadata was read and decoded successfully. */
end:
	if (buf != NULL)
		g_free(buf);
	if (cp->provider != NULL) {
		if (cp->acr == 1)
			g_access(cp, -1, 0, 0);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (error);
}

/*
 * This function is called on the last close of the provider if the user
 * requested that it be detached when that happens.
 */
static void
g_eli_last_close(void *arg, int flags __unused)
{
	struct g_geom *gp;
	char gpname[64];
	int error;

	g_topology_assert();
	gp = arg;
	strlcpy(gpname, gp->name, sizeof(gpname));
	error = g_eli_destroy(gp->softc, TRUE);
	KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
	    gpname, error));
	G_ELI_DEBUG(0, "Detached %s on last close.", gpname);
}

int
g_eli_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_eli_softc *sc;
	struct g_geom *gp;

	gp = pp->geom;
	sc = gp->softc;

	if (dw > 0) {
		if (sc->sc_flags & G_ELI_FLAG_RO) {
			/* Deny write attempts. */
			return (EROFS);
		}
		/* Someone is opening us for write, we need to remember that. */
		sc->sc_flags |= G_ELI_FLAG_WOPEN;
		return (0);
	}
	/* Is this the last close? */
	if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
		return (0);

	/*
	 * Automatically detach on last close if requested.
	 */
	if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
	    (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
		g_post_event(g_eli_last_close, gp, M_WAITOK, NULL);
	}
	return (0);
}

static int
g_eli_cpu_is_disabled(int cpu)
{
#ifdef SMP
	return (CPU_ISSET(cpu, &hlt_cpus_mask));
#else
	return (0);
#endif
}

struct g_geom *
g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
    const struct g_eli_metadata *md, const u_char *mkey, int nkey)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp;
	u_int i, threads;
	int error;

	G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);

	gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
	sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
	gp->start = g_eli_start;
	/*
	 * Spoiling can happen even though we have the provider open
	 * exclusively, e.g. through media change events.
	 */
	gp->spoiled = g_eli_orphan;
	gp->orphan = g_eli_orphan;
	gp->dumpconf = g_eli_dumpconf;
	/*
	 * If the detach-on-last-close feature is not enabled and we don't
	 * operate on a read-only provider, we can simply use g_std_access().
	 */
	if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
		gp->access = g_eli_access;
	else
		gp->access = g_std_access;

	eli_metadata_softc(sc, md, bpp->sectorsize, bpp->mediasize);
	sc->sc_nkey = nkey;

	gp->softc = sc;
	sc->sc_geom = gp;

	bioq_init(&sc->sc_queue);
	mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);
	mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF);

	pp = NULL;
	cp = g_new_consumer(gp);
	error = g_attach(cp, bpp);
	if (error != 0) {
		if (req != NULL) {
			gctl_error(req, "Cannot attach to %s (error=%d).",
			    bpp->name, error);
		} else {
			G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
			    bpp->name, error);
		}
		goto failed;
	}
	/*
	 * Keep the provider open all the time, so we can run critical tasks,
	 * like Master Keys deletion, without wondering whether we can open
	 * the provider or not.
	 * We only skip opening it for writing when the user requested
	 * read-only access.
	 */
	if (sc->sc_flags & G_ELI_FLAG_RO)
		error = g_access(cp, 1, 0, 1);
	else
		error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		if (req != NULL) {
			gctl_error(req, "Cannot access %s (error=%d).",
			    bpp->name, error);
		} else {
			G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
			    bpp->name, error);
		}
		goto failed;
	}

	/*
	 * Remember the keys in our softc structure.
	 */
	g_eli_mkey_propagate(sc, mkey);

	LIST_INIT(&sc->sc_workers);

	threads = g_eli_threads;
	if (threads == 0)
		threads = mp_ncpus;
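	/*
	 * Bind workers to CPUs only when we run exactly one worker per CPU
	 * on an SMP machine; with a custom thread count the scheduler is
	 * left to place them.
	 */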
	sc->sc_cpubind = (mp_ncpus > 1 && threads == mp_ncpus);
	for (i = 0; i < threads; i++) {
		if (g_eli_cpu_is_disabled(i)) {
			G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
			    bpp->name, i);
			continue;
		}
		wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
		wr->w_softc = sc;
		wr->w_number = i;
		wr->w_active = TRUE;

		error = g_eli_newsession(wr);
		if (error != 0) {
			free(wr, M_ELI);
			if (req != NULL) {
				gctl_error(req, "Cannot set up crypto session "
				    "for %s (error=%d).", bpp->name, error);
			} else {
				G_ELI_DEBUG(1, "Cannot set up crypto session "
				    "for %s (error=%d).", bpp->name, error);
			}
			goto failed;
		}

		error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
		    "g_eli[%u] %s", i, bpp->name);
		if (error != 0) {
			g_eli_freesession(wr);
			free(wr, M_ELI);
			if (req != NULL) {
				gctl_error(req, "Cannot create kernel thread "
				    "for %s (error=%d).", bpp->name, error);
			} else {
				G_ELI_DEBUG(1, "Cannot create kernel thread "
				    "for %s (error=%d).", bpp->name, error);
			}
			goto failed;
		}
		LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
	}

	/*
	 * Create decrypted provider.
	 */
	pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
	pp->mediasize = sc->sc_mediasize;
	pp->sectorsize = sc->sc_sectorsize;

	g_error_provider(pp, 0);

	G_ELI_DEBUG(0, "Device %s created.", pp->name);
	G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
	    sc->sc_ekeylen);
	if (sc->sc_flags & G_ELI_FLAG_AUTH)
		G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
	G_ELI_DEBUG(0, "    Crypto: %s",
	    sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
	return (gp);
failed:
	mtx_lock(&sc->sc_queue_mtx);
	sc->sc_flags |= G_ELI_FLAG_DESTROY;
	wakeup(sc);
	/*
	 * Wait for the kernel threads to self-destruct.
	 */
	while (!LIST_EMPTY(&sc->sc_workers)) {
		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
		    "geli:destroy", 0);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	if (cp->provider != NULL) {
		if (cp->acr == 1)
			g_access(cp, -1, -1, -1);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	g_eli_key_destroy(sc);
	bzero(sc, sizeof(*sc));
	free(sc, M_ELI);
	return (NULL);
}

int
g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_ELI_DEBUG(1, "Device %s is still open, so it "
			    "cannot be definitely removed.", pp->name);
			sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
			gp->access = g_eli_access;
			g_wither_provider(pp, ENXIO);
			return (EBUSY);
		} else {
			G_ELI_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	}

	mtx_lock(&sc->sc_queue_mtx);
	sc->sc_flags |= G_ELI_FLAG_DESTROY;
	wakeup(sc);
	while (!LIST_EMPTY(&sc->sc_workers)) {
		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
		    "geli:destroy", 0);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	gp->softc = NULL;
	g_eli_key_destroy(sc);
	bzero(sc, sizeof(*sc));
	free(sc, M_ELI);

	if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
		G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom_close(gp, ENXIO);

	return (0);
}

static int
g_eli_destroy_geom(struct gctl_req *req __unused,
    struct g_class *mp __unused, struct g_geom *gp)
{
	struct g_eli_softc *sc;

	sc = gp->softc;
	return (g_eli_destroy(sc, FALSE));
}

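/*
 * Key files are passed in by the loader as preloaded data with type
 * "<provider>:geli_keyfile<N>" (or just "<provider>:geli_keyfile" when there
 * is only one).  Each one is fed into the HMAC context used to derive the
 * user key; the return value is the number of key files loaded.
 */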
static int
g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
{
	u_char *keyfile, *data;
	char *file, name[64];
	size_t size;
	int i;

	for (i = 0; ; i++) {
		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
		keyfile = preload_search_by_type(name);
		if (keyfile == NULL && i == 0) {
			/*
			 * If there is only one keyfile, allow a simpler name.
			 */
			snprintf(name, sizeof(name), "%s:geli_keyfile", provider);
			keyfile = preload_search_by_type(name);
		}
		if (keyfile == NULL)
			return (i);	/* Return number of loaded keyfiles. */
		data = preload_fetch_addr(keyfile);
		if (data == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file data for %s.",
			    name);
			return (0);
		}
		size = preload_fetch_size(keyfile);
		if (size == 0) {
			G_ELI_DEBUG(0, "Cannot find key file size for %s.",
			    name);
			return (0);
		}
		file = preload_search_info(keyfile, MODINFO_NAME);
		if (file == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file name for %s.",
			    name);
			return (0);
		}
		G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
		    provider, name);
		g_eli_crypto_hmac_update(ctx, data, size);
	}
}

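/*
 * Wipe the preloaded key file contents once they are no longer needed, so
 * the key material does not linger in memory.
 */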
static void
g_eli_keyfiles_clear(const char *provider)
{
	u_char *keyfile, *data;
	char name[64];
	size_t size;
	int i;

	for (i = 0; ; i++) {
		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
		keyfile = preload_search_by_type(name);
		if (keyfile == NULL)
			return;
		data = preload_fetch_addr(keyfile);
		size = preload_fetch_size(keyfile);
		if (data != NULL && size != 0)
			bzero(data, size);
	}
}

/*
 * Tasting is only done at boot time.
 * We detect providers which should be attached before the root filesystem is
 * mounted.
 */
static struct g_geom *
g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_eli_metadata md;
	struct g_geom *gp;
	struct hmac_ctx ctx;
	char passphrase[256];
	u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
	u_int i, nkey, nkeyfiles, tries, showpass;
	int error;
	struct keybuf *keybuf;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	if (root_mounted() || g_eli_tries == 0)
		return (NULL);

	G_ELI_DEBUG(3, "Tasting %s.", pp->name);

	error = g_eli_read_metadata(mp, pp, &md);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_ELI_VERSION) {
		printf("geom_eli.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if (md.md_provsize != pp->mediasize)
		return (NULL);
	/* Should we attach it on boot? */
	if (!(md.md_flags & G_ELI_FLAG_BOOT))
		return (NULL);
	if (md.md_keys == 0x00) {
		G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
		return (NULL);
	}
	if (md.md_iterations == -1) {
		/* If there is no passphrase, we try only once. */
		tries = 1;
	} else {
		/* Ask for the passphrase no more than g_eli_tries times. */
		tries = g_eli_tries;
	}

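	/*
	 * Keys handed in by the boot loader via the keybuf interface are
	 * tried first; if one of them decrypts the Master Key there is no
	 * need to ask for a passphrase at all.
	 */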
	if ((keybuf = get_keybuf()) != NULL) {
		/* Scan the key buffer, try all GELI keys. */
		for (i = 0; i < keybuf->kb_nents; i++) {
			if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) {
				memcpy(key, keybuf->kb_ents[i].ke_data,
				    sizeof(key));

				if (g_eli_mkey_decrypt(&md, key,
				    mkey, &nkey) == 0 ) {
					explicit_bzero(key, sizeof(key));
					goto have_key;
				}
			}
		}
	}

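	/*
	 * When a passphrase is required, iteration 0 tries the one cached
	 * during boot (if caching is enabled) and the following iterations
	 * prompt on the console, up to g_eli_tries times.
	 */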
	for (i = 0; i <= tries; i++) {
		g_eli_crypto_hmac_init(&ctx, NULL, 0);

		/*
		 * Load all key files.
		 */
		nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);

		if (nkeyfiles == 0 && md.md_iterations == -1) {
			/*
			 * No key files and no passphrase: something is
			 * definitely wrong here.
			 * geli(8) doesn't allow such a configuration, so
			 * assume that there really was no passphrase and that
			 * the key files are not properly defined in
			 * loader.conf.
			 */
			G_ELI_DEBUG(0,
			    "Found no key files in loader.conf for %s.",
			    pp->name);
			return (NULL);
		}

		/* Ask for the passphrase if defined. */
		if (md.md_iterations >= 0) {
			/* Try first with cached passphrase. */
			if (i == 0) {
				if (!g_eli_boot_passcache)
					continue;
				memcpy(passphrase, cached_passphrase,
				    sizeof(passphrase));
			} else {
				printf("Enter passphrase for %s: ", pp->name);
				showpass = g_eli_visible_passphrase;
				if ((md.md_flags & G_ELI_FLAG_GELIDISPLAYPASS) != 0)
					showpass = GETS_ECHOPASS;
				cngets(passphrase, sizeof(passphrase),
				    showpass);
				memcpy(cached_passphrase, passphrase,
				    sizeof(passphrase));
			}
		}

		/*
		 * Prepare Derived-Key from the user passphrase.
		 */
		if (md.md_iterations == 0) {
			g_eli_crypto_hmac_update(&ctx, md.md_salt,
			    sizeof(md.md_salt));
			g_eli_crypto_hmac_update(&ctx, passphrase,
			    strlen(passphrase));
			explicit_bzero(passphrase, sizeof(passphrase));
		} else if (md.md_iterations > 0) {
			u_char dkey[G_ELI_USERKEYLEN];

			pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
			    sizeof(md.md_salt), passphrase, md.md_iterations);
			bzero(passphrase, sizeof(passphrase));
			g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
			explicit_bzero(dkey, sizeof(dkey));
		}

		g_eli_crypto_hmac_final(&ctx, key, 0);

		/*
		 * Decrypt Master-Key.
		 */
		error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
		bzero(key, sizeof(key));
		if (error == -1) {
			if (i == tries) {
				G_ELI_DEBUG(0,
				    "Wrong key for %s. No tries left.",
				    pp->name);
				g_eli_keyfiles_clear(pp->name);
				return (NULL);
			}
			if (i > 0) {
				G_ELI_DEBUG(0,
				    "Wrong key for %s. Tries left: %u.",
				    pp->name, tries - i);
			}
			/* Try again. */
			continue;
		} else if (error > 0) {
			G_ELI_DEBUG(0,
			    "Cannot decrypt Master Key for %s (error=%d).",
			    pp->name, error);
			g_eli_keyfiles_clear(pp->name);
			return (NULL);
		}
		g_eli_keyfiles_clear(pp->name);
		G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
		break;
	}
have_key:

	/*
	 * We have the correct key, let's attach the provider.
	 */
	gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
	bzero(mkey, sizeof(mkey));
	bzero(&md, sizeof(md));
	if (gp == NULL) {
		G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
		    G_ELI_SUFFIX);
		return (NULL);
	}
	return (gp);
}

static void
g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_eli_softc *sc;

	g_topology_assert();
	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL || cp != NULL)
		return;	/* Nothing here. */

	sbuf_printf(sb, "%s<KeysTotal>%ju</KeysTotal>\n", indent,
	    (uintmax_t)sc->sc_ekeys_total);
	sbuf_printf(sb, "%s<KeysAllocated>%ju</KeysAllocated>\n", indent,
	    (uintmax_t)sc->sc_ekeys_allocated);
	sbuf_printf(sb, "%s<Flags>", indent);
	if (sc->sc_flags == 0)
		sbuf_printf(sb, "NONE");
	else {
		int first = 1;

#define ADD_FLAG(flag, name)	do {					\
	if (sc->sc_flags & (flag)) {					\
		if (!first)						\
			sbuf_printf(sb, ", ");				\
		else							\
			first = 0;					\
		sbuf_printf(sb, name);					\
	}								\
} while (0)
		ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND");
		ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY");
		ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
		ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
		ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
		ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
		ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
		ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
		ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
		ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
		ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
		ADD_FLAG(G_ELI_FLAG_NODELETE, "NODELETE");
		ADD_FLAG(G_ELI_FLAG_GELIBOOT, "GELIBOOT");
		ADD_FLAG(G_ELI_FLAG_GELIDISPLAYPASS, "GELIDISPLAYPASS");
#undef ADD_FLAG
	}
	sbuf_printf(sb, "</Flags>\n");

	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
		sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
		    sc->sc_nkey);
	}
	sbuf_printf(sb, "%s<Version>%u</Version>\n", indent, sc->sc_version);
	sbuf_printf(sb, "%s<Crypto>", indent);
	switch (sc->sc_crypto) {
	case G_ELI_CRYPTO_HW:
		sbuf_printf(sb, "hardware");
		break;
	case G_ELI_CRYPTO_SW:
		sbuf_printf(sb, "software");
		break;
	default:
		sbuf_printf(sb, "UNKNOWN");
		break;
	}
	sbuf_printf(sb, "</Crypto>\n");
	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
		sbuf_printf(sb,
		    "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
		    indent, g_eli_algo2str(sc->sc_aalgo));
	}
	sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
	    sc->sc_ekeylen);
	sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n",
	    indent, g_eli_algo2str(sc->sc_ealgo));
	sbuf_printf(sb, "%s<State>%s</State>\n", indent,
	    (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE");
}

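/*
 * Before the filesystems are synced on shutdown, detach every GELI provider:
 * those that are not open are destroyed right away, the rest are marked to
 * detach on last close.
 */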
static void
g_eli_shutdown_pre_sync(void *arg, int howto)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;
	struct g_provider *pp;
	struct g_eli_softc *sc;
	int error;

	mp = arg;
	g_topology_lock();
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		pp = LIST_FIRST(&gp->provider);
		KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
		if (pp->acr + pp->acw + pp->ace == 0)
			error = g_eli_destroy(sc, TRUE);
		else {
			sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
			gp->access = g_eli_access;
		}
	}
	g_topology_unlock();
}

static void
g_eli_init(struct g_class *mp)
{

	g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
	    g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
	if (g_eli_pre_sync == NULL)
		G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
}

static void
g_eli_fini(struct g_class *mp)
{

	if (g_eli_pre_sync != NULL)
		EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
}

DECLARE_GEOM_CLASS(g_eli_class, g_eli);
MODULE_DEPEND(g_eli, crypto, 1, 1, 1);