/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cons.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/uio.h>
#include <sys/vnode.h>

#include <vm/uma.h>

#include <geom/geom.h>
#include <geom/eli/g_eli.h>
#include <geom/eli/pkcs5v2.h>

#include <crypto/intake.h>

FEATURE(geom_eli, "GEOM crypto module");

MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
static int g_eli_version = G_ELI_VERSION;
SYSCTL_INT(_kern_geom_eli, OID_AUTO, version, CTLFLAG_RD, &g_eli_version, 0,
    "GELI version");
int g_eli_debug = 0;
SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RWTUN, &g_eli_debug, 0,
    "Debug level");
static u_int g_eli_tries = 3;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RWTUN, &g_eli_tries, 0,
    "Number of tries for entering the passphrase");
static u_int g_eli_visible_passphrase = GETS_NOECHO;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RWTUN,
    &g_eli_visible_passphrase, 0,
    "Visibility of passphrase prompt (0 = invisible, 1 = visible, 2 = asterisk)");
u_int g_eli_overwrites = G_ELI_OVERWRITES;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RWTUN, &g_eli_overwrites,
    0, "Number of times on-disk keys should be overwritten when destroying them");
static u_int g_eli_threads = 0;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RWTUN, &g_eli_threads, 0,
    "Number of threads doing crypto work");
u_int g_eli_batch = 0;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RWTUN, &g_eli_batch, 0,
    "Use crypto operations batching");

/*
 * Passphrase cached during boot, in order to be more user-friendly if
 * there are multiple providers using the same passphrase.
 */
static char cached_passphrase[256];
static u_int g_eli_boot_passcache = 1;
TUNABLE_INT("kern.geom.eli.boot_passcache", &g_eli_boot_passcache);
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, boot_passcache, CTLFLAG_RD,
    &g_eli_boot_passcache, 0,
    "Passphrases are cached during boot process for possible reuse");
static void
fetch_loader_passphrase(void * dummy)
{
	char * env_passphrase;

	KASSERT(dynamic_kenv, ("need dynamic kenv"));

	if ((env_passphrase = kern_getenv("kern.geom.eli.passphrase")) != NULL) {
		/* Extract passphrase from the environment. */
		strlcpy(cached_passphrase, env_passphrase,
		    sizeof(cached_passphrase));
		freeenv(env_passphrase);

		/* Wipe the passphrase from the environment. */
		kern_unsetenv("kern.geom.eli.passphrase");
	}
}
SYSINIT(geli_fetch_loader_passphrase, SI_SUB_KMEM + 1, SI_ORDER_ANY,
    fetch_loader_passphrase, NULL);

static void
zero_boot_passcache(void)
{

	explicit_bzero(cached_passphrase, sizeof(cached_passphrase));
}

static void
zero_geli_intake_keys(void)
{
	struct keybuf *keybuf;
	int i;

	if ((keybuf = get_keybuf()) != NULL) {
		/* Scan the key buffer, clear all GELI keys. */
		for (i = 0; i < keybuf->kb_nents; i++) {
			if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) {
				explicit_bzero(keybuf->kb_ents[i].ke_data,
				    sizeof(keybuf->kb_ents[i].ke_data));
				keybuf->kb_ents[i].ke_type = KEYBUF_TYPE_NONE;
			}
		}
	}
}

static void
zero_intake_passcache(void *dummy)
{
	zero_boot_passcache();
	zero_geli_intake_keys();
}
EVENTHANDLER_DEFINE(mountroot, zero_intake_passcache, NULL, 0);

static eventhandler_tag g_eli_pre_sync = NULL;

static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static void g_eli_init(struct g_class *mp);
static void g_eli_fini(struct g_class *mp);

static g_taste_t g_eli_taste;
static g_dumpconf_t g_eli_dumpconf;

struct g_class g_eli_class = {
	.name = G_ELI_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_eli_config,
	.taste = g_eli_taste,
	.destroy_geom = g_eli_destroy_geom,
	.init = g_eli_init,
	.fini = g_eli_fini
};

/*
 * Code paths:
 * BIO_READ:
 *	g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */

/*
 * EAGAIN from crypto(9) means that we were probably rebalanced to another
 * crypto accelerator.
 * The function updates the SID and reruns the operation.
 */
int
g_eli_crypto_rerun(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct bio *bp;
	int error;

	bp = (struct bio *)crp->crp_opaque;
	sc = bp->bio_to->geom->softc;
	LIST_FOREACH(wr, &sc->sc_workers, w_next) {
		if (wr->w_number == bp->bio_pflags)
			break;
	}
	KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
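	/*
	 * Record the new session ID handed back by crypto(9) and dispatch
	 * the request again with a clean error status.
	 */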
"READ" : "WRITE", (uintmax_t)wr->w_sid, 204 (uintmax_t)crp->crp_sid); 205 wr->w_sid = crp->crp_sid; 206 crp->crp_etype = 0; 207 error = crypto_dispatch(crp); 208 if (error == 0) 209 return (0); 210 G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error); 211 crp->crp_etype = error; 212 return (error); 213 } 214 215 static void 216 g_eli_getattr_done(struct bio *bp) 217 { 218 if (bp->bio_error == 0 && 219 !strcmp(bp->bio_attribute, "GEOM::physpath")) { 220 strlcat(bp->bio_data, "/eli", bp->bio_length); 221 } 222 g_std_done(bp); 223 } 224 225 /* 226 * The function is called afer reading encrypted data from the provider. 227 * 228 * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver 229 */ 230 void 231 g_eli_read_done(struct bio *bp) 232 { 233 struct g_eli_softc *sc; 234 struct bio *pbp; 235 236 G_ELI_LOGREQ(2, bp, "Request done."); 237 pbp = bp->bio_parent; 238 if (pbp->bio_error == 0 && bp->bio_error != 0) 239 pbp->bio_error = bp->bio_error; 240 g_destroy_bio(bp); 241 /* 242 * Do we have all sectors already? 243 */ 244 pbp->bio_inbed++; 245 if (pbp->bio_inbed < pbp->bio_children) 246 return; 247 sc = pbp->bio_to->geom->softc; 248 if (pbp->bio_error != 0) { 249 G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__, 250 pbp->bio_error); 251 pbp->bio_completed = 0; 252 if (pbp->bio_driver2 != NULL) { 253 free(pbp->bio_driver2, M_ELI); 254 pbp->bio_driver2 = NULL; 255 } 256 g_io_deliver(pbp, pbp->bio_error); 257 atomic_subtract_int(&sc->sc_inflight, 1); 258 return; 259 } 260 mtx_lock(&sc->sc_queue_mtx); 261 bioq_insert_tail(&sc->sc_queue, pbp); 262 mtx_unlock(&sc->sc_queue_mtx); 263 wakeup(sc); 264 } 265 266 /* 267 * The function is called after we encrypt and write data. 268 * 269 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver 270 */ 271 void 272 g_eli_write_done(struct bio *bp) 273 { 274 struct g_eli_softc *sc; 275 struct bio *pbp; 276 277 G_ELI_LOGREQ(2, bp, "Request done."); 278 pbp = bp->bio_parent; 279 if (pbp->bio_error == 0 && bp->bio_error != 0) 280 pbp->bio_error = bp->bio_error; 281 g_destroy_bio(bp); 282 /* 283 * Do we have all sectors already? 284 */ 285 pbp->bio_inbed++; 286 if (pbp->bio_inbed < pbp->bio_children) 287 return; 288 free(pbp->bio_driver2, M_ELI); 289 pbp->bio_driver2 = NULL; 290 if (pbp->bio_error != 0) { 291 G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__, 292 pbp->bio_error); 293 pbp->bio_completed = 0; 294 } else 295 pbp->bio_completed = pbp->bio_length; 296 297 /* 298 * Write is finished, send it up. 299 */ 300 sc = pbp->bio_to->geom->softc; 301 g_io_deliver(pbp, pbp->bio_error); 302 atomic_subtract_int(&sc->sc_inflight, 1); 303 } 304 305 /* 306 * This function should never be called, but GEOM made as it set ->orphan() 307 * method for every geom. 
static void
g_eli_orphan_spoil_assert(struct g_consumer *cp)
{

	panic("Function %s() called for %s.", __func__, cp->geom->name);
}

static void
g_eli_orphan(struct g_consumer *cp)
{
	struct g_eli_softc *sc;

	g_topology_assert();
	sc = cp->geom->softc;
	if (sc == NULL)
		return;
	g_eli_destroy(sc, TRUE);
}

/*
 * BIO_READ:
 *	G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 * BIO_WRITE:
 *	G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
static void
g_eli_start(struct bio *bp)
{
	struct g_eli_softc *sc;
	struct g_consumer *cp;
	struct bio *cbp;

	sc = bp->bio_to->geom->softc;
	KASSERT(sc != NULL,
	    ("Provider's error should be set (error=%d)(device=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_ELI_LOGREQ(2, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_GETATTR:
	case BIO_FLUSH:
	case BIO_ZONE:
		break;
	case BIO_DELETE:
		/*
		 * If the user hasn't set the NODELETE flag, we just pass
		 * it down the stack and let the layers beneath us do (or
		 * not) whatever they do with it.  If they have, we
		 * reject it.  A possible extension would be an
		 * additional flag to take it as a hint to shred the data
		 * with [multiple?] overwrites.
		 */
		if (!(sc->sc_flags & G_ELI_FLAG_NODELETE))
			break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp->bio_driver1 = cbp;
	bp->bio_pflags = G_ELI_NEW_BIO;
	switch (bp->bio_cmd) {
	case BIO_READ:
		if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
			g_eli_crypto_read(sc, bp, 0);
			break;
		}
		/* FALLTHROUGH */
	case BIO_WRITE:
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_tail(&sc->sc_queue, bp);
		mtx_unlock(&sc->sc_queue_mtx);
		wakeup(sc);
		break;
	case BIO_GETATTR:
	case BIO_FLUSH:
	case BIO_DELETE:
	case BIO_ZONE:
		if (bp->bio_cmd == BIO_GETATTR)
			cbp->bio_done = g_eli_getattr_done;
		else
			cbp->bio_done = g_std_done;
		cp = LIST_FIRST(&sc->sc_geom->consumer);
		cbp->bio_to = cp->provider;
		G_ELI_LOGREQ(2, cbp, "Sending request.");
		g_io_request(cbp, cp);
		break;
	}
}

static int
g_eli_newsession(struct g_eli_worker *wr)
{
	struct g_eli_softc *sc;
	struct cryptoini crie, cria;
	int error;

	sc = wr->w_softc;

	bzero(&crie, sizeof(crie));
	crie.cri_alg = sc->sc_ealgo;
	crie.cri_klen = sc->sc_ekeylen;
	if (sc->sc_ealgo == CRYPTO_AES_XTS)
		crie.cri_klen <<= 1;
	if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) {
		crie.cri_key = g_eli_key_hold(sc, 0,
		    LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize);
	} else {
		crie.cri_key = sc->sc_ekey;
	}
	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
		bzero(&cria, sizeof(cria));
		cria.cri_alg = sc->sc_aalgo;
		cria.cri_klen = sc->sc_akeylen;
		cria.cri_key = sc->sc_akey;
		crie.cri_next = &cria;
	}

	switch (sc->sc_crypto) {
	case G_ELI_CRYPTO_SW:
		error = crypto_newsession(&wr->w_sid, &crie,
		    CRYPTOCAP_F_SOFTWARE);
		break;
	case G_ELI_CRYPTO_HW:
		error = crypto_newsession(&wr->w_sid, &crie,
		    CRYPTOCAP_F_HARDWARE);
		break;
	case G_ELI_CRYPTO_UNKNOWN:
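		/*
		 * Crypto backend not decided yet: try a hardware session
		 * first and fall back to software, remembering the result
		 * in sc_crypto for the remaining workers.
		 */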
		error = crypto_newsession(&wr->w_sid, &crie,
		    CRYPTOCAP_F_HARDWARE);
		if (error == 0) {
			mtx_lock(&sc->sc_queue_mtx);
			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
				sc->sc_crypto = G_ELI_CRYPTO_HW;
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			error = crypto_newsession(&wr->w_sid, &crie,
			    CRYPTOCAP_F_SOFTWARE);
			mtx_lock(&sc->sc_queue_mtx);
			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
				sc->sc_crypto = G_ELI_CRYPTO_SW;
			mtx_unlock(&sc->sc_queue_mtx);
		}
		break;
	default:
		panic("%s: invalid condition", __func__);
	}

	if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0)
		g_eli_key_drop(sc, crie.cri_key);

	return (error);
}

static void
g_eli_freesession(struct g_eli_worker *wr)
{

	crypto_freesession(wr->w_sid);
}

static void
g_eli_cancel(struct g_eli_softc *sc)
{
	struct bio *bp;

	mtx_assert(&sc->sc_queue_mtx, MA_OWNED);

	while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
		KASSERT(bp->bio_pflags == G_ELI_NEW_BIO,
		    ("Not new bio when canceling (bp=%p).", bp));
		g_io_deliver(bp, ENXIO);
	}
}

static struct bio *
g_eli_takefirst(struct g_eli_softc *sc)
{
	struct bio *bp;

	mtx_assert(&sc->sc_queue_mtx, MA_OWNED);

	if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND))
		return (bioq_takefirst(&sc->sc_queue));
	/*
	 * Device suspended, so we skip new I/O requests.
	 */
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_pflags != G_ELI_NEW_BIO)
			break;
	}
	if (bp != NULL)
		bioq_remove(&sc->sc_queue, bp);
	return (bp);
}

/*
 * This is the main function for the kernel worker thread when we don't have
 * hardware acceleration and we have to do cryptography in software.
 * A dedicated thread is needed, so we don't slow down the g_up/g_down GEOM
 * threads with crypto work.
 */
static void
g_eli_worker(void *arg)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct bio *bp;
	int error;

	wr = arg;
	sc = wr->w_softc;
#ifdef EARLY_AP_STARTUP
	MPASS(!sc->sc_cpubind || smp_started);
#elif defined(SMP)
	/* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
	if (sc->sc_cpubind) {
		while (!smp_started)
			tsleep(wr, 0, "geli:smp", hz / 4);
	}
#endif
	thread_lock(curthread);
	sched_prio(curthread, PUSER);
	if (sc->sc_cpubind)
		sched_bind(curthread, wr->w_number % mp_ncpus);
	thread_unlock(curthread);

	G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);

	for (;;) {
		mtx_lock(&sc->sc_queue_mtx);
again:
		bp = g_eli_takefirst(sc);
		if (bp == NULL) {
			if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
				g_eli_cancel(sc);
				LIST_REMOVE(wr, w_next);
				g_eli_freesession(wr);
				free(wr, M_ELI);
				G_ELI_DEBUG(1, "Thread %s exiting.",
				    curthread->td_proc->p_comm);
				wakeup(&sc->sc_workers);
				mtx_unlock(&sc->sc_queue_mtx);
				kproc_exit(0);
			}
			while (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
				if (sc->sc_inflight > 0) {
					G_ELI_DEBUG(0, "inflight=%d",
					    sc->sc_inflight);
					/*
					 * We still have inflight BIOs, so
					 * sleep and retry.
					 */
					msleep(sc, &sc->sc_queue_mtx, PRIBIO,
					    "geli:inf", hz / 5);
					goto again;
				}
				/*
				 * Suspend requested, mark the worker as
				 * suspended and go to sleep.
				 */
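				/*
				 * The crypto session is dropped here and
				 * re-created by g_eli_newsession() when the
				 * worker resumes below.
				 */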
				if (wr->w_active) {
					g_eli_freesession(wr);
					wr->w_active = FALSE;
				}
				wakeup(&sc->sc_workers);
				msleep(sc, &sc->sc_queue_mtx, PRIBIO,
				    "geli:suspend", 0);
				if (!wr->w_active &&
				    !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) {
					error = g_eli_newsession(wr);
					KASSERT(error == 0,
					    ("g_eli_newsession() failed on resume (error=%d)",
					    error));
					wr->w_active = TRUE;
				}
				goto again;
			}
			msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0);
			continue;
		}
		if (bp->bio_pflags == G_ELI_NEW_BIO)
			atomic_add_int(&sc->sc_inflight, 1);
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp->bio_pflags == G_ELI_NEW_BIO) {
			bp->bio_pflags = 0;
			if (sc->sc_flags & G_ELI_FLAG_AUTH) {
				if (bp->bio_cmd == BIO_READ)
					g_eli_auth_read(sc, bp);
				else
					g_eli_auth_run(wr, bp);
			} else {
				if (bp->bio_cmd == BIO_READ)
					g_eli_crypto_read(sc, bp, 1);
				else
					g_eli_crypto_run(wr, bp);
			}
		} else {
			if (sc->sc_flags & G_ELI_FLAG_AUTH)
				g_eli_auth_run(wr, bp);
			else
				g_eli_crypto_run(wr, bp);
		}
	}
}

int
g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
    struct g_eli_metadata *md)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	u_char *buf = NULL;
	int error;

	g_topology_assert();

	gp = g_new_geomf(mp, "eli:taste");
	gp->start = g_eli_start;
	gp->access = g_std_access;
	/*
	 * g_eli_read_metadata() is always called from the event thread.
	 * Our geom is created and destroyed in the same event, so there
	 * can be no orphan nor spoil event in the meantime.
	 */
	gp->orphan = g_eli_orphan_spoil_assert;
	gp->spoiled = g_eli_orphan_spoil_assert;
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error != 0)
		goto end;
	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		goto end;
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	if (buf == NULL)
		goto end;
	error = eli_metadata_decode(buf, md);
	if (error != 0)
		goto end;
	/* Metadata was read and decoded successfully. */
end:
	if (buf != NULL)
		g_free(buf);
	if (cp->provider != NULL) {
		if (cp->acr == 1)
			g_access(cp, -1, 0, 0);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (error);
}

/*
 * The function is called on the last close of the provider if the user
 * requested that it be detached when this situation occurs.
 */
static void
g_eli_last_close(void *arg, int flags __unused)
{
	struct g_geom *gp;
	char gpname[64];
	int error;

	g_topology_assert();
	gp = arg;
	strlcpy(gpname, gp->name, sizeof(gpname));
	error = g_eli_destroy(gp->softc, TRUE);
	KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
	    gpname, error));
	G_ELI_DEBUG(0, "Detached %s on last close.", gpname);
}
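
/*
 * Access method used when g_std_access() is not sufficient: it enforces
 * read-only operation and remembers write opens so that detach-on-last-close
 * works (see g_eli_create() and g_eli_destroy()).
 */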
int
g_eli_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_eli_softc *sc;
	struct g_geom *gp;

	gp = pp->geom;
	sc = gp->softc;

	if (dw > 0) {
		if (sc->sc_flags & G_ELI_FLAG_RO) {
			/* Deny write attempts. */
			return (EROFS);
		}
		/* Someone is opening us for write, we need to remember that. */
		sc->sc_flags |= G_ELI_FLAG_WOPEN;
		return (0);
	}
	/* Is this the last close? */
	if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
		return (0);

	/*
	 * Automatically detach on last close if requested.
	 */
	if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
	    (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
		g_post_event(g_eli_last_close, gp, M_WAITOK, NULL);
	}
	return (0);
}

static int
g_eli_cpu_is_disabled(int cpu)
{
#ifdef SMP
	return (CPU_ISSET(cpu, &hlt_cpus_mask));
#else
	return (0);
#endif
}

struct g_geom *
g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
    const struct g_eli_metadata *md, const u_char *mkey, int nkey)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp;
	u_int i, threads;
	int error;

	G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);

	gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
	sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
	gp->start = g_eli_start;
	/*
	 * Spoiling can happen even though we have the provider open
	 * exclusively, e.g. through media change events.
	 */
	gp->spoiled = g_eli_orphan;
	gp->orphan = g_eli_orphan;
	gp->dumpconf = g_eli_dumpconf;
	/*
	 * If the detach-on-last-close feature is not enabled and we don't
	 * operate on a read-only provider, we can simply use g_std_access().
	 */
	if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
		gp->access = g_eli_access;
	else
		gp->access = g_std_access;

	eli_metadata_softc(sc, md, bpp->sectorsize, bpp->mediasize);
	sc->sc_nkey = nkey;

	gp->softc = sc;
	sc->sc_geom = gp;

	bioq_init(&sc->sc_queue);
	mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);
	mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF);

	pp = NULL;
	cp = g_new_consumer(gp);
	error = g_attach(cp, bpp);
	if (error != 0) {
		if (req != NULL) {
			gctl_error(req, "Cannot attach to %s (error=%d).",
			    bpp->name, error);
		} else {
			G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
			    bpp->name, error);
		}
		goto failed;
	}
	/*
	 * Keep the provider open all the time, so we can run critical tasks,
	 * like Master Key deletion, without wondering whether we can open
	 * the provider or not.
	 * We don't open the provider for writing only when the user requested
	 * read-only access.
	 */
	if (sc->sc_flags & G_ELI_FLAG_RO)
		error = g_access(cp, 1, 0, 1);
	else
		error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		if (req != NULL) {
			gctl_error(req, "Cannot access %s (error=%d).",
			    bpp->name, error);
		} else {
			G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
			    bpp->name, error);
		}
		goto failed;
	}

	/*
	 * Remember the keys in our softc structure.
	 */
	g_eli_mkey_propagate(sc, mkey);

	LIST_INIT(&sc->sc_workers);

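	/*
	 * Start the worker threads: one per CPU unless overridden by the
	 * kern.geom.eli.threads tunable.  Workers are bound to CPUs only
	 * when there is exactly one worker per CPU.
	 */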
	threads = g_eli_threads;
	if (threads == 0)
		threads = mp_ncpus;
	sc->sc_cpubind = (mp_ncpus > 1 && threads == mp_ncpus);
	for (i = 0; i < threads; i++) {
		if (g_eli_cpu_is_disabled(i)) {
			G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
			    bpp->name, i);
			continue;
		}
		wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
		wr->w_softc = sc;
		wr->w_number = i;
		wr->w_active = TRUE;

		error = g_eli_newsession(wr);
		if (error != 0) {
			free(wr, M_ELI);
			if (req != NULL) {
				gctl_error(req, "Cannot set up crypto session "
				    "for %s (error=%d).", bpp->name, error);
			} else {
				G_ELI_DEBUG(1, "Cannot set up crypto session "
				    "for %s (error=%d).", bpp->name, error);
			}
			goto failed;
		}

		error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
		    "g_eli[%u] %s", i, bpp->name);
		if (error != 0) {
			g_eli_freesession(wr);
			free(wr, M_ELI);
			if (req != NULL) {
				gctl_error(req, "Cannot create kernel thread "
				    "for %s (error=%d).", bpp->name, error);
			} else {
				G_ELI_DEBUG(1, "Cannot create kernel thread "
				    "for %s (error=%d).", bpp->name, error);
			}
			goto failed;
		}
		LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
	}

	/*
	 * Create decrypted provider.
	 */
	pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
	pp->mediasize = sc->sc_mediasize;
	pp->sectorsize = sc->sc_sectorsize;

	g_error_provider(pp, 0);

	G_ELI_DEBUG(0, "Device %s created.", pp->name);
	G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
	    sc->sc_ekeylen);
	if (sc->sc_flags & G_ELI_FLAG_AUTH)
		G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
	G_ELI_DEBUG(0, " Crypto: %s",
	    sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
	return (gp);
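	/*
	 * Error path: mark the softc for destruction so that any workers
	 * created above exit, then drop the consumer and free the softc.
	 */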
failed:
	mtx_lock(&sc->sc_queue_mtx);
	sc->sc_flags |= G_ELI_FLAG_DESTROY;
	wakeup(sc);
	/*
	 * Wait for the kernel threads to self-destruct.
	 */
	while (!LIST_EMPTY(&sc->sc_workers)) {
		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
		    "geli:destroy", 0);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	if (cp->provider != NULL) {
		if (cp->acr == 1)
			g_access(cp, -1, -1, -1);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	g_eli_key_destroy(sc);
	bzero(sc, sizeof(*sc));
	free(sc, M_ELI);
	return (NULL);
}

int
g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_ELI_DEBUG(1, "Device %s is still open, so it "
			    "cannot be definitely removed.", pp->name);
			sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
			gp->access = g_eli_access;
			g_wither_provider(pp, ENXIO);
			return (EBUSY);
		} else {
			G_ELI_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	}

	mtx_lock(&sc->sc_queue_mtx);
	sc->sc_flags |= G_ELI_FLAG_DESTROY;
	wakeup(sc);
	while (!LIST_EMPTY(&sc->sc_workers)) {
		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
		    "geli:destroy", 0);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	gp->softc = NULL;
	g_eli_key_destroy(sc);
	bzero(sc, sizeof(*sc));
	free(sc, M_ELI);

	if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
		G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom_close(gp, ENXIO);

	return (0);
}

static int
g_eli_destroy_geom(struct gctl_req *req __unused,
    struct g_class *mp __unused, struct g_geom *gp)
{
	struct g_eli_softc *sc;

	sc = gp->softc;
	return (g_eli_destroy(sc, FALSE));
}
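
/*
 * Feed all key files preloaded by the loader for the given provider into
 * the HMAC context.  Returns the number of key files found.
 */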
static int
g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
{
	u_char *keyfile, *data;
	char *file, name[64];
	size_t size;
	int i;

	for (i = 0; ; i++) {
		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
		keyfile = preload_search_by_type(name);
		if (keyfile == NULL && i == 0) {
			/*
			 * If there is only one key file, allow a simpler name.
			 */
			snprintf(name, sizeof(name), "%s:geli_keyfile", provider);
			keyfile = preload_search_by_type(name);
		}
		if (keyfile == NULL)
			return (i);	/* Return number of loaded keyfiles. */
		data = preload_fetch_addr(keyfile);
		if (data == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file data for %s.",
			    name);
			return (0);
		}
		size = preload_fetch_size(keyfile);
		if (size == 0) {
			G_ELI_DEBUG(0, "Cannot find key file size for %s.",
			    name);
			return (0);
		}
		file = preload_search_info(keyfile, MODINFO_NAME);
		if (file == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file name for %s.",
			    name);
			return (0);
		}
		G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
		    provider, name);
		g_eli_crypto_hmac_update(ctx, data, size);
	}
}

static void
g_eli_keyfiles_clear(const char *provider)
{
	u_char *keyfile, *data;
	char name[64];
	size_t size;
	int i;

	for (i = 0; ; i++) {
		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
		keyfile = preload_search_by_type(name);
		if (keyfile == NULL)
			return;
		data = preload_fetch_addr(keyfile);
		size = preload_fetch_size(keyfile);
		if (data != NULL && size != 0)
			bzero(data, size);
	}
}

/*
 * Tasting is only done during boot; we detect providers which should be
 * attached before root is mounted.
 */
static struct g_geom *
g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_eli_metadata md;
	struct g_geom *gp;
	struct hmac_ctx ctx;
	char passphrase[256];
	u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
	u_int i, nkey, nkeyfiles, tries, showpass;
	int error;
	struct keybuf *keybuf;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	if (root_mounted() || g_eli_tries == 0)
		return (NULL);

	G_ELI_DEBUG(3, "Tasting %s.", pp->name);

	error = g_eli_read_metadata(mp, pp, &md);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_ELI_VERSION) {
		printf("geom_eli.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if (md.md_provsize != pp->mediasize)
		return (NULL);
	/* Should we attach it on boot? */
	if (!(md.md_flags & G_ELI_FLAG_BOOT))
		return (NULL);
	if (md.md_keys == 0x00) {
		G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
		return (NULL);
	}
	if (md.md_iterations == -1) {
		/* If there is no passphrase, we try only once. */
		tries = 1;
	} else {
		/* Ask for the passphrase no more than g_eli_tries times. */
		tries = g_eli_tries;
	}

	if ((keybuf = get_keybuf()) != NULL) {
		/* Scan the key buffer, try all GELI keys. */
		for (i = 0; i < keybuf->kb_nents; i++) {
			if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) {
				memcpy(key, keybuf->kb_ents[i].ke_data,
				    sizeof(key));

				if (g_eli_mkey_decrypt_any(&md, key,
				    mkey, &nkey) == 0) {
					explicit_bzero(key, sizeof(key));
					goto have_key;
				}
			}
		}
	}

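	/*
	 * Try to decrypt the Master Key using key files and/or the
	 * passphrase.  The first pass reuses the passphrase cached from the
	 * loader; later passes prompt on the console, up to g_eli_tries
	 * times.
	 */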
	for (i = 0; i <= tries; i++) {
		g_eli_crypto_hmac_init(&ctx, NULL, 0);

		/*
		 * Load all key files.
		 */
		nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);

		if (nkeyfiles == 0 && md.md_iterations == -1) {
			/*
			 * No key files and no passphrase, something is
			 * definitely wrong here.
			 * geli(8) doesn't allow for such a situation, so assume
			 * that there was really no passphrase and, in that case,
			 * the key files are not properly defined in loader.conf.
			 */
			G_ELI_DEBUG(0,
			    "Found no key files in loader.conf for %s.",
			    pp->name);
			return (NULL);
		}

		/* Ask for the passphrase if defined. */
		if (md.md_iterations >= 0) {
			/* Try first with cached passphrase. */
			if (i == 0) {
				if (!g_eli_boot_passcache)
					continue;
				memcpy(passphrase, cached_passphrase,
				    sizeof(passphrase));
			} else {
				printf("Enter passphrase for %s: ", pp->name);
				showpass = g_eli_visible_passphrase;
				if ((md.md_flags & G_ELI_FLAG_GELIDISPLAYPASS) != 0)
					showpass = GETS_ECHOPASS;
				cngets(passphrase, sizeof(passphrase),
				    showpass);
				memcpy(cached_passphrase, passphrase,
				    sizeof(passphrase));
			}
		}

		/*
		 * Prepare Derived-Key from the user passphrase.
		 */
		if (md.md_iterations == 0) {
			g_eli_crypto_hmac_update(&ctx, md.md_salt,
			    sizeof(md.md_salt));
			g_eli_crypto_hmac_update(&ctx, passphrase,
			    strlen(passphrase));
			explicit_bzero(passphrase, sizeof(passphrase));
		} else if (md.md_iterations > 0) {
			u_char dkey[G_ELI_USERKEYLEN];

			pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
			    sizeof(md.md_salt), passphrase, md.md_iterations);
			bzero(passphrase, sizeof(passphrase));
			g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
			explicit_bzero(dkey, sizeof(dkey));
		}

		g_eli_crypto_hmac_final(&ctx, key, 0);

		/*
		 * Decrypt Master-Key.
		 */
		error = g_eli_mkey_decrypt_any(&md, key, mkey, &nkey);
		bzero(key, sizeof(key));
		if (error == -1) {
			if (i == tries) {
				G_ELI_DEBUG(0,
				    "Wrong key for %s. No tries left.",
				    pp->name);
				g_eli_keyfiles_clear(pp->name);
				return (NULL);
			}
			if (i > 0) {
				G_ELI_DEBUG(0,
				    "Wrong key for %s. Tries left: %u.",
				    pp->name, tries - i);
			}
			/* Try again. */
			continue;
		} else if (error > 0) {
			G_ELI_DEBUG(0,
			    "Cannot decrypt Master Key for %s (error=%d).",
			    pp->name, error);
			g_eli_keyfiles_clear(pp->name);
			return (NULL);
		}
		g_eli_keyfiles_clear(pp->name);
		G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
		break;
	}
have_key:

	/*
	 * We have the correct key, let's attach the provider.
	 */
	gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
	bzero(mkey, sizeof(mkey));
	bzero(&md, sizeof(md));
	if (gp == NULL) {
		G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
		    G_ELI_SUFFIX);
		return (NULL);
	}
	return (gp);
}
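
/*
 * Report GELI-specific state (flags, key usage and algorithms) in the
 * provider's configuration XML.
 */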
static void
g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_eli_softc *sc;

	g_topology_assert();
	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL || cp != NULL)
		return;	/* Nothing here. */

	sbuf_printf(sb, "%s<KeysTotal>%ju</KeysTotal>\n", indent,
	    (uintmax_t)sc->sc_ekeys_total);
	sbuf_printf(sb, "%s<KeysAllocated>%ju</KeysAllocated>\n", indent,
	    (uintmax_t)sc->sc_ekeys_allocated);
	sbuf_printf(sb, "%s<Flags>", indent);
	if (sc->sc_flags == 0)
		sbuf_printf(sb, "NONE");
	else {
		int first = 1;

#define ADD_FLAG(flag, name)	do {					\
	if (sc->sc_flags & (flag)) {					\
		if (!first)						\
			sbuf_printf(sb, ", ");				\
		else							\
			first = 0;					\
		sbuf_printf(sb, name);					\
	}								\
} while (0)
		ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND");
		ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY");
		ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
		ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
		ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
		ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
		ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
		ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
		ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
		ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
		ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
		ADD_FLAG(G_ELI_FLAG_NODELETE, "NODELETE");
		ADD_FLAG(G_ELI_FLAG_GELIBOOT, "GELIBOOT");
		ADD_FLAG(G_ELI_FLAG_GELIDISPLAYPASS, "GELIDISPLAYPASS");
#undef ADD_FLAG
	}
	sbuf_printf(sb, "</Flags>\n");

	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
		sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
		    sc->sc_nkey);
	}
	sbuf_printf(sb, "%s<Version>%u</Version>\n", indent, sc->sc_version);
	sbuf_printf(sb, "%s<Crypto>", indent);
	switch (sc->sc_crypto) {
	case G_ELI_CRYPTO_HW:
		sbuf_printf(sb, "hardware");
		break;
	case G_ELI_CRYPTO_SW:
		sbuf_printf(sb, "software");
		break;
	default:
		sbuf_printf(sb, "UNKNOWN");
		break;
	}
	sbuf_printf(sb, "</Crypto>\n");
	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
		sbuf_printf(sb,
		    "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
		    indent, g_eli_algo2str(sc->sc_aalgo));
	}
	sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
	    sc->sc_ekeylen);
	sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n",
	    indent, g_eli_algo2str(sc->sc_ealgo));
	sbuf_printf(sb, "%s<State>%s</State>\n", indent,
	    (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE");
}

static void
g_eli_shutdown_pre_sync(void *arg, int howto)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;
	struct g_provider *pp;
	struct g_eli_softc *sc;
	int error;

	mp = arg;
	g_topology_lock();
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		pp = LIST_FIRST(&gp->provider);
		KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
		if (pp->acr + pp->acw + pp->ace == 0)
			error = g_eli_destroy(sc, TRUE);
		else {
			sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
			gp->access = g_eli_access;
		}
	}
	g_topology_unlock();
}
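
/*
 * Register a shutdown_pre_sync handler so that providers are destroyed, or
 * marked for detach on last close, before the final sync on shutdown.
 */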
static void
g_eli_init(struct g_class *mp)
{

	g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
	    g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
	if (g_eli_pre_sync == NULL)
		G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
}

static void
g_eli_fini(struct g_class *mp)
{

	if (g_eli_pre_sync != NULL)
		EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
}

DECLARE_GEOM_CLASS(g_eli_class, g_eli);
MODULE_DEPEND(g_eli, crypto, 1, 1, 1);
MODULE_VERSION(geom_eli, 0);