/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005-2019 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cons.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/uio.h>
#include <sys/vnode.h>

#include <vm/uma.h>

#include <geom/geom.h>
#include <geom/eli/g_eli.h>
#include <geom/eli/pkcs5v2.h>

#include <crypto/intake.h>

FEATURE(geom_eli, "GEOM crypto module");

MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
static int g_eli_version = G_ELI_VERSION;
SYSCTL_INT(_kern_geom_eli, OID_AUTO, version, CTLFLAG_RD, &g_eli_version, 0,
    "GELI version");
int g_eli_debug = 0;
SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RWTUN, &g_eli_debug, 0,
    "Debug level");
static u_int g_eli_tries = 3;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RWTUN, &g_eli_tries, 0,
    "Number of tries for entering the passphrase");
static u_int g_eli_visible_passphrase = GETS_NOECHO;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RWTUN,
    &g_eli_visible_passphrase, 0,
    "Visibility of passphrase prompt (0 = invisible, 1 = visible, 2 = asterisk)");
u_int g_eli_overwrites = G_ELI_OVERWRITES;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RWTUN, &g_eli_overwrites,
    0, "Number of times on-disk keys should be overwritten when destroying them");
static u_int g_eli_threads = 0;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RWTUN, &g_eli_threads, 0,
    "Number of threads doing crypto work");
u_int g_eli_batch = 0;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RWTUN, &g_eli_batch, 0,
    "Use crypto operations batching");
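
/*
 * Most of the knobs above are CTLFLAG_RWTUN, so they can be set either as
 * loader(8) tunables or at run time with sysctl(8).  For example:
 *
 *	# /boot/loader.conf
 *	kern.geom.eli.threads="2"
 *	kern.geom.eli.visible_passphrase="2"
 *
 *	# at run time
 *	sysctl kern.geom.eli.debug=1
 */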

/*
 * Passphrase cached during boot, in order to be more user-friendly if
 * there are multiple providers using the same passphrase.
 */
static char cached_passphrase[256];
static u_int g_eli_boot_passcache = 1;
TUNABLE_INT("kern.geom.eli.boot_passcache", &g_eli_boot_passcache);
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, boot_passcache, CTLFLAG_RD,
    &g_eli_boot_passcache, 0,
    "Passphrases are cached during boot process for possible reuse");
static void
fetch_loader_passphrase(void * dummy)
{
	char * env_passphrase;

	KASSERT(dynamic_kenv, ("need dynamic kenv"));

	if ((env_passphrase = kern_getenv("kern.geom.eli.passphrase")) != NULL) {
		/* Extract passphrase from the environment. */
		strlcpy(cached_passphrase, env_passphrase,
		    sizeof(cached_passphrase));
		freeenv(env_passphrase);

		/* Wipe the passphrase from the environment. */
		kern_unsetenv("kern.geom.eli.passphrase");
	}
}
SYSINIT(geli_fetch_loader_passphrase, SI_SUB_KMEM + 1, SI_ORDER_ANY,
    fetch_loader_passphrase, NULL);

static void
zero_boot_passcache(void)
{

	explicit_bzero(cached_passphrase, sizeof(cached_passphrase));
}

static void
zero_geli_intake_keys(void)
{
	struct keybuf *keybuf;
	int i;

	if ((keybuf = get_keybuf()) != NULL) {
		/* Scan the key buffer, clear all GELI keys. */
		for (i = 0; i < keybuf->kb_nents; i++) {
			if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) {
				explicit_bzero(keybuf->kb_ents[i].ke_data,
				    sizeof(keybuf->kb_ents[i].ke_data));
				keybuf->kb_ents[i].ke_type = KEYBUF_TYPE_NONE;
			}
		}
	}
}

static void
zero_intake_passcache(void *dummy)
{
	zero_boot_passcache();
	zero_geli_intake_keys();
}
EVENTHANDLER_DEFINE(mountroot, zero_intake_passcache, NULL, 0);

static eventhandler_tag g_eli_pre_sync = NULL;

static int g_eli_read_metadata_offset(struct g_class *mp, struct g_provider *pp,
    off_t offset, struct g_eli_metadata *md);

static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static void g_eli_init(struct g_class *mp);
static void g_eli_fini(struct g_class *mp);

static g_taste_t g_eli_taste;
static g_dumpconf_t g_eli_dumpconf;

struct g_class g_eli_class = {
	.name = G_ELI_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_eli_config,
	.taste = g_eli_taste,
	.destroy_geom = g_eli_destroy_geom,
	.init = g_eli_init,
	.fini = g_eli_fini
};

/*
 * Code paths:
 * BIO_READ:
 *	g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
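
/*
 * Both paths funnel data requests through the per-device sc_queue; the
 * worker threads created in g_eli_create() (see g_eli_worker() below) pick
 * requests off that queue and perform the actual encryption, decryption
 * and, if enabled, integrity verification.
 */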

/*
 * EAGAIN from crypto(9) means that we were probably rebalanced to another
 * crypto accelerator.
 * This function updates the session ID and reruns the operation.
 */
int
g_eli_crypto_rerun(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct bio *bp;
	int error;

	bp = (struct bio *)crp->crp_opaque;
	sc = bp->bio_to->geom->softc;
	LIST_FOREACH(wr, &sc->sc_workers, w_next) {
		if (wr->w_number == bp->bio_pflags)
			break;
	}
	KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
	G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %p -> %p).",
	    bp->bio_cmd == BIO_READ ? "READ" : "WRITE", wr->w_sid,
	    crp->crp_session);
	wr->w_sid = crp->crp_session;
	crp->crp_etype = 0;
	error = crypto_dispatch(crp);
	if (error == 0)
		return (0);
	G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
	crp->crp_etype = error;
	return (error);
}

static void
g_eli_getattr_done(struct bio *bp)
{
	if (bp->bio_error == 0 &&
	    !strcmp(bp->bio_attribute, "GEOM::physpath")) {
		strlcat(bp->bio_data, "/eli", bp->bio_length);
	}
	g_std_done(bp);
}

/*
 * The function is called after reading encrypted data from the provider.
 *
 * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 */
void
g_eli_read_done(struct bio *bp)
{
	struct g_eli_softc *sc;
	struct bio *pbp;

	G_ELI_LOGREQ(2, bp, "Request done.");
	pbp = bp->bio_parent;
	if (pbp->bio_error == 0 && bp->bio_error != 0)
		pbp->bio_error = bp->bio_error;
	g_destroy_bio(bp);
	/*
	 * Do we have all sectors already?
	 */
	pbp->bio_inbed++;
	if (pbp->bio_inbed < pbp->bio_children)
		return;
	sc = pbp->bio_to->geom->softc;
	if (pbp->bio_error != 0) {
		G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__,
		    pbp->bio_error);
		pbp->bio_completed = 0;
		if (pbp->bio_driver2 != NULL) {
			free(pbp->bio_driver2, M_ELI);
			pbp->bio_driver2 = NULL;
		}
		g_io_deliver(pbp, pbp->bio_error);
		if (sc != NULL)
			atomic_subtract_int(&sc->sc_inflight, 1);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, pbp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

/*
 * The function is called after we encrypt and write data.
 *
 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
 */
void
g_eli_write_done(struct bio *bp)
{
	struct g_eli_softc *sc;
	struct bio *pbp;

	G_ELI_LOGREQ(2, bp, "Request done.");
	pbp = bp->bio_parent;
	if (pbp->bio_error == 0 && bp->bio_error != 0)
		pbp->bio_error = bp->bio_error;
	g_destroy_bio(bp);
	/*
	 * Do we have all sectors already?
	 */
	pbp->bio_inbed++;
	if (pbp->bio_inbed < pbp->bio_children)
		return;
	free(pbp->bio_driver2, M_ELI);
	pbp->bio_driver2 = NULL;
	if (pbp->bio_error != 0) {
		G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__,
		    pbp->bio_error);
		pbp->bio_completed = 0;
	} else
		pbp->bio_completed = pbp->bio_length;

	/*
	 * Write is finished, send it up.
	 */
	sc = pbp->bio_to->geom->softc;
	g_io_deliver(pbp, pbp->bio_error);
	if (sc != NULL)
		atomic_subtract_int(&sc->sc_inflight, 1);
}

/*
 * This function should never be called, but GEOM requires an ->orphan()
 * method to be set for every geom.
 */
static void
g_eli_orphan_spoil_assert(struct g_consumer *cp)
{

	panic("Function %s() called for %s.", __func__, cp->geom->name);
}

static void
g_eli_orphan(struct g_consumer *cp)
{
	struct g_eli_softc *sc;

	g_topology_assert();
	sc = cp->geom->softc;
	if (sc == NULL)
		return;
	g_eli_destroy(sc, TRUE);
}

static void
g_eli_resize(struct g_consumer *cp)
{
	struct g_eli_softc *sc;
	struct g_provider *epp, *pp;
	off_t oldsize;

	g_topology_assert();
	sc = cp->geom->softc;
	if (sc == NULL)
		return;

	if ((sc->sc_flags & G_ELI_FLAG_AUTORESIZE) == 0) {
		G_ELI_DEBUG(0, "Autoresize is turned off, old size: %jd.",
		    (intmax_t)sc->sc_provsize);
		return;
	}

	pp = cp->provider;

	if ((sc->sc_flags & G_ELI_FLAG_ONETIME) == 0) {
		struct g_eli_metadata md;
		u_char *sector;
		int error;

		sector = NULL;

		error = g_eli_read_metadata_offset(cp->geom->class, pp,
		    sc->sc_provsize - pp->sectorsize, &md);
		if (error != 0) {
			G_ELI_DEBUG(0, "Cannot read metadata from %s (error=%d).",
			    pp->name, error);
			goto iofail;
		}

		md.md_provsize = pp->mediasize;

		sector = malloc(pp->sectorsize, M_ELI, M_WAITOK | M_ZERO);
		eli_metadata_encode(&md, sector);
		error = g_write_data(cp, pp->mediasize - pp->sectorsize, sector,
		    pp->sectorsize);
		if (error != 0) {
			G_ELI_DEBUG(0, "Cannot store metadata on %s (error=%d).",
			    pp->name, error);
			goto iofail;
		}
		explicit_bzero(sector, pp->sectorsize);
		error = g_write_data(cp, sc->sc_provsize - pp->sectorsize,
		    sector, pp->sectorsize);
		if (error != 0) {
			G_ELI_DEBUG(0, "Cannot clear old metadata from %s (error=%d).",
			    pp->name, error);
			goto iofail;
		}
iofail:
		explicit_bzero(&md, sizeof(md));
		if (sector != NULL) {
			explicit_bzero(sector, pp->sectorsize);
			free(sector, M_ELI);
		}
	}

	oldsize = sc->sc_mediasize;
	sc->sc_mediasize = eli_mediasize(sc, pp->mediasize, pp->sectorsize);
	g_eli_key_resize(sc);
	sc->sc_provsize = pp->mediasize;

	epp = LIST_FIRST(&sc->sc_geom->provider);
	g_resize_provider(epp, sc->sc_mediasize);
	G_ELI_DEBUG(0, "Device %s size changed from %jd to %jd.", epp->name,
	    (intmax_t)oldsize, (intmax_t)sc->sc_mediasize);
}

/*
 * BIO_READ:
 *	G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 * BIO_WRITE:
 *	G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
static void
g_eli_start(struct bio *bp)
{
	struct g_eli_softc *sc;
	struct g_consumer *cp;
	struct bio *cbp;

	sc = bp->bio_to->geom->softc;
	KASSERT(sc != NULL,
	    ("Provider's error should be set (error=%d)(device=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_ELI_LOGREQ(2, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_GETATTR:
	case BIO_FLUSH:
	case BIO_ZONE:
		break;
	case BIO_DELETE:
		/*
		 * If the user hasn't set the NODELETE flag, we just pass
		 * it down the stack and let the layers beneath us do (or
		 * not) whatever they do with it.  If they have, we
		 * reject it.  A possible extension would be an
		 * additional flag to take it as a hint to shred the data
		 * with [multiple?] overwrites.
		 */
		if (!(sc->sc_flags & G_ELI_FLAG_NODELETE))
			break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp->bio_driver1 = cbp;
	bp->bio_pflags = G_ELI_NEW_BIO;
	switch (bp->bio_cmd) {
	case BIO_READ:
		if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
			g_eli_crypto_read(sc, bp, 0);
			break;
		}
		/* FALLTHROUGH */
	case BIO_WRITE:
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_tail(&sc->sc_queue, bp);
		mtx_unlock(&sc->sc_queue_mtx);
		wakeup(sc);
		break;
	case BIO_GETATTR:
	case BIO_FLUSH:
	case BIO_DELETE:
	case BIO_ZONE:
		if (bp->bio_cmd == BIO_GETATTR)
			cbp->bio_done = g_eli_getattr_done;
		else
			cbp->bio_done = g_std_done;
		cp = LIST_FIRST(&sc->sc_geom->consumer);
		cbp->bio_to = cp->provider;
		G_ELI_LOGREQ(2, cbp, "Sending request.");
		g_io_request(cbp, cp);
		break;
	}
}

static int
g_eli_newsession(struct g_eli_worker *wr)
{
	struct g_eli_softc *sc;
	struct cryptoini crie, cria;
	int error;

	sc = wr->w_softc;

	bzero(&crie, sizeof(crie));
	crie.cri_alg = sc->sc_ealgo;
	crie.cri_klen = sc->sc_ekeylen;
	if (sc->sc_ealgo == CRYPTO_AES_XTS)
		crie.cri_klen <<= 1;
	if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) {
		crie.cri_key = g_eli_key_hold(sc, 0,
		    LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize);
	} else {
		crie.cri_key = sc->sc_ekey;
	}
	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
		bzero(&cria, sizeof(cria));
		cria.cri_alg = sc->sc_aalgo;
		cria.cri_klen = sc->sc_akeylen;
		cria.cri_key = sc->sc_akey;
		crie.cri_next = &cria;
	}

	switch (sc->sc_crypto) {
	case G_ELI_CRYPTO_SW:
		error = crypto_newsession(&wr->w_sid, &crie,
		    CRYPTOCAP_F_SOFTWARE);
		break;
	case G_ELI_CRYPTO_HW:
		error = crypto_newsession(&wr->w_sid, &crie,
		    CRYPTOCAP_F_HARDWARE);
		break;
	case G_ELI_CRYPTO_UNKNOWN:
		error = crypto_newsession(&wr->w_sid, &crie,
		    CRYPTOCAP_F_HARDWARE);
		if (error == 0) {
			mtx_lock(&sc->sc_queue_mtx);
			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
				sc->sc_crypto = G_ELI_CRYPTO_HW;
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			error = crypto_newsession(&wr->w_sid, &crie,
			    CRYPTOCAP_F_SOFTWARE);
			mtx_lock(&sc->sc_queue_mtx);
			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
				sc->sc_crypto = G_ELI_CRYPTO_SW;
			mtx_unlock(&sc->sc_queue_mtx);
		}
		break;
	default:
		panic("%s: invalid condition", __func__);
	}

	if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0)
		g_eli_key_drop(sc, crie.cri_key);

	return (error);
}

static void
g_eli_freesession(struct g_eli_worker *wr)
{

	crypto_freesession(wr->w_sid);
}

static void
g_eli_cancel(struct g_eli_softc *sc)
{
	struct bio *bp;

	mtx_assert(&sc->sc_queue_mtx, MA_OWNED);

	while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
		KASSERT(bp->bio_pflags == G_ELI_NEW_BIO,
		    ("Not new bio when canceling (bp=%p).", bp));
		g_io_deliver(bp, ENXIO);
	}
}
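
/*
 * Return the next bio to process.  While the device is suspended, new
 * requests are left on the queue and only requests which are already in
 * progress are returned, so that they can drain.
 */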
static struct bio *
g_eli_takefirst(struct g_eli_softc *sc)
{
	struct bio *bp;

	mtx_assert(&sc->sc_queue_mtx, MA_OWNED);

	if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND))
		return (bioq_takefirst(&sc->sc_queue));
	/*
	 * Device suspended, so we skip new I/O requests.
	 */
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_pflags != G_ELI_NEW_BIO)
			break;
	}
	if (bp != NULL)
		bioq_remove(&sc->sc_queue, bp);
	return (bp);
}

/*
 * This is the main function of the kernel worker threads, which do the
 * cryptography in software when there is no hardware acceleration.
 * A dedicated thread is needed so we don't slow down the g_up/g_down GEOM
 * threads with crypto work.
 */
static void
g_eli_worker(void *arg)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct bio *bp;
	int error;

	wr = arg;
	sc = wr->w_softc;
#ifdef EARLY_AP_STARTUP
	MPASS(!sc->sc_cpubind || smp_started);
#elif defined(SMP)
	/* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
	if (sc->sc_cpubind) {
		while (!smp_started)
			tsleep(wr, 0, "geli:smp", hz / 4);
	}
#endif
	thread_lock(curthread);
	sched_prio(curthread, PUSER);
	if (sc->sc_cpubind)
		sched_bind(curthread, wr->w_number % mp_ncpus);
	thread_unlock(curthread);

	G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);

	for (;;) {
		mtx_lock(&sc->sc_queue_mtx);
again:
		bp = g_eli_takefirst(sc);
		if (bp == NULL) {
			if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
				g_eli_cancel(sc);
				LIST_REMOVE(wr, w_next);
				g_eli_freesession(wr);
				free(wr, M_ELI);
				G_ELI_DEBUG(1, "Thread %s exiting.",
				    curthread->td_proc->p_comm);
				wakeup(&sc->sc_workers);
				mtx_unlock(&sc->sc_queue_mtx);
				kproc_exit(0);
			}
			while (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
				if (sc->sc_inflight > 0) {
					G_ELI_DEBUG(0, "inflight=%d",
					    sc->sc_inflight);
					/*
					 * We still have inflight BIOs, so
					 * sleep and retry.
					 */
					msleep(sc, &sc->sc_queue_mtx, PRIBIO,
					    "geli:inf", hz / 5);
					goto again;
				}
				/*
				 * Suspend requested, mark the worker as
				 * suspended and go to sleep.
				 */
				if (wr->w_active) {
					g_eli_freesession(wr);
					wr->w_active = FALSE;
				}
				wakeup(&sc->sc_workers);
				msleep(sc, &sc->sc_queue_mtx, PRIBIO,
				    "geli:suspend", 0);
				if (!wr->w_active &&
				    !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) {
					error = g_eli_newsession(wr);
					KASSERT(error == 0,
					    ("g_eli_newsession() failed on resume (error=%d)",
					    error));
					wr->w_active = TRUE;
				}
				goto again;
			}
			msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0);
			continue;
		}
		if (bp->bio_pflags == G_ELI_NEW_BIO)
			atomic_add_int(&sc->sc_inflight, 1);
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp->bio_pflags == G_ELI_NEW_BIO) {
			bp->bio_pflags = 0;
			if (sc->sc_flags & G_ELI_FLAG_AUTH) {
				if (bp->bio_cmd == BIO_READ)
					g_eli_auth_read(sc, bp);
				else
					g_eli_auth_run(wr, bp);
			} else {
				if (bp->bio_cmd == BIO_READ)
					g_eli_crypto_read(sc, bp, 1);
				else
					g_eli_crypto_run(wr, bp);
			}
		} else {
			if (sc->sc_flags & G_ELI_FLAG_AUTH)
				g_eli_auth_run(wr, bp);
			else
				g_eli_crypto_run(wr, bp);
		}
	}
}
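
/*
 * Read and decode a single metadata sector from the given offset on the
 * provider.  A temporary geom and consumer are created just for the read
 * and are torn down again before returning.
 */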
static int
g_eli_read_metadata_offset(struct g_class *mp, struct g_provider *pp,
    off_t offset, struct g_eli_metadata *md)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	u_char *buf = NULL;
	int error;

	g_topology_assert();

	gp = g_new_geomf(mp, "eli:taste");
	gp->start = g_eli_start;
	gp->access = g_std_access;
	/*
	 * g_eli_read_metadata() is always called from the event thread.
	 * Our geom is created and destroyed in the same event, so there
	 * can be no orphan nor spoil event in the meantime.
	 */
	gp->orphan = g_eli_orphan_spoil_assert;
	gp->spoiled = g_eli_orphan_spoil_assert;
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error != 0)
		goto end;
	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		goto end;
	g_topology_unlock();
	buf = g_read_data(cp, offset, pp->sectorsize, &error);
	g_topology_lock();
	if (buf == NULL)
		goto end;
	error = eli_metadata_decode(buf, md);
	if (error != 0)
		goto end;
	/* Metadata was read and decoded successfully. */
end:
	if (buf != NULL)
		g_free(buf);
	if (cp->provider != NULL) {
		if (cp->acr == 1)
			g_access(cp, -1, 0, 0);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (error);
}

int
g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
    struct g_eli_metadata *md)
{

	return (g_eli_read_metadata_offset(mp, pp,
	    pp->mediasize - pp->sectorsize, md));
}

/*
 * This function is called on the last close of the provider when the user
 * has requested that the device be detached in that situation.
 */
static void
g_eli_last_close(void *arg, int flags __unused)
{
	struct g_geom *gp;
	char gpname[64];
	int error;

	g_topology_assert();
	gp = arg;
	strlcpy(gpname, gp->name, sizeof(gpname));
	error = g_eli_destroy(gp->softc, TRUE);
	KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
	    gpname, error));
	G_ELI_DEBUG(0, "Detached %s on last close.", gpname);
}

int
g_eli_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_eli_softc *sc;
	struct g_geom *gp;

	gp = pp->geom;
	sc = gp->softc;

	if (dw > 0) {
		if (sc->sc_flags & G_ELI_FLAG_RO) {
			/* Deny write attempts. */
			return (EROFS);
		}
		/* Someone is opening us for write, we need to remember that. */
		sc->sc_flags |= G_ELI_FLAG_WOPEN;
		return (0);
	}
	/* Is this the last close? */
	if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
		return (0);

	/*
	 * Automatically detach on last close if requested.
	 */
	if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
	    (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
		g_post_event(g_eli_last_close, gp, M_WAITOK, NULL);
	}
	return (0);
}

static int
g_eli_cpu_is_disabled(int cpu)
{
#ifdef SMP
	return (CPU_ISSET(cpu, &hlt_cpus_mask));
#else
	return (0);
#endif
}
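
/*
 * Create the encrypted device: initialize the softc from the metadata,
 * attach to and open the backing provider, start the crypto worker threads
 * and finally announce the decrypted provider.
 */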
struct g_geom *
g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
    const struct g_eli_metadata *md, const u_char *mkey, int nkey)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp;
	u_int i, threads;
	int error;

	G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);

	gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
	sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
	gp->start = g_eli_start;
	/*
	 * Spoiling can happen even though we have the provider open
	 * exclusively, e.g. through media change events.
	 */
	gp->spoiled = g_eli_orphan;
	gp->orphan = g_eli_orphan;
	gp->resize = g_eli_resize;
	gp->dumpconf = g_eli_dumpconf;
	/*
	 * If the detach-on-last-close feature is not enabled and we don't
	 * operate on a read-only provider, we can simply use g_std_access().
	 */
	if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
		gp->access = g_eli_access;
	else
		gp->access = g_std_access;

	eli_metadata_softc(sc, md, bpp->sectorsize, bpp->mediasize);
	sc->sc_nkey = nkey;

	gp->softc = sc;
	sc->sc_geom = gp;

	bioq_init(&sc->sc_queue);
	mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);
	mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF);

	pp = NULL;
	cp = g_new_consumer(gp);
	error = g_attach(cp, bpp);
	if (error != 0) {
		if (req != NULL) {
			gctl_error(req, "Cannot attach to %s (error=%d).",
			    bpp->name, error);
		} else {
			G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
			    bpp->name, error);
		}
		goto failed;
	}
	/*
	 * Keep the provider open all the time, so we can run critical tasks,
	 * like Master Key deletion, without wondering whether we can open
	 * the provider or not.
	 * We only skip opening the provider for writing when the user
	 * requested read-only access.
	 */
	if (sc->sc_flags & G_ELI_FLAG_RO)
		error = g_access(cp, 1, 0, 1);
	else
		error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		if (req != NULL) {
			gctl_error(req, "Cannot access %s (error=%d).",
			    bpp->name, error);
		} else {
			G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
			    bpp->name, error);
		}
		goto failed;
	}

	/*
	 * Remember the keys in our softc structure.
	 */
	g_eli_mkey_propagate(sc, mkey);

	LIST_INIT(&sc->sc_workers);

	threads = g_eli_threads;
	if (threads == 0)
		threads = mp_ncpus;
	sc->sc_cpubind = (mp_ncpus > 1 && threads == mp_ncpus);
	for (i = 0; i < threads; i++) {
		if (g_eli_cpu_is_disabled(i)) {
			G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
			    bpp->name, i);
			continue;
		}
		wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
		wr->w_softc = sc;
		wr->w_number = i;
		wr->w_active = TRUE;

		error = g_eli_newsession(wr);
		if (error != 0) {
			free(wr, M_ELI);
			if (req != NULL) {
				gctl_error(req, "Cannot set up crypto session "
				    "for %s (error=%d).", bpp->name, error);
			} else {
				G_ELI_DEBUG(1, "Cannot set up crypto session "
				    "for %s (error=%d).", bpp->name, error);
			}
			goto failed;
		}

		error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
		    "g_eli[%u] %s", i, bpp->name);
		if (error != 0) {
			g_eli_freesession(wr);
			free(wr, M_ELI);
			if (req != NULL) {
				gctl_error(req, "Cannot create kernel thread "
				    "for %s (error=%d).", bpp->name, error);
			} else {
				G_ELI_DEBUG(1, "Cannot create kernel thread "
				    "for %s (error=%d).", bpp->name, error);
			}
			goto failed;
		}
		LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
	}

	/*
	 * Create decrypted provider.
	 */
	pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
	pp->mediasize = sc->sc_mediasize;
	pp->sectorsize = sc->sc_sectorsize;

	g_error_provider(pp, 0);

	G_ELI_DEBUG(0, "Device %s created.", pp->name);
	G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
	    sc->sc_ekeylen);
	if (sc->sc_flags & G_ELI_FLAG_AUTH)
		G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
	G_ELI_DEBUG(0, "    Crypto: %s",
	    sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
	return (gp);
failed:
	mtx_lock(&sc->sc_queue_mtx);
	sc->sc_flags |= G_ELI_FLAG_DESTROY;
	wakeup(sc);
	/*
	 * Wait for the kernel threads to self-destruct.
	 */
	while (!LIST_EMPTY(&sc->sc_workers)) {
		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
		    "geli:destroy", 0);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	if (cp->provider != NULL) {
		if (cp->acr == 1)
			g_access(cp, -1, -1, -1);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	g_eli_key_destroy(sc);
	bzero(sc, sizeof(*sc));
	free(sc, M_ELI);
	return (NULL);
}

int
g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_ELI_DEBUG(1, "Device %s is still open, so it "
			    "cannot be definitely removed.", pp->name);
			sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
			gp->access = g_eli_access;
			g_wither_provider(pp, ENXIO);
			return (EBUSY);
		} else {
			G_ELI_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	}

	mtx_lock(&sc->sc_queue_mtx);
	sc->sc_flags |= G_ELI_FLAG_DESTROY;
	wakeup(sc);
	while (!LIST_EMPTY(&sc->sc_workers)) {
		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
		    "geli:destroy", 0);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	gp->softc = NULL;
	g_eli_key_destroy(sc);
	bzero(sc, sizeof(*sc));
	free(sc, M_ELI);

	if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
		G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom_close(gp, ENXIO);

	return (0);
}

static int
g_eli_destroy_geom(struct gctl_req *req __unused,
    struct g_class *mp __unused, struct g_geom *gp)
{
	struct g_eli_softc *sc;

	sc = gp->softc;
	return (g_eli_destroy(sc, FALSE));
}
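
/*
 * Feed all key files that the loader preloaded for the given provider into
 * the supplied HMAC context.  Returns the number of key files found, or 0 on
 * error.
 */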
static int
g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
{
	u_char *keyfile, *data;
	char *file, name[64];
	size_t size;
	int i;

	for (i = 0; ; i++) {
		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
		keyfile = preload_search_by_type(name);
		if (keyfile == NULL && i == 0) {
			/*
			 * If there is only one keyfile, allow simpler name.
			 */
			snprintf(name, sizeof(name), "%s:geli_keyfile", provider);
			keyfile = preload_search_by_type(name);
		}
		if (keyfile == NULL)
			return (i);	/* Return number of loaded keyfiles. */
		data = preload_fetch_addr(keyfile);
		if (data == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file data for %s.",
			    name);
			return (0);
		}
		size = preload_fetch_size(keyfile);
		if (size == 0) {
			G_ELI_DEBUG(0, "Cannot find key file size for %s.",
			    name);
			return (0);
		}
		file = preload_search_info(keyfile, MODINFO_NAME);
		if (file == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file name for %s.",
			    name);
			return (0);
		}
		G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
		    provider, name);
		g_eli_crypto_hmac_update(ctx, data, size);
	}
}

static void
g_eli_keyfiles_clear(const char *provider)
{
	u_char *keyfile, *data;
	char name[64];
	size_t size;
	int i;

	for (i = 0; ; i++) {
		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
		keyfile = preload_search_by_type(name);
		if (keyfile == NULL)
			return;
		data = preload_fetch_addr(keyfile);
		size = preload_fetch_size(keyfile);
		if (data != NULL && size != 0)
			bzero(data, size);
	}
}

/*
 * Tasting is done only during boot.
 * We detect providers which should be attached before the root filesystem
 * is mounted.
 */
static struct g_geom *
g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_eli_metadata md;
	struct g_geom *gp;
	struct hmac_ctx ctx;
	char passphrase[256];
	u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
	u_int i, nkey, nkeyfiles, tries, showpass;
	int error;
	struct keybuf *keybuf;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	if (root_mounted() || g_eli_tries == 0)
		return (NULL);

	G_ELI_DEBUG(3, "Tasting %s.", pp->name);

	error = g_eli_read_metadata(mp, pp, &md);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_ELI_VERSION) {
		printf("geom_eli.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if (md.md_provsize != pp->mediasize)
		return (NULL);
	/* Should we attach it on boot? */
	if (!(md.md_flags & G_ELI_FLAG_BOOT))
		return (NULL);
	if (md.md_keys == 0x00) {
		G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
		return (NULL);
	}
	if (md.md_iterations == -1) {
		/* If there is no passphrase, we try only once. */
		tries = 1;
	} else {
		/* Ask for the passphrase no more than g_eli_tries times. */
		tries = g_eli_tries;
	}

	if ((keybuf = get_keybuf()) != NULL) {
		/* Scan the key buffer, try all GELI keys. */
		for (i = 0; i < keybuf->kb_nents; i++) {
			if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) {
				memcpy(key, keybuf->kb_ents[i].ke_data,
				    sizeof(key));

				if (g_eli_mkey_decrypt_any(&md, key,
				    mkey, &nkey) == 0) {
					explicit_bzero(key, sizeof(key));
					goto have_key;
				}
			}
		}
	}

	for (i = 0; i <= tries; i++) {
		g_eli_crypto_hmac_init(&ctx, NULL, 0);

		/*
		 * Load all key files.
		 */
		nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);

		if (nkeyfiles == 0 && md.md_iterations == -1) {
			/*
			 * No key files and no passphrase, something is
			 * definitely wrong here.
			 * geli(8) doesn't allow for such a situation, so
			 * assume that there was really no passphrase and
			 * that the key files are not properly defined in
			 * loader.conf.
			 */
			G_ELI_DEBUG(0,
			    "Found no key files in loader.conf for %s.",
			    pp->name);
			return (NULL);
		}

		/* Ask for the passphrase if defined. */
		if (md.md_iterations >= 0) {
			/* Try first with cached passphrase. */
			if (i == 0) {
				if (!g_eli_boot_passcache)
					continue;
				memcpy(passphrase, cached_passphrase,
				    sizeof(passphrase));
			} else {
				printf("Enter passphrase for %s: ", pp->name);
				showpass = g_eli_visible_passphrase;
				if ((md.md_flags & G_ELI_FLAG_GELIDISPLAYPASS) != 0)
					showpass = GETS_ECHOPASS;
				cngets(passphrase, sizeof(passphrase),
				    showpass);
				memcpy(cached_passphrase, passphrase,
				    sizeof(passphrase));
			}
		}

		/*
		 * Prepare Derived-Key from the user passphrase.
		 */
		if (md.md_iterations == 0) {
			g_eli_crypto_hmac_update(&ctx, md.md_salt,
			    sizeof(md.md_salt));
			g_eli_crypto_hmac_update(&ctx, passphrase,
			    strlen(passphrase));
			explicit_bzero(passphrase, sizeof(passphrase));
		} else if (md.md_iterations > 0) {
			u_char dkey[G_ELI_USERKEYLEN];

			pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
			    sizeof(md.md_salt), passphrase, md.md_iterations);
			bzero(passphrase, sizeof(passphrase));
			g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
			explicit_bzero(dkey, sizeof(dkey));
		}

		g_eli_crypto_hmac_final(&ctx, key, 0);

		/*
		 * Decrypt Master-Key.
		 */
		error = g_eli_mkey_decrypt_any(&md, key, mkey, &nkey);
		bzero(key, sizeof(key));
		if (error == -1) {
			if (i == tries) {
				G_ELI_DEBUG(0,
				    "Wrong key for %s. No tries left.",
				    pp->name);
				g_eli_keyfiles_clear(pp->name);
				return (NULL);
			}
			if (i > 0) {
				G_ELI_DEBUG(0,
				    "Wrong key for %s. Tries left: %u.",
				    pp->name, tries - i);
			}
			/* Try again. */
			continue;
		} else if (error > 0) {
			G_ELI_DEBUG(0,
			    "Cannot decrypt Master Key for %s (error=%d).",
			    pp->name, error);
			g_eli_keyfiles_clear(pp->name);
			return (NULL);
		}
		g_eli_keyfiles_clear(pp->name);
		G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
		break;
	}
have_key:

	/*
	 * We have the correct key, let's attach the provider.
	 */
	gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
	bzero(mkey, sizeof(mkey));
	bzero(&md, sizeof(md));
	if (gp == NULL) {
		G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
		    G_ELI_SUFFIX);
		return (NULL);
	}
	return (gp);
}
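
/*
 * Dump the device configuration (flags, used key, crypto backend and
 * algorithms) into the GEOM XML tree, where it shows up in the
 * kern.geom.confxml sysctl and is reported by userland tools such as
 * "geli list".
 */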
static void
g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_eli_softc *sc;

	g_topology_assert();
	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL || cp != NULL)
		return;	/* Nothing here. */

	sbuf_printf(sb, "%s<KeysTotal>%ju</KeysTotal>\n", indent,
	    (uintmax_t)sc->sc_ekeys_total);
	sbuf_printf(sb, "%s<KeysAllocated>%ju</KeysAllocated>\n", indent,
	    (uintmax_t)sc->sc_ekeys_allocated);
	sbuf_printf(sb, "%s<Flags>", indent);
	if (sc->sc_flags == 0)
		sbuf_printf(sb, "NONE");
	else {
		int first = 1;

#define ADD_FLAG(flag, name)	do {					\
	if (sc->sc_flags & (flag)) {					\
		if (!first)						\
			sbuf_printf(sb, ", ");				\
		else							\
			first = 0;					\
		sbuf_printf(sb, name);					\
	}								\
} while (0)
		ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND");
		ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY");
		ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
		ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
		ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
		ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
		ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
		ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
		ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
		ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
		ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
		ADD_FLAG(G_ELI_FLAG_NODELETE, "NODELETE");
		ADD_FLAG(G_ELI_FLAG_GELIBOOT, "GELIBOOT");
		ADD_FLAG(G_ELI_FLAG_GELIDISPLAYPASS, "GELIDISPLAYPASS");
		ADD_FLAG(G_ELI_FLAG_AUTORESIZE, "AUTORESIZE");
#undef ADD_FLAG
	}
	sbuf_printf(sb, "</Flags>\n");

	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
		sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
		    sc->sc_nkey);
	}
	sbuf_printf(sb, "%s<Version>%u</Version>\n", indent, sc->sc_version);
	sbuf_printf(sb, "%s<Crypto>", indent);
	switch (sc->sc_crypto) {
	case G_ELI_CRYPTO_HW:
		sbuf_printf(sb, "hardware");
		break;
	case G_ELI_CRYPTO_SW:
		sbuf_printf(sb, "software");
		break;
	default:
		sbuf_printf(sb, "UNKNOWN");
		break;
	}
	sbuf_printf(sb, "</Crypto>\n");
	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
		sbuf_printf(sb,
		    "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
		    indent, g_eli_algo2str(sc->sc_aalgo));
	}
	sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
	    sc->sc_ekeylen);
	sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n",
	    indent, g_eli_algo2str(sc->sc_ealgo));
	sbuf_printf(sb, "%s<State>%s</State>\n", indent,
	    (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE");
}

static void
g_eli_shutdown_pre_sync(void *arg, int howto)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;
	struct g_provider *pp;
	struct g_eli_softc *sc;
	int error;

	mp = arg;
	g_topology_lock();
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		pp = LIST_FIRST(&gp->provider);
		KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
		if (pp->acr + pp->acw + pp->ace == 0)
			error = g_eli_destroy(sc, TRUE);
		else {
			sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
			gp->access = g_eli_access;
		}
	}
	g_topology_unlock();
}

static void
g_eli_init(struct g_class *mp)
{

	g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
	    g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
	if (g_eli_pre_sync == NULL)
		G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
}

static void
g_eli_fini(struct g_class *mp)
{

	if (g_eli_pre_sync != NULL)
		EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
}

DECLARE_GEOM_CLASS(g_eli_class, g_eli);
MODULE_DEPEND(g_eli, crypto, 1, 1, 1);
MODULE_VERSION(geom_eli, 0);