/*-
 * Copyright (c) 2005-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/uio.h>
#include <sys/vnode.h>

#include <vm/uma.h>

#include <geom/geom.h>
#include <geom/eli/g_eli.h>
#include <geom/eli/pkcs5v2.h>


MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
u_int g_eli_debug = 0;
TUNABLE_INT("kern.geom.eli.debug", &g_eli_debug);
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RW, &g_eli_debug, 0,
    "Debug level");
static u_int g_eli_tries = 3;
TUNABLE_INT("kern.geom.eli.tries", &g_eli_tries);
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RW, &g_eli_tries, 0,
    "Number of tries for entering the passphrase");
static u_int g_eli_visible_passphrase = 0;
TUNABLE_INT("kern.geom.eli.visible_passphrase", &g_eli_visible_passphrase);
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RW,
    &g_eli_visible_passphrase, 0,
    "Turn on echo when entering the passphrase (for debug purposes only!!)");
u_int g_eli_overwrites = 5;
TUNABLE_INT("kern.geom.eli.overwrites", &g_eli_overwrites);
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RW, &g_eli_overwrites,
    0, "Number of times on-disk keys should be overwritten when destroying them");
static u_int g_eli_threads = 0;
TUNABLE_INT("kern.geom.eli.threads", &g_eli_threads);
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RW, &g_eli_threads, 0,
    "Number of threads doing crypto work");
u_int g_eli_batch = 0;
TUNABLE_INT("kern.geom.eli.batch", &g_eli_batch);
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RW, &g_eli_batch, 0,
    "Use crypto operations batching");

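/*
 * Illustrative /boot/loader.conf fragment (example values only, not part of
 * the original source): the TUNABLE_INT() entries above pick these up from
 * the kernel environment when the module is loaded, e.g.:
 *
 *	kern.geom.eli.threads="2"
 *	kern.geom.eli.batch="1"
 *	kern.geom.eli.visible_passphrase="0"
 */
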
static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);

static g_taste_t g_eli_taste;
static g_dumpconf_t g_eli_dumpconf;

struct g_class g_eli_class = {
	.name = G_ELI_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_eli_config,
	.taste = g_eli_taste,
	.destroy_geom = g_eli_destroy_geom
};


/*
 * Code paths:
 * BIO_READ:
 *	g_eli_start -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 * BIO_WRITE:
 *	g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */


/*
 * EAGAIN from crypto(9) means that we were probably rebalanced to another
 * crypto accelerator or something similar.
 * The function updates the SID and reruns the operation.
 */
int
g_eli_crypto_rerun(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct bio *bp;
	int error;

	bp = (struct bio *)crp->crp_opaque;
	sc = bp->bio_to->geom->softc;
	LIST_FOREACH(wr, &sc->sc_workers, w_next) {
		if (wr->w_number == bp->bio_pflags)
			break;
	}
	KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
	G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).",
	    bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid,
	    (uintmax_t)crp->crp_sid);
	wr->w_sid = crp->crp_sid;
	crp->crp_etype = 0;
	error = crypto_dispatch(crp);
	if (error == 0)
		return (0);
	G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
	crp->crp_etype = error;
	return (error);
}

/*
 * The function is called after reading encrypted data from the provider.
 *
 * g_eli_start -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 */
void
g_eli_read_done(struct bio *bp)
{
	struct g_eli_softc *sc;
	struct bio *pbp;

	G_ELI_LOGREQ(2, bp, "Request done.");
	pbp = bp->bio_parent;
	if (pbp->bio_error == 0)
		pbp->bio_error = bp->bio_error;
	/*
	 * Do we have all sectors already?
	 */
	pbp->bio_inbed++;
	if (pbp->bio_inbed < pbp->bio_children)
		return;
	g_destroy_bio(bp);
	if (pbp->bio_error != 0) {
		G_ELI_LOGREQ(0, pbp, "%s() failed", __func__);
		pbp->bio_completed = 0;
		if (pbp->bio_driver2 != NULL) {
			free(pbp->bio_driver2, M_ELI);
			pbp->bio_driver2 = NULL;
		}
		g_io_deliver(pbp, pbp->bio_error);
		return;
	}
	sc = pbp->bio_to->geom->softc;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, pbp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

/*
 * The function is called after we encrypt and write data.
 *
 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
 */
void
g_eli_write_done(struct bio *bp)
{
	struct bio *pbp;

	G_ELI_LOGREQ(2, bp, "Request done.");
	pbp = bp->bio_parent;
	if (pbp->bio_error == 0) {
		if (bp->bio_error != 0)
			pbp->bio_error = bp->bio_error;
	}
	/*
	 * Do we have all sectors already?
	 */
	pbp->bio_inbed++;
	if (pbp->bio_inbed < pbp->bio_children)
		return;
	free(pbp->bio_driver2, M_ELI);
	pbp->bio_driver2 = NULL;
	if (pbp->bio_error != 0) {
		G_ELI_LOGREQ(0, pbp, "Crypto WRITE request failed (error=%d).",
		    pbp->bio_error);
		pbp->bio_completed = 0;
	}
	g_destroy_bio(bp);
	/*
	 * Write is finished, send it up.
	 */
	pbp->bio_completed = pbp->bio_length;
	g_io_deliver(pbp, pbp->bio_error);
}

/*
 * This function should never be called, but GEOM requires that an ->orphan()
 * method is set for every geom.
 */
static void
g_eli_orphan_spoil_assert(struct g_consumer *cp)
{

	panic("Function %s() called for %s.", __func__, cp->geom->name);
}

static void
g_eli_orphan(struct g_consumer *cp)
{
	struct g_eli_softc *sc;

	g_topology_assert();
	sc = cp->geom->softc;
	if (sc == NULL)
		return;
	g_eli_destroy(sc, 1);
}

/*
 * BIO_READ : G_ELI_START -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 * BIO_WRITE: G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
static void
g_eli_start(struct bio *bp)
{
	struct g_eli_softc *sc;
	struct g_consumer *cp;
	struct bio *cbp;

	sc = bp->bio_to->geom->softc;
	KASSERT(sc != NULL,
	    ("Provider's error should be set (error=%d)(device=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_ELI_LOGREQ(2, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_GETATTR:
	case BIO_FLUSH:
		break;
	case BIO_DELETE:
		/*
		 * We could eventually support BIO_DELETE requests.
		 * It could be done by overwriting the requested sectors with
		 * random data g_eli_overwrites times.
		 */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	switch (bp->bio_cmd) {
	case BIO_READ:
		if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
			bp->bio_driver2 = NULL;
			cbp->bio_done = g_eli_read_done;
			cp = LIST_FIRST(&sc->sc_geom->consumer);
			cbp->bio_to = cp->provider;
			G_ELI_LOGREQ(2, cbp, "Sending request.");
			/*
			 * Read encrypted data from provider.
			 */
			g_io_request(cbp, cp);
			break;
		}
		bp->bio_pflags = 255;
		/* FALLTHROUGH */
	case BIO_WRITE:
		bp->bio_driver1 = cbp;
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_tail(&sc->sc_queue, bp);
		mtx_unlock(&sc->sc_queue_mtx);
		wakeup(sc);
		break;
	case BIO_GETATTR:
	case BIO_FLUSH:
		cbp->bio_done = g_std_done;
		cp = LIST_FIRST(&sc->sc_geom->consumer);
		cbp->bio_to = cp->provider;
		G_ELI_LOGREQ(2, cbp, "Sending request.");
		g_io_request(cbp, cp);
		break;
	}
}

/*
 * This is the main function of the kernel worker thread, used when we have no
 * hardware acceleration and have to do the cryptography in software.
 * A dedicated thread is needed so we don't slow down the g_up/g_down GEOM
 * threads with crypto work.
 */
static void
g_eli_worker(void *arg)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct bio *bp;

	wr = arg;
	sc = wr->w_softc;
#ifdef SMP
	/* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
	if (mp_ncpus > 1 && sc->sc_crypto == G_ELI_CRYPTO_SW &&
	    g_eli_threads == 0) {
		while (!smp_started)
			tsleep(wr, 0, "geli:smp", hz / 4);
	}
#endif
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	if (sc->sc_crypto == G_ELI_CRYPTO_SW && g_eli_threads == 0)
		sched_bind(curthread, wr->w_number);
	thread_unlock(curthread);

	G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);

	for (;;) {
		mtx_lock(&sc->sc_queue_mtx);
		bp = bioq_takefirst(&sc->sc_queue);
		if (bp == NULL) {
			if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
				LIST_REMOVE(wr, w_next);
				crypto_freesession(wr->w_sid);
				free(wr, M_ELI);
				G_ELI_DEBUG(1, "Thread %s exiting.",
				    curthread->td_proc->p_comm);
				wakeup(&sc->sc_workers);
				mtx_unlock(&sc->sc_queue_mtx);
				kproc_exit(0);
			}
			msleep(sc, &sc->sc_queue_mtx, PRIBIO | PDROP,
			    "geli:w", 0);
			continue;
		}
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp->bio_cmd == BIO_READ && bp->bio_pflags == 255)
			g_eli_auth_read(sc, bp);
		else if (sc->sc_flags & G_ELI_FLAG_AUTH)
			g_eli_auth_run(wr, bp);
		else
			g_eli_crypto_run(wr, bp);
	}
}

/*
 * Here we generate the IV. It is unique for every sector.
 */
void
g_eli_crypto_ivgen(struct g_eli_softc *sc, off_t offset, u_char *iv,
    size_t size)
{
	u_char off[8], hash[SHA256_DIGEST_LENGTH];
	SHA256_CTX ctx;

	if (!(sc->sc_flags & G_ELI_FLAG_NATIVE_BYTE_ORDER))
		le64enc(off, (uint64_t)offset);
	/* Copy precalculated SHA256 context for IV-Key. */
	bcopy(&sc->sc_ivctx, &ctx, sizeof(ctx));
	/* Hash the little-endian offset, unless native byte order was requested. */
	if (sc->sc_flags & G_ELI_FLAG_NATIVE_BYTE_ORDER)
		SHA256_Update(&ctx, (uint8_t *)&offset, sizeof(offset));
	else
		SHA256_Update(&ctx, off, sizeof(off));
	SHA256_Final(hash, &ctx);
	bcopy(hash, iv, size);
}

int
g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
    struct g_eli_metadata *md)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	u_char *buf = NULL;
	int error;

	g_topology_assert();

	gp = g_new_geomf(mp, "eli:taste");
	gp->start = g_eli_start;
	gp->access = g_std_access;
	/*
	 * g_eli_read_metadata() is always called from the event thread.
	 * Our geom is created and destroyed in the same event, so no orphan
	 * nor spoil event can occur in the meantime.
	 */
	gp->orphan = g_eli_orphan_spoil_assert;
	gp->spoiled = g_eli_orphan_spoil_assert;
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error != 0)
		goto end;
	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		goto end;
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	if (buf == NULL)
		goto end;
	eli_metadata_decode(buf, md);
end:
	if (buf != NULL)
		g_free(buf);
	if (cp->provider != NULL) {
		if (cp->acr == 1)
			g_access(cp, -1, 0, 0);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (error);
}

/*
 * The function is called after the last close of the provider when the user
 * has requested that the device be detached in that situation.
 */
static void
g_eli_last_close(struct g_eli_softc *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;
	char ppname[64];
	int error;

	g_topology_assert();
	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	strlcpy(ppname, pp->name, sizeof(ppname));
	error = g_eli_destroy(sc, 1);
	KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
	    ppname, error));
	G_ELI_DEBUG(0, "Detached %s on last close.", ppname);
}

int
g_eli_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_eli_softc *sc;
	struct g_geom *gp;

	gp = pp->geom;
	sc = gp->softc;

	if (dw > 0) {
		if (sc->sc_flags & G_ELI_FLAG_RO) {
			/* Deny write attempts. */
			return (EROFS);
		}
		/* Someone is opening us for write, we need to remember that. */
		sc->sc_flags |= G_ELI_FLAG_WOPEN;
		return (0);
	}
	/* Is this the last close? */
	if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
		return (0);

	/*
	 * Automatically detach on last close if requested.
	 */
	if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
	    (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
		g_eli_last_close(sc);
	}
	return (0);
}

static int
g_eli_cpu_is_disabled(int cpu)
{
#ifdef SMP
	return ((hlt_cpus_mask & (1 << cpu)) != 0);
#else
	return (0);
#endif
}

struct g_geom *
g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
    const struct g_eli_metadata *md, const u_char *mkey, int nkey)
{
	struct g_eli_softc *sc;
	struct g_eli_worker *wr;
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct cryptoini crie, cria;
	u_int i, threads;
	int error;

	G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);

	gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
	gp->softc = NULL;	/* for a moment */

	sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
	gp->start = g_eli_start;
	/*
	 * Spoiling can actually never happen, because we either keep the
	 * provider open for writing all the time or the provider is
	 * read-only.
	 */
	gp->spoiled = g_eli_orphan_spoil_assert;
	gp->orphan = g_eli_orphan;
	gp->dumpconf = g_eli_dumpconf;
	/*
	 * If the detach-on-last-close feature is not enabled and we don't
	 * operate on a read-only provider, we can simply use g_std_access().
	 */
	if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
		gp->access = g_eli_access;
	else
		gp->access = g_std_access;

	sc->sc_crypto = G_ELI_CRYPTO_SW;
	sc->sc_flags = md->md_flags;
	/* Backward compatibility. */
	if (md->md_version < 2)
		sc->sc_flags |= G_ELI_FLAG_NATIVE_BYTE_ORDER;
	sc->sc_ealgo = md->md_ealgo;
	sc->sc_nkey = nkey;
	/*
	 * Remember the keys in our softc structure.
	 */
	g_eli_mkey_propagate(sc, mkey);
	sc->sc_ekeylen = md->md_keylen;

	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
		sc->sc_akeylen = sizeof(sc->sc_akey) * 8;
		sc->sc_aalgo = md->md_aalgo;
		sc->sc_alen = g_eli_hashlen(sc->sc_aalgo);

		sc->sc_data_per_sector = bpp->sectorsize - sc->sc_alen;
		/*
		 * Some hash functions (like SHA1 and RIPEMD160) generate a
		 * hash whose length is not a multiple of 128 bits, but we
		 * want the data length to be a multiple of 128 bits, so we
		 * can encrypt without padding. The line below rounds the
		 * data length down to a multiple of 128 bits.
		 */
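		/*
		 * Illustrative example (assumed numbers, not from the
		 * original source): with a 20-byte MAC on a 512-byte
		 * provider sector, 512 - 20 = 492 bytes remain, which is
		 * rounded down to 480 data bytes per sector below.
		 */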
		sc->sc_data_per_sector -= sc->sc_data_per_sector % 16;

		sc->sc_bytes_per_sector =
		    (md->md_sectorsize - 1) / sc->sc_data_per_sector + 1;
		sc->sc_bytes_per_sector *= bpp->sectorsize;
		/*
		 * Precalculate SHA256 for HMAC key generation.
		 * This is an expensive operation and we can either do it once
		 * now or for every access to a sector, so doing it now is
		 * much better.
		 */
		SHA256_Init(&sc->sc_akeyctx);
		SHA256_Update(&sc->sc_akeyctx, sc->sc_akey,
		    sizeof(sc->sc_akey));
	}

	/*
	 * Precalculate SHA256 for IV generation.
	 * This is an expensive operation and we can either do it once now or
	 * for every access to a sector, so doing it now is much better.
	 */
	SHA256_Init(&sc->sc_ivctx);
	SHA256_Update(&sc->sc_ivctx, sc->sc_ivkey, sizeof(sc->sc_ivkey));

	gp->softc = sc;
	sc->sc_geom = gp;

	bioq_init(&sc->sc_queue);
	mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);

	pp = NULL;
	cp = g_new_consumer(gp);
	error = g_attach(cp, bpp);
	if (error != 0) {
		if (req != NULL) {
			gctl_error(req, "Cannot attach to %s (error=%d).",
			    bpp->name, error);
		} else {
			G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
			    bpp->name, error);
		}
		goto failed;
	}
	/*
	 * Keep the provider open all the time, so we can run critical tasks,
	 * like Master Key deletion, without wondering whether we can open the
	 * provider or not.
	 * The only case where we don't open the provider for writing is when
	 * the user requested read-only access.
	 */
	if (sc->sc_flags & G_ELI_FLAG_RO)
		error = g_access(cp, 1, 0, 1);
	else
		error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		if (req != NULL) {
			gctl_error(req, "Cannot access %s (error=%d).",
			    bpp->name, error);
		} else {
			G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
			    bpp->name, error);
		}
		goto failed;
	}

	LIST_INIT(&sc->sc_workers);

	bzero(&crie, sizeof(crie));
	crie.cri_alg = sc->sc_ealgo;
	crie.cri_klen = sc->sc_ekeylen;
	crie.cri_key = sc->sc_ekey;
	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
		bzero(&cria, sizeof(cria));
		cria.cri_alg = sc->sc_aalgo;
		cria.cri_klen = sc->sc_akeylen;
		cria.cri_key = sc->sc_akey;
		crie.cri_next = &cria;
	}

	threads = g_eli_threads;
	if (threads == 0)
		threads = mp_ncpus;
	else if (threads > mp_ncpus) {
		/* There is really no need for too many worker threads. */
		threads = mp_ncpus;
		G_ELI_DEBUG(0, "Reducing number of threads to %u.", threads);
	}
	for (i = 0; i < threads; i++) {
		if (g_eli_cpu_is_disabled(i)) {
			G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
			    bpp->name, i);
			continue;
		}
		wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
		wr->w_softc = sc;
		wr->w_number = i;

		/*
		 * If this is the first pass, try to get hardware support.
		 * Fall back to software cryptography if we cannot get it.
		 */
		if (LIST_EMPTY(&sc->sc_workers)) {
			error = crypto_newsession(&wr->w_sid, &crie,
			    CRYPTOCAP_F_HARDWARE);
			if (error == 0)
				sc->sc_crypto = G_ELI_CRYPTO_HW;
		}
		if (sc->sc_crypto == G_ELI_CRYPTO_SW) {
			error = crypto_newsession(&wr->w_sid, &crie,
			    CRYPTOCAP_F_SOFTWARE);
		}
		if (error != 0) {
			free(wr, M_ELI);
			if (req != NULL) {
				gctl_error(req, "Cannot set up crypto session "
				    "for %s (error=%d).", bpp->name, error);
			} else {
				G_ELI_DEBUG(1, "Cannot set up crypto session "
				    "for %s (error=%d).", bpp->name, error);
			}
			goto failed;
		}

		error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
		    "g_eli[%u] %s", i, bpp->name);
		if (error != 0) {
			crypto_freesession(wr->w_sid);
			free(wr, M_ELI);
			if (req != NULL) {
				gctl_error(req, "Cannot create kernel thread "
				    "for %s (error=%d).", bpp->name, error);
			} else {
				G_ELI_DEBUG(1, "Cannot create kernel thread "
				    "for %s (error=%d).", bpp->name, error);
			}
			goto failed;
		}
		LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
		/* If we have hardware support, one thread is enough. */
		if (sc->sc_crypto == G_ELI_CRYPTO_HW)
			break;
	}

	/*
	 * Create decrypted provider.
	 */
	pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
	pp->sectorsize = md->md_sectorsize;
	pp->mediasize = bpp->mediasize;
	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME))
		pp->mediasize -= bpp->sectorsize;
	if (!(sc->sc_flags & G_ELI_FLAG_AUTH))
		pp->mediasize -= (pp->mediasize % pp->sectorsize);
	else {
		pp->mediasize /= sc->sc_bytes_per_sector;
		pp->mediasize *= pp->sectorsize;
	}

	g_error_provider(pp, 0);

	G_ELI_DEBUG(0, "Device %s created.", pp->name);
	G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
	    sc->sc_ekeylen);
	if (sc->sc_flags & G_ELI_FLAG_AUTH)
		G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
	G_ELI_DEBUG(0, " Crypto: %s",
	    sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
	return (gp);
failed:
	mtx_lock(&sc->sc_queue_mtx);
	sc->sc_flags |= G_ELI_FLAG_DESTROY;
	wakeup(sc);
	/*
	 * Wait for the kernel threads to self-destruct.
	 */
	while (!LIST_EMPTY(&sc->sc_workers)) {
		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
		    "geli:destroy", 0);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	if (cp->provider != NULL) {
		if (cp->acr == 1)
			g_access(cp, -1, -1, -1);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	bzero(sc, sizeof(*sc));
	free(sc, M_ELI);
	return (NULL);
}

int
g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_ELI_DEBUG(1, "Device %s is still open, so it "
			    "cannot be definitely removed.", pp->name);
		} else {
			G_ELI_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	}

	mtx_lock(&sc->sc_queue_mtx);
	sc->sc_flags |= G_ELI_FLAG_DESTROY;
	wakeup(sc);
	while (!LIST_EMPTY(&sc->sc_workers)) {
		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
		    "geli:destroy", 0);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	gp->softc = NULL;
	bzero(sc, sizeof(*sc));
	free(sc, M_ELI);

	if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
		G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom_close(gp, ENXIO);

	return (0);
}

static int
g_eli_destroy_geom(struct gctl_req *req __unused,
    struct g_class *mp __unused, struct g_geom *gp)
{
	struct g_eli_softc *sc;

	sc = gp->softc;
	return (g_eli_destroy(sc, 0));
}

static int
g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
{
	u_char *keyfile, *data, *size;
	char *file, name[64];
	int i;

	for (i = 0; ; i++) {
		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
		keyfile = preload_search_by_type(name);
		if (keyfile == NULL)
			return (i);	/* Return number of loaded keyfiles. */
		data = preload_search_info(keyfile, MODINFO_ADDR);
		if (data == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file data for %s.",
			    name);
			return (0);
		}
		data = *(void **)data;
		size = preload_search_info(keyfile, MODINFO_SIZE);
		if (size == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file size for %s.",
			    name);
			return (0);
		}
		file = preload_search_info(keyfile, MODINFO_NAME);
		if (file == NULL) {
			G_ELI_DEBUG(0, "Cannot find key file name for %s.",
			    name);
			return (0);
		}
		G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
		    provider, name);
		g_eli_crypto_hmac_update(ctx, data, *(size_t *)size);
	}
}

static void
g_eli_keyfiles_clear(const char *provider)
{
	u_char *keyfile, *data, *size;
	char name[64];
	int i;

	for (i = 0; ; i++) {
		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
		keyfile = preload_search_by_type(name);
		if (keyfile == NULL)
			return;
		data = preload_search_info(keyfile, MODINFO_ADDR);
		size = preload_search_info(keyfile, MODINFO_SIZE);
		if (data == NULL || size == NULL)
			continue;
		data = *(void **)data;
		bzero(data, *(size_t *)size);
	}
}
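
/*
 * Illustrative loader.conf fragment (assumed example, not part of the
 * original source) showing how key files end up preloaded with the
 * "<provider>:geli_keyfile<N>" type searched for above; names and paths
 * are placeholders:
 *
 *	geli_da0_keyfile0_load="YES"
 *	geli_da0_keyfile0_type="da0:geli_keyfile0"
 *	geli_da0_keyfile0_name="/boot/keys/da0.key"
 */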

/*
 * Tasting is only done at boot time. We detect providers which should be
 * attached before the root file system is mounted.
 */
static struct g_geom *
g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_eli_metadata md;
	struct g_geom *gp;
	struct hmac_ctx ctx;
	char passphrase[256];
	u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
	u_int i, nkey, nkeyfiles, tries;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	if (root_mounted() || g_eli_tries == 0)
		return (NULL);

	G_ELI_DEBUG(3, "Tasting %s.", pp->name);

	error = g_eli_read_metadata(mp, pp, &md);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_ELI_VERSION) {
		printf("geom_eli.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if (md.md_provsize != pp->mediasize)
		return (NULL);
	/* Should we attach it on boot? */
	if (!(md.md_flags & G_ELI_FLAG_BOOT))
		return (NULL);
	if (md.md_keys == 0x00) {
		G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
		return (NULL);
	}
	if (md.md_iterations == -1) {
		/* If there is no passphrase, we try only once. */
		tries = 1;
	} else {
		/* Ask for the passphrase no more than g_eli_tries times. */
		tries = g_eli_tries;
	}

	for (i = 0; i < tries; i++) {
		g_eli_crypto_hmac_init(&ctx, NULL, 0);

		/*
		 * Load all key files.
		 */
		nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);

		if (nkeyfiles == 0 && md.md_iterations == -1) {
			/*
			 * No key files and no passphrase, something is
			 * definitely wrong here.
			 * geli(8) doesn't allow for such a situation, so
			 * assume that there really was no passphrase and that
			 * the key files are not properly defined in
			 * loader.conf.
			 */
			G_ELI_DEBUG(0,
			    "Found no key files in loader.conf for %s.",
			    pp->name);
			return (NULL);
		}

		/* Ask for the passphrase if defined. */
		if (md.md_iterations >= 0) {
			printf("Enter passphrase for %s: ", pp->name);
			gets(passphrase, sizeof(passphrase),
			    g_eli_visible_passphrase);
		}

		/*
		 * Prepare Derived-Key from the user passphrase.
		 */
		if (md.md_iterations == 0) {
			g_eli_crypto_hmac_update(&ctx, md.md_salt,
			    sizeof(md.md_salt));
			g_eli_crypto_hmac_update(&ctx, passphrase,
			    strlen(passphrase));
			bzero(passphrase, sizeof(passphrase));
		} else if (md.md_iterations > 0) {
			u_char dkey[G_ELI_USERKEYLEN];

			pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
			    sizeof(md.md_salt), passphrase, md.md_iterations);
			bzero(passphrase, sizeof(passphrase));
			g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
			bzero(dkey, sizeof(dkey));
		}

		g_eli_crypto_hmac_final(&ctx, key, 0);

		/*
		 * Decrypt Master-Key.
		 */
		error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
		bzero(key, sizeof(key));
		if (error == -1) {
			if (i == tries - 1) {
				G_ELI_DEBUG(0,
				    "Wrong key for %s. No tries left.",
				    pp->name);
				g_eli_keyfiles_clear(pp->name);
				return (NULL);
			}
			G_ELI_DEBUG(0, "Wrong key for %s. Tries left: %u.",
			    pp->name, tries - i - 1);
			/* Try again. */
			continue;
		} else if (error > 0) {
			G_ELI_DEBUG(0, "Cannot decrypt Master Key for %s (error=%d).",
			    pp->name, error);
			g_eli_keyfiles_clear(pp->name);
			return (NULL);
		}
		G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
		break;
	}

	/*
	 * We have the correct key, let's attach the provider.
	 */
	gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
	bzero(mkey, sizeof(mkey));
	bzero(&md, sizeof(md));
	if (gp == NULL) {
		G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
		    G_ELI_SUFFIX);
		return (NULL);
	}
	return (gp);
}

static void
g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_eli_softc *sc;

	g_topology_assert();
	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL || cp != NULL)
		return;	/* Nothing here. */
	sbuf_printf(sb, "%s<Flags>", indent);
	if (sc->sc_flags == 0)
		sbuf_printf(sb, "NONE");
	else {
		int first = 1;

#define ADD_FLAG(flag, name)	do {					\
	if (sc->sc_flags & (flag)) {					\
		if (!first)						\
			sbuf_printf(sb, ", ");				\
		else							\
			first = 0;					\
		sbuf_printf(sb, name);					\
	}								\
} while (0)
		ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
		ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
		ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
		ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
		ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
		ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
		ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
		ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
		ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
#undef ADD_FLAG
	}
	sbuf_printf(sb, "</Flags>\n");

	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
		sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
		    sc->sc_nkey);
	}
	sbuf_printf(sb, "%s<Crypto>", indent);
	switch (sc->sc_crypto) {
	case G_ELI_CRYPTO_HW:
		sbuf_printf(sb, "hardware");
		break;
	case G_ELI_CRYPTO_SW:
		sbuf_printf(sb, "software");
		break;
	default:
		sbuf_printf(sb, "UNKNOWN");
		break;
	}
	sbuf_printf(sb, "</Crypto>\n");
	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
		sbuf_printf(sb,
		    "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
		    indent, g_eli_algo2str(sc->sc_aalgo));
	}
	sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
	    sc->sc_ekeylen);
	sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n", indent,
	    g_eli_algo2str(sc->sc_ealgo));
}

DECLARE_GEOM_CLASS(g_eli_class, g_eli);
MODULE_DEPEND(g_eli, crypto, 1, 1, 1);
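
/*
 * Illustrative userland usage (assumed example, not part of the original
 * source); see geli(8) for the full interface:
 *
 *	geli init -s 4096 /dev/da0
 *	geli attach /dev/da0
 *	newfs /dev/da0.eli
 */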