1 /*- 2 * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/kernel.h> 33 #include <sys/linker.h> 34 #include <sys/module.h> 35 #include <sys/lock.h> 36 #include <sys/mutex.h> 37 #include <sys/bio.h> 38 #include <sys/sysctl.h> 39 #include <sys/malloc.h> 40 #include <sys/eventhandler.h> 41 #include <sys/kthread.h> 42 #include <sys/proc.h> 43 #include <sys/sched.h> 44 #include <sys/smp.h> 45 #include <sys/uio.h> 46 #include <sys/vnode.h> 47 48 #include <vm/uma.h> 49 50 #include <geom/geom.h> 51 #include <geom/eli/g_eli.h> 52 #include <geom/eli/pkcs5v2.h> 53 54 FEATURE(geom_eli, "GEOM crypto module"); 55 56 MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data"); 57 58 SYSCTL_DECL(_kern_geom); 59 SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff"); 60 int g_eli_debug = 0; 61 TUNABLE_INT("kern.geom.eli.debug", &g_eli_debug); 62 SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RW, &g_eli_debug, 0, 63 "Debug level"); 64 static u_int g_eli_tries = 3; 65 TUNABLE_INT("kern.geom.eli.tries", &g_eli_tries); 66 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RW, &g_eli_tries, 0, 67 "Number of tries for entering the passphrase"); 68 static u_int g_eli_visible_passphrase = GETS_NOECHO; 69 TUNABLE_INT("kern.geom.eli.visible_passphrase", &g_eli_visible_passphrase); 70 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RW, 71 &g_eli_visible_passphrase, 0, 72 "Visibility of passphrase prompt (0 = invisible, 1 = visible, 2 = asterisk)"); 73 u_int g_eli_overwrites = G_ELI_OVERWRITES; 74 TUNABLE_INT("kern.geom.eli.overwrites", &g_eli_overwrites); 75 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RW, &g_eli_overwrites, 76 0, "Number of times on-disk keys should be overwritten when destroying them"); 77 static u_int g_eli_threads = 0; 78 TUNABLE_INT("kern.geom.eli.threads", &g_eli_threads); 79 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RW, &g_eli_threads, 0, 80 "Number of threads doing crypto work"); 81 u_int g_eli_batch = 0; 82 TUNABLE_INT("kern.geom.eli.batch", &g_eli_batch); 83 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RW, &g_eli_batch, 0, 84 "Use crypto operations batching"); 85 86 static eventhandler_tag g_eli_pre_sync = NULL; 87 88 static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp, 89 struct g_geom *gp); 90 static void g_eli_init(struct g_class *mp); 91 static void g_eli_fini(struct g_class *mp); 92 93 static g_taste_t g_eli_taste; 94 static g_dumpconf_t g_eli_dumpconf; 95 96 struct g_class g_eli_class = { 97 .name = G_ELI_CLASS_NAME, 98 .version = G_VERSION, 99 .ctlreq = g_eli_config, 100 .taste = g_eli_taste, 101 .destroy_geom = g_eli_destroy_geom, 102 .init = g_eli_init, 103 .fini = g_eli_fini 104 }; 105 106 107 /* 108 * Code paths: 109 * BIO_READ: 110 * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver 111 * BIO_WRITE: 112 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver 113 */ 114 115 116 /* 117 * EAGAIN from crypto(9) means, that we were probably balanced to another crypto 118 * accelerator or something like this. 119 * The function updates the SID and rerun the operation. 120 */ 121 int 122 g_eli_crypto_rerun(struct cryptop *crp) 123 { 124 struct g_eli_softc *sc; 125 struct g_eli_worker *wr; 126 struct bio *bp; 127 int error; 128 129 bp = (struct bio *)crp->crp_opaque; 130 sc = bp->bio_to->geom->softc; 131 LIST_FOREACH(wr, &sc->sc_workers, w_next) { 132 if (wr->w_number == bp->bio_pflags) 133 break; 134 } 135 KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags)); 136 G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).", 137 bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid, 138 (uintmax_t)crp->crp_sid); 139 wr->w_sid = crp->crp_sid; 140 crp->crp_etype = 0; 141 error = crypto_dispatch(crp); 142 if (error == 0) 143 return (0); 144 G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error); 145 crp->crp_etype = error; 146 return (error); 147 } 148 149 /* 150 * The function is called afer reading encrypted data from the provider. 151 * 152 * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver 153 */ 154 void 155 g_eli_read_done(struct bio *bp) 156 { 157 struct g_eli_softc *sc; 158 struct bio *pbp; 159 160 G_ELI_LOGREQ(2, bp, "Request done."); 161 pbp = bp->bio_parent; 162 if (pbp->bio_error == 0) 163 pbp->bio_error = bp->bio_error; 164 g_destroy_bio(bp); 165 /* 166 * Do we have all sectors already? 167 */ 168 pbp->bio_inbed++; 169 if (pbp->bio_inbed < pbp->bio_children) 170 return; 171 sc = pbp->bio_to->geom->softc; 172 if (pbp->bio_error != 0) { 173 G_ELI_LOGREQ(0, pbp, "%s() failed", __func__); 174 pbp->bio_completed = 0; 175 if (pbp->bio_driver2 != NULL) { 176 free(pbp->bio_driver2, M_ELI); 177 pbp->bio_driver2 = NULL; 178 } 179 g_io_deliver(pbp, pbp->bio_error); 180 atomic_subtract_int(&sc->sc_inflight, 1); 181 return; 182 } 183 mtx_lock(&sc->sc_queue_mtx); 184 bioq_insert_tail(&sc->sc_queue, pbp); 185 mtx_unlock(&sc->sc_queue_mtx); 186 wakeup(sc); 187 } 188 189 /* 190 * The function is called after we encrypt and write data. 191 * 192 * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver 193 */ 194 void 195 g_eli_write_done(struct bio *bp) 196 { 197 struct g_eli_softc *sc; 198 struct bio *pbp; 199 200 G_ELI_LOGREQ(2, bp, "Request done."); 201 pbp = bp->bio_parent; 202 if (pbp->bio_error == 0) { 203 if (bp->bio_error != 0) 204 pbp->bio_error = bp->bio_error; 205 } 206 g_destroy_bio(bp); 207 /* 208 * Do we have all sectors already? 209 */ 210 pbp->bio_inbed++; 211 if (pbp->bio_inbed < pbp->bio_children) 212 return; 213 free(pbp->bio_driver2, M_ELI); 214 pbp->bio_driver2 = NULL; 215 if (pbp->bio_error != 0) { 216 G_ELI_LOGREQ(0, pbp, "Crypto WRITE request failed (error=%d).", 217 pbp->bio_error); 218 pbp->bio_completed = 0; 219 } 220 /* 221 * Write is finished, send it up. 222 */ 223 pbp->bio_completed = pbp->bio_length; 224 sc = pbp->bio_to->geom->softc; 225 g_io_deliver(pbp, pbp->bio_error); 226 atomic_subtract_int(&sc->sc_inflight, 1); 227 } 228 229 /* 230 * This function should never be called, but GEOM made as it set ->orphan() 231 * method for every geom. 232 */ 233 static void 234 g_eli_orphan_spoil_assert(struct g_consumer *cp) 235 { 236 237 panic("Function %s() called for %s.", __func__, cp->geom->name); 238 } 239 240 static void 241 g_eli_orphan(struct g_consumer *cp) 242 { 243 struct g_eli_softc *sc; 244 245 g_topology_assert(); 246 sc = cp->geom->softc; 247 if (sc == NULL) 248 return; 249 g_eli_destroy(sc, TRUE); 250 } 251 252 /* 253 * BIO_READ: 254 * G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver 255 * BIO_WRITE: 256 * G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver 257 */ 258 static void 259 g_eli_start(struct bio *bp) 260 { 261 struct g_eli_softc *sc; 262 struct g_consumer *cp; 263 struct bio *cbp; 264 265 sc = bp->bio_to->geom->softc; 266 KASSERT(sc != NULL, 267 ("Provider's error should be set (error=%d)(device=%s).", 268 bp->bio_to->error, bp->bio_to->name)); 269 G_ELI_LOGREQ(2, bp, "Request received."); 270 271 switch (bp->bio_cmd) { 272 case BIO_READ: 273 case BIO_WRITE: 274 case BIO_GETATTR: 275 case BIO_FLUSH: 276 break; 277 case BIO_DELETE: 278 /* 279 * We could eventually support BIO_DELETE request. 280 * It could be done by overwritting requested sector with 281 * random data g_eli_overwrites number of times. 282 */ 283 default: 284 g_io_deliver(bp, EOPNOTSUPP); 285 return; 286 } 287 cbp = g_clone_bio(bp); 288 if (cbp == NULL) { 289 g_io_deliver(bp, ENOMEM); 290 return; 291 } 292 bp->bio_driver1 = cbp; 293 bp->bio_pflags = G_ELI_NEW_BIO; 294 switch (bp->bio_cmd) { 295 case BIO_READ: 296 if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) { 297 g_eli_crypto_read(sc, bp, 0); 298 break; 299 } 300 /* FALLTHROUGH */ 301 case BIO_WRITE: 302 mtx_lock(&sc->sc_queue_mtx); 303 bioq_insert_tail(&sc->sc_queue, bp); 304 mtx_unlock(&sc->sc_queue_mtx); 305 wakeup(sc); 306 break; 307 case BIO_GETATTR: 308 case BIO_FLUSH: 309 cbp->bio_done = g_std_done; 310 cp = LIST_FIRST(&sc->sc_geom->consumer); 311 cbp->bio_to = cp->provider; 312 G_ELI_LOGREQ(2, cbp, "Sending request."); 313 g_io_request(cbp, cp); 314 break; 315 } 316 } 317 318 static int 319 g_eli_newsession(struct g_eli_worker *wr) 320 { 321 struct g_eli_softc *sc; 322 struct cryptoini crie, cria; 323 int error; 324 325 sc = wr->w_softc; 326 327 bzero(&crie, sizeof(crie)); 328 crie.cri_alg = sc->sc_ealgo; 329 crie.cri_klen = sc->sc_ekeylen; 330 if (sc->sc_ealgo == CRYPTO_AES_XTS) 331 crie.cri_klen <<= 1; 332 crie.cri_key = sc->sc_ekey; 333 if (sc->sc_flags & G_ELI_FLAG_AUTH) { 334 bzero(&cria, sizeof(cria)); 335 cria.cri_alg = sc->sc_aalgo; 336 cria.cri_klen = sc->sc_akeylen; 337 cria.cri_key = sc->sc_akey; 338 crie.cri_next = &cria; 339 } 340 341 switch (sc->sc_crypto) { 342 case G_ELI_CRYPTO_SW: 343 error = crypto_newsession(&wr->w_sid, &crie, 344 CRYPTOCAP_F_SOFTWARE); 345 break; 346 case G_ELI_CRYPTO_HW: 347 error = crypto_newsession(&wr->w_sid, &crie, 348 CRYPTOCAP_F_HARDWARE); 349 break; 350 case G_ELI_CRYPTO_UNKNOWN: 351 error = crypto_newsession(&wr->w_sid, &crie, 352 CRYPTOCAP_F_HARDWARE); 353 if (error == 0) { 354 mtx_lock(&sc->sc_queue_mtx); 355 if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN) 356 sc->sc_crypto = G_ELI_CRYPTO_HW; 357 mtx_unlock(&sc->sc_queue_mtx); 358 } else { 359 error = crypto_newsession(&wr->w_sid, &crie, 360 CRYPTOCAP_F_SOFTWARE); 361 mtx_lock(&sc->sc_queue_mtx); 362 if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN) 363 sc->sc_crypto = G_ELI_CRYPTO_SW; 364 mtx_unlock(&sc->sc_queue_mtx); 365 } 366 break; 367 default: 368 panic("%s: invalid condition", __func__); 369 } 370 371 return (error); 372 } 373 374 static void 375 g_eli_freesession(struct g_eli_worker *wr) 376 { 377 378 crypto_freesession(wr->w_sid); 379 } 380 381 static void 382 g_eli_cancel(struct g_eli_softc *sc) 383 { 384 struct bio *bp; 385 386 mtx_assert(&sc->sc_queue_mtx, MA_OWNED); 387 388 while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) { 389 KASSERT(bp->bio_pflags == G_ELI_NEW_BIO, 390 ("Not new bio when canceling (bp=%p).", bp)); 391 g_io_deliver(bp, ENXIO); 392 } 393 } 394 395 static struct bio * 396 g_eli_takefirst(struct g_eli_softc *sc) 397 { 398 struct bio *bp; 399 400 mtx_assert(&sc->sc_queue_mtx, MA_OWNED); 401 402 if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND)) 403 return (bioq_takefirst(&sc->sc_queue)); 404 /* 405 * Device suspended, so we skip new I/O requests. 406 */ 407 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 408 if (bp->bio_pflags != G_ELI_NEW_BIO) 409 break; 410 } 411 if (bp != NULL) 412 bioq_remove(&sc->sc_queue, bp); 413 return (bp); 414 } 415 416 /* 417 * This is the main function for kernel worker thread when we don't have 418 * hardware acceleration and we have to do cryptography in software. 419 * Dedicated thread is needed, so we don't slow down g_up/g_down GEOM 420 * threads with crypto work. 421 */ 422 static void 423 g_eli_worker(void *arg) 424 { 425 struct g_eli_softc *sc; 426 struct g_eli_worker *wr; 427 struct bio *bp; 428 int error; 429 430 wr = arg; 431 sc = wr->w_softc; 432 #ifdef SMP 433 /* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */ 434 if (mp_ncpus > 1 && sc->sc_crypto == G_ELI_CRYPTO_SW && 435 g_eli_threads == 0) { 436 while (!smp_started) 437 tsleep(wr, 0, "geli:smp", hz / 4); 438 } 439 #endif 440 thread_lock(curthread); 441 sched_prio(curthread, PUSER); 442 if (sc->sc_crypto == G_ELI_CRYPTO_SW && g_eli_threads == 0) 443 sched_bind(curthread, wr->w_number); 444 thread_unlock(curthread); 445 446 G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm); 447 448 for (;;) { 449 mtx_lock(&sc->sc_queue_mtx); 450 again: 451 bp = g_eli_takefirst(sc); 452 if (bp == NULL) { 453 if (sc->sc_flags & G_ELI_FLAG_DESTROY) { 454 g_eli_cancel(sc); 455 LIST_REMOVE(wr, w_next); 456 g_eli_freesession(wr); 457 free(wr, M_ELI); 458 G_ELI_DEBUG(1, "Thread %s exiting.", 459 curthread->td_proc->p_comm); 460 wakeup(&sc->sc_workers); 461 mtx_unlock(&sc->sc_queue_mtx); 462 kproc_exit(0); 463 } 464 while (sc->sc_flags & G_ELI_FLAG_SUSPEND) { 465 if (sc->sc_inflight > 0) { 466 G_ELI_DEBUG(0, "inflight=%d", sc->sc_inflight); 467 /* 468 * We still have inflight BIOs, so 469 * sleep and retry. 470 */ 471 msleep(sc, &sc->sc_queue_mtx, PRIBIO, 472 "geli:inf", hz / 5); 473 goto again; 474 } 475 /* 476 * Suspend requested, mark the worker as 477 * suspended and go to sleep. 478 */ 479 if (wr->w_active) { 480 g_eli_freesession(wr); 481 wr->w_active = FALSE; 482 } 483 wakeup(&sc->sc_workers); 484 msleep(sc, &sc->sc_queue_mtx, PRIBIO, 485 "geli:suspend", 0); 486 if (!wr->w_active && 487 !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) { 488 error = g_eli_newsession(wr); 489 KASSERT(error == 0, 490 ("g_eli_newsession() failed on resume (error=%d)", 491 error)); 492 wr->w_active = TRUE; 493 } 494 goto again; 495 } 496 msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0); 497 continue; 498 } 499 if (bp->bio_pflags == G_ELI_NEW_BIO) 500 atomic_add_int(&sc->sc_inflight, 1); 501 mtx_unlock(&sc->sc_queue_mtx); 502 if (bp->bio_pflags == G_ELI_NEW_BIO) { 503 bp->bio_pflags = 0; 504 if (sc->sc_flags & G_ELI_FLAG_AUTH) { 505 if (bp->bio_cmd == BIO_READ) 506 g_eli_auth_read(sc, bp); 507 else 508 g_eli_auth_run(wr, bp); 509 } else { 510 if (bp->bio_cmd == BIO_READ) 511 g_eli_crypto_read(sc, bp, 1); 512 else 513 g_eli_crypto_run(wr, bp); 514 } 515 } else { 516 if (sc->sc_flags & G_ELI_FLAG_AUTH) 517 g_eli_auth_run(wr, bp); 518 else 519 g_eli_crypto_run(wr, bp); 520 } 521 } 522 } 523 524 /* 525 * Here we generate IV. It is unique for every sector. 526 */ 527 void 528 g_eli_crypto_ivgen(struct g_eli_softc *sc, off_t offset, u_char *iv, 529 size_t size) 530 { 531 uint8_t off[8]; 532 533 if ((sc->sc_flags & G_ELI_FLAG_NATIVE_BYTE_ORDER) != 0) 534 bcopy(&offset, off, sizeof(off)); 535 else 536 le64enc(off, (uint64_t)offset); 537 538 switch (sc->sc_ealgo) { 539 case CRYPTO_AES_XTS: 540 bcopy(off, iv, sizeof(off)); 541 bzero(iv + sizeof(off), size - sizeof(off)); 542 break; 543 default: 544 { 545 u_char hash[SHA256_DIGEST_LENGTH]; 546 SHA256_CTX ctx; 547 548 /* Copy precalculated SHA256 context for IV-Key. */ 549 bcopy(&sc->sc_ivctx, &ctx, sizeof(ctx)); 550 SHA256_Update(&ctx, off, sizeof(off)); 551 SHA256_Final(hash, &ctx); 552 bcopy(hash, iv, MIN(sizeof(hash), size)); 553 break; 554 } 555 } 556 } 557 558 int 559 g_eli_read_metadata(struct g_class *mp, struct g_provider *pp, 560 struct g_eli_metadata *md) 561 { 562 struct g_geom *gp; 563 struct g_consumer *cp; 564 u_char *buf = NULL; 565 int error; 566 567 g_topology_assert(); 568 569 gp = g_new_geomf(mp, "eli:taste"); 570 gp->start = g_eli_start; 571 gp->access = g_std_access; 572 /* 573 * g_eli_read_metadata() is always called from the event thread. 574 * Our geom is created and destroyed in the same event, so there 575 * could be no orphan nor spoil event in the meantime. 576 */ 577 gp->orphan = g_eli_orphan_spoil_assert; 578 gp->spoiled = g_eli_orphan_spoil_assert; 579 cp = g_new_consumer(gp); 580 error = g_attach(cp, pp); 581 if (error != 0) 582 goto end; 583 error = g_access(cp, 1, 0, 0); 584 if (error != 0) 585 goto end; 586 g_topology_unlock(); 587 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 588 &error); 589 g_topology_lock(); 590 if (buf == NULL) 591 goto end; 592 eli_metadata_decode(buf, md); 593 end: 594 if (buf != NULL) 595 g_free(buf); 596 if (cp->provider != NULL) { 597 if (cp->acr == 1) 598 g_access(cp, -1, 0, 0); 599 g_detach(cp); 600 } 601 g_destroy_consumer(cp); 602 g_destroy_geom(gp); 603 return (error); 604 } 605 606 /* 607 * The function is called when we had last close on provider and user requested 608 * to close it when this situation occur. 609 */ 610 static void 611 g_eli_last_close(struct g_eli_softc *sc) 612 { 613 struct g_geom *gp; 614 struct g_provider *pp; 615 char ppname[64]; 616 int error; 617 618 g_topology_assert(); 619 gp = sc->sc_geom; 620 pp = LIST_FIRST(&gp->provider); 621 strlcpy(ppname, pp->name, sizeof(ppname)); 622 error = g_eli_destroy(sc, TRUE); 623 KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).", 624 ppname, error)); 625 G_ELI_DEBUG(0, "Detached %s on last close.", ppname); 626 } 627 628 int 629 g_eli_access(struct g_provider *pp, int dr, int dw, int de) 630 { 631 struct g_eli_softc *sc; 632 struct g_geom *gp; 633 634 gp = pp->geom; 635 sc = gp->softc; 636 637 if (dw > 0) { 638 if (sc->sc_flags & G_ELI_FLAG_RO) { 639 /* Deny write attempts. */ 640 return (EROFS); 641 } 642 /* Someone is opening us for write, we need to remember that. */ 643 sc->sc_flags |= G_ELI_FLAG_WOPEN; 644 return (0); 645 } 646 /* Is this the last close? */ 647 if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0) 648 return (0); 649 650 /* 651 * Automatically detach on last close if requested. 652 */ 653 if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) || 654 (sc->sc_flags & G_ELI_FLAG_WOPEN)) { 655 g_eli_last_close(sc); 656 } 657 return (0); 658 } 659 660 static int 661 g_eli_cpu_is_disabled(int cpu) 662 { 663 #ifdef SMP 664 return ((hlt_cpus_mask & (1 << cpu)) != 0); 665 #else 666 return (0); 667 #endif 668 } 669 670 struct g_geom * 671 g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp, 672 const struct g_eli_metadata *md, const u_char *mkey, int nkey) 673 { 674 struct g_eli_softc *sc; 675 struct g_eli_worker *wr; 676 struct g_geom *gp; 677 struct g_provider *pp; 678 struct g_consumer *cp; 679 u_int i, threads; 680 int error; 681 682 G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX); 683 684 gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX); 685 gp->softc = NULL; /* for a moment */ 686 687 sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO); 688 gp->start = g_eli_start; 689 /* 690 * Spoiling cannot happen actually, because we keep provider open for 691 * writing all the time or provider is read-only. 692 */ 693 gp->spoiled = g_eli_orphan_spoil_assert; 694 gp->orphan = g_eli_orphan; 695 gp->dumpconf = g_eli_dumpconf; 696 /* 697 * If detach-on-last-close feature is not enabled and we don't operate 698 * on read-only provider, we can simply use g_std_access(). 699 */ 700 if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO)) 701 gp->access = g_eli_access; 702 else 703 gp->access = g_std_access; 704 705 sc->sc_inflight = 0; 706 sc->sc_crypto = G_ELI_CRYPTO_UNKNOWN; 707 sc->sc_flags = md->md_flags; 708 /* Backward compatibility. */ 709 if (md->md_version < 4) 710 sc->sc_flags |= G_ELI_FLAG_NATIVE_BYTE_ORDER; 711 if (md->md_version < 5) 712 sc->sc_flags |= G_ELI_FLAG_SINGLE_KEY; 713 sc->sc_ealgo = md->md_ealgo; 714 sc->sc_nkey = nkey; 715 716 if (sc->sc_flags & G_ELI_FLAG_AUTH) { 717 sc->sc_akeylen = sizeof(sc->sc_akey) * 8; 718 sc->sc_aalgo = md->md_aalgo; 719 sc->sc_alen = g_eli_hashlen(sc->sc_aalgo); 720 721 sc->sc_data_per_sector = bpp->sectorsize - sc->sc_alen; 722 /* 723 * Some hash functions (like SHA1 and RIPEMD160) generates hash 724 * which length is not multiple of 128 bits, but we want data 725 * length to be multiple of 128, so we can encrypt without 726 * padding. The line below rounds down data length to multiple 727 * of 128 bits. 728 */ 729 sc->sc_data_per_sector -= sc->sc_data_per_sector % 16; 730 731 sc->sc_bytes_per_sector = 732 (md->md_sectorsize - 1) / sc->sc_data_per_sector + 1; 733 sc->sc_bytes_per_sector *= bpp->sectorsize; 734 } 735 736 gp->softc = sc; 737 sc->sc_geom = gp; 738 739 bioq_init(&sc->sc_queue); 740 mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF); 741 mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF); 742 743 pp = NULL; 744 cp = g_new_consumer(gp); 745 error = g_attach(cp, bpp); 746 if (error != 0) { 747 if (req != NULL) { 748 gctl_error(req, "Cannot attach to %s (error=%d).", 749 bpp->name, error); 750 } else { 751 G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).", 752 bpp->name, error); 753 } 754 goto failed; 755 } 756 /* 757 * Keep provider open all the time, so we can run critical tasks, 758 * like Master Keys deletion, without wondering if we can open 759 * provider or not. 760 * We don't open provider for writing only when user requested read-only 761 * access. 762 */ 763 if (sc->sc_flags & G_ELI_FLAG_RO) 764 error = g_access(cp, 1, 0, 1); 765 else 766 error = g_access(cp, 1, 1, 1); 767 if (error != 0) { 768 if (req != NULL) { 769 gctl_error(req, "Cannot access %s (error=%d).", 770 bpp->name, error); 771 } else { 772 G_ELI_DEBUG(1, "Cannot access %s (error=%d).", 773 bpp->name, error); 774 } 775 goto failed; 776 } 777 778 sc->sc_sectorsize = md->md_sectorsize; 779 sc->sc_mediasize = bpp->mediasize; 780 if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) 781 sc->sc_mediasize -= bpp->sectorsize; 782 if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) 783 sc->sc_mediasize -= (sc->sc_mediasize % sc->sc_sectorsize); 784 else { 785 sc->sc_mediasize /= sc->sc_bytes_per_sector; 786 sc->sc_mediasize *= sc->sc_sectorsize; 787 } 788 789 /* 790 * Remember the keys in our softc structure. 791 */ 792 g_eli_mkey_propagate(sc, mkey); 793 sc->sc_ekeylen = md->md_keylen; 794 795 LIST_INIT(&sc->sc_workers); 796 797 threads = g_eli_threads; 798 if (threads == 0) 799 threads = mp_ncpus; 800 else if (threads > mp_ncpus) { 801 /* There is really no need for too many worker threads. */ 802 threads = mp_ncpus; 803 G_ELI_DEBUG(0, "Reducing number of threads to %u.", threads); 804 } 805 for (i = 0; i < threads; i++) { 806 if (g_eli_cpu_is_disabled(i)) { 807 G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.", 808 bpp->name, i); 809 continue; 810 } 811 wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO); 812 wr->w_softc = sc; 813 wr->w_number = i; 814 wr->w_active = TRUE; 815 816 error = g_eli_newsession(wr); 817 if (error != 0) { 818 free(wr, M_ELI); 819 if (req != NULL) { 820 gctl_error(req, "Cannot set up crypto session " 821 "for %s (error=%d).", bpp->name, error); 822 } else { 823 G_ELI_DEBUG(1, "Cannot set up crypto session " 824 "for %s (error=%d).", bpp->name, error); 825 } 826 goto failed; 827 } 828 829 error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0, 830 "g_eli[%u] %s", i, bpp->name); 831 if (error != 0) { 832 g_eli_freesession(wr); 833 free(wr, M_ELI); 834 if (req != NULL) { 835 gctl_error(req, "Cannot create kernel thread " 836 "for %s (error=%d).", bpp->name, error); 837 } else { 838 G_ELI_DEBUG(1, "Cannot create kernel thread " 839 "for %s (error=%d).", bpp->name, error); 840 } 841 goto failed; 842 } 843 LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next); 844 /* If we have hardware support, one thread is enough. */ 845 if (sc->sc_crypto == G_ELI_CRYPTO_HW) 846 break; 847 } 848 849 /* 850 * Create decrypted provider. 851 */ 852 pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX); 853 pp->mediasize = sc->sc_mediasize; 854 pp->sectorsize = sc->sc_sectorsize; 855 856 g_error_provider(pp, 0); 857 858 G_ELI_DEBUG(0, "Device %s created.", pp->name); 859 G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo), 860 sc->sc_ekeylen); 861 if (sc->sc_flags & G_ELI_FLAG_AUTH) 862 G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo)); 863 G_ELI_DEBUG(0, " Crypto: %s", 864 sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware"); 865 return (gp); 866 failed: 867 mtx_lock(&sc->sc_queue_mtx); 868 sc->sc_flags |= G_ELI_FLAG_DESTROY; 869 wakeup(sc); 870 /* 871 * Wait for kernel threads self destruction. 872 */ 873 while (!LIST_EMPTY(&sc->sc_workers)) { 874 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO, 875 "geli:destroy", 0); 876 } 877 mtx_destroy(&sc->sc_queue_mtx); 878 if (cp->provider != NULL) { 879 if (cp->acr == 1) 880 g_access(cp, -1, -1, -1); 881 g_detach(cp); 882 } 883 g_destroy_consumer(cp); 884 g_destroy_geom(gp); 885 g_eli_key_destroy(sc); 886 bzero(sc, sizeof(*sc)); 887 free(sc, M_ELI); 888 return (NULL); 889 } 890 891 int 892 g_eli_destroy(struct g_eli_softc *sc, boolean_t force) 893 { 894 struct g_geom *gp; 895 struct g_provider *pp; 896 897 g_topology_assert(); 898 899 if (sc == NULL) 900 return (ENXIO); 901 902 gp = sc->sc_geom; 903 pp = LIST_FIRST(&gp->provider); 904 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { 905 if (force) { 906 G_ELI_DEBUG(1, "Device %s is still open, so it " 907 "cannot be definitely removed.", pp->name); 908 } else { 909 G_ELI_DEBUG(1, 910 "Device %s is still open (r%dw%de%d).", pp->name, 911 pp->acr, pp->acw, pp->ace); 912 return (EBUSY); 913 } 914 } 915 916 mtx_lock(&sc->sc_queue_mtx); 917 sc->sc_flags |= G_ELI_FLAG_DESTROY; 918 wakeup(sc); 919 while (!LIST_EMPTY(&sc->sc_workers)) { 920 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO, 921 "geli:destroy", 0); 922 } 923 mtx_destroy(&sc->sc_queue_mtx); 924 gp->softc = NULL; 925 g_eli_key_destroy(sc); 926 bzero(sc, sizeof(*sc)); 927 free(sc, M_ELI); 928 929 if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)) 930 G_ELI_DEBUG(0, "Device %s destroyed.", gp->name); 931 g_wither_geom_close(gp, ENXIO); 932 933 return (0); 934 } 935 936 static int 937 g_eli_destroy_geom(struct gctl_req *req __unused, 938 struct g_class *mp __unused, struct g_geom *gp) 939 { 940 struct g_eli_softc *sc; 941 942 sc = gp->softc; 943 return (g_eli_destroy(sc, FALSE)); 944 } 945 946 static int 947 g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider) 948 { 949 u_char *keyfile, *data; 950 char *file, name[64]; 951 size_t size; 952 int i; 953 954 for (i = 0; ; i++) { 955 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i); 956 keyfile = preload_search_by_type(name); 957 if (keyfile == NULL) 958 return (i); /* Return number of loaded keyfiles. */ 959 data = preload_fetch_addr(keyfile); 960 if (data == NULL) { 961 G_ELI_DEBUG(0, "Cannot find key file data for %s.", 962 name); 963 return (0); 964 } 965 size = preload_fetch_size(keyfile); 966 if (size == 0) { 967 G_ELI_DEBUG(0, "Cannot find key file size for %s.", 968 name); 969 return (0); 970 } 971 file = preload_search_info(keyfile, MODINFO_NAME); 972 if (file == NULL) { 973 G_ELI_DEBUG(0, "Cannot find key file name for %s.", 974 name); 975 return (0); 976 } 977 G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file, 978 provider, name); 979 g_eli_crypto_hmac_update(ctx, data, size); 980 } 981 } 982 983 static void 984 g_eli_keyfiles_clear(const char *provider) 985 { 986 u_char *keyfile, *data; 987 char name[64]; 988 size_t size; 989 int i; 990 991 for (i = 0; ; i++) { 992 snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i); 993 keyfile = preload_search_by_type(name); 994 if (keyfile == NULL) 995 return; 996 data = preload_fetch_addr(keyfile); 997 size = preload_fetch_size(keyfile); 998 if (data != NULL && size != 0) 999 bzero(data, size); 1000 } 1001 } 1002 1003 /* 1004 * Tasting is only made on boot. 1005 * We detect providers which should be attached before root is mounted. 1006 */ 1007 static struct g_geom * 1008 g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 1009 { 1010 struct g_eli_metadata md; 1011 struct g_geom *gp; 1012 struct hmac_ctx ctx; 1013 char passphrase[256]; 1014 u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN]; 1015 u_int i, nkey, nkeyfiles, tries; 1016 int error; 1017 1018 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); 1019 g_topology_assert(); 1020 1021 if (root_mounted() || g_eli_tries == 0) 1022 return (NULL); 1023 1024 G_ELI_DEBUG(3, "Tasting %s.", pp->name); 1025 1026 error = g_eli_read_metadata(mp, pp, &md); 1027 if (error != 0) 1028 return (NULL); 1029 gp = NULL; 1030 1031 if (strcmp(md.md_magic, G_ELI_MAGIC) != 0) 1032 return (NULL); 1033 if (md.md_version > G_ELI_VERSION) { 1034 printf("geom_eli.ko module is too old to handle %s.\n", 1035 pp->name); 1036 return (NULL); 1037 } 1038 if (md.md_provsize != pp->mediasize) 1039 return (NULL); 1040 /* Should we attach it on boot? */ 1041 if (!(md.md_flags & G_ELI_FLAG_BOOT)) 1042 return (NULL); 1043 if (md.md_keys == 0x00) { 1044 G_ELI_DEBUG(0, "No valid keys on %s.", pp->name); 1045 return (NULL); 1046 } 1047 if (md.md_iterations == -1) { 1048 /* If there is no passphrase, we try only once. */ 1049 tries = 1; 1050 } else { 1051 /* Ask for the passphrase no more than g_eli_tries times. */ 1052 tries = g_eli_tries; 1053 } 1054 1055 for (i = 0; i < tries; i++) { 1056 g_eli_crypto_hmac_init(&ctx, NULL, 0); 1057 1058 /* 1059 * Load all key files. 1060 */ 1061 nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name); 1062 1063 if (nkeyfiles == 0 && md.md_iterations == -1) { 1064 /* 1065 * No key files and no passphrase, something is 1066 * definitely wrong here. 1067 * geli(8) doesn't allow for such situation, so assume 1068 * that there was really no passphrase and in that case 1069 * key files are no properly defined in loader.conf. 1070 */ 1071 G_ELI_DEBUG(0, 1072 "Found no key files in loader.conf for %s.", 1073 pp->name); 1074 return (NULL); 1075 } 1076 1077 /* Ask for the passphrase if defined. */ 1078 if (md.md_iterations >= 0) { 1079 printf("Enter passphrase for %s: ", pp->name); 1080 gets(passphrase, sizeof(passphrase), 1081 g_eli_visible_passphrase); 1082 } 1083 1084 /* 1085 * Prepare Derived-Key from the user passphrase. 1086 */ 1087 if (md.md_iterations == 0) { 1088 g_eli_crypto_hmac_update(&ctx, md.md_salt, 1089 sizeof(md.md_salt)); 1090 g_eli_crypto_hmac_update(&ctx, passphrase, 1091 strlen(passphrase)); 1092 bzero(passphrase, sizeof(passphrase)); 1093 } else if (md.md_iterations > 0) { 1094 u_char dkey[G_ELI_USERKEYLEN]; 1095 1096 pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt, 1097 sizeof(md.md_salt), passphrase, md.md_iterations); 1098 bzero(passphrase, sizeof(passphrase)); 1099 g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey)); 1100 bzero(dkey, sizeof(dkey)); 1101 } 1102 1103 g_eli_crypto_hmac_final(&ctx, key, 0); 1104 1105 /* 1106 * Decrypt Master-Key. 1107 */ 1108 error = g_eli_mkey_decrypt(&md, key, mkey, &nkey); 1109 bzero(key, sizeof(key)); 1110 if (error == -1) { 1111 if (i == tries - 1) { 1112 G_ELI_DEBUG(0, 1113 "Wrong key for %s. No tries left.", 1114 pp->name); 1115 g_eli_keyfiles_clear(pp->name); 1116 return (NULL); 1117 } 1118 G_ELI_DEBUG(0, "Wrong key for %s. Tries left: %u.", 1119 pp->name, tries - i - 1); 1120 /* Try again. */ 1121 continue; 1122 } else if (error > 0) { 1123 G_ELI_DEBUG(0, "Cannot decrypt Master Key for %s (error=%d).", 1124 pp->name, error); 1125 g_eli_keyfiles_clear(pp->name); 1126 return (NULL); 1127 } 1128 G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name); 1129 break; 1130 } 1131 1132 /* 1133 * We have correct key, let's attach provider. 1134 */ 1135 gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey); 1136 bzero(mkey, sizeof(mkey)); 1137 bzero(&md, sizeof(md)); 1138 if (gp == NULL) { 1139 G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name, 1140 G_ELI_SUFFIX); 1141 return (NULL); 1142 } 1143 return (gp); 1144 } 1145 1146 static void 1147 g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 1148 struct g_consumer *cp, struct g_provider *pp) 1149 { 1150 struct g_eli_softc *sc; 1151 1152 g_topology_assert(); 1153 sc = gp->softc; 1154 if (sc == NULL) 1155 return; 1156 if (pp != NULL || cp != NULL) 1157 return; /* Nothing here. */ 1158 1159 sbuf_printf(sb, "%s<KeysTotal>%ju</KeysTotal>", indent, 1160 (uintmax_t)sc->sc_ekeys_total); 1161 sbuf_printf(sb, "%s<KeysAllocated>%ju</KeysAllocated>", indent, 1162 (uintmax_t)sc->sc_ekeys_allocated); 1163 sbuf_printf(sb, "%s<Flags>", indent); 1164 if (sc->sc_flags == 0) 1165 sbuf_printf(sb, "NONE"); 1166 else { 1167 int first = 1; 1168 1169 #define ADD_FLAG(flag, name) do { \ 1170 if (sc->sc_flags & (flag)) { \ 1171 if (!first) \ 1172 sbuf_printf(sb, ", "); \ 1173 else \ 1174 first = 0; \ 1175 sbuf_printf(sb, name); \ 1176 } \ 1177 } while (0) 1178 ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND"); 1179 ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY"); 1180 ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER"); 1181 ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME"); 1182 ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT"); 1183 ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH"); 1184 ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH"); 1185 ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH"); 1186 ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN"); 1187 ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY"); 1188 ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY"); 1189 #undef ADD_FLAG 1190 } 1191 sbuf_printf(sb, "</Flags>\n"); 1192 1193 if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) { 1194 sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent, 1195 sc->sc_nkey); 1196 } 1197 sbuf_printf(sb, "%s<Crypto>", indent); 1198 switch (sc->sc_crypto) { 1199 case G_ELI_CRYPTO_HW: 1200 sbuf_printf(sb, "hardware"); 1201 break; 1202 case G_ELI_CRYPTO_SW: 1203 sbuf_printf(sb, "software"); 1204 break; 1205 default: 1206 sbuf_printf(sb, "UNKNOWN"); 1207 break; 1208 } 1209 sbuf_printf(sb, "</Crypto>\n"); 1210 if (sc->sc_flags & G_ELI_FLAG_AUTH) { 1211 sbuf_printf(sb, 1212 "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n", 1213 indent, g_eli_algo2str(sc->sc_aalgo)); 1214 } 1215 sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent, 1216 sc->sc_ekeylen); 1217 sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n", indent, 1218 g_eli_algo2str(sc->sc_ealgo)); 1219 sbuf_printf(sb, "%s<State>%s</State>\n", indent, 1220 (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE"); 1221 } 1222 1223 static void 1224 g_eli_shutdown_pre_sync(void *arg, int howto) 1225 { 1226 struct g_class *mp; 1227 struct g_geom *gp, *gp2; 1228 struct g_provider *pp; 1229 struct g_eli_softc *sc; 1230 int error; 1231 1232 mp = arg; 1233 DROP_GIANT(); 1234 g_topology_lock(); 1235 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 1236 sc = gp->softc; 1237 if (sc == NULL) 1238 continue; 1239 pp = LIST_FIRST(&gp->provider); 1240 KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name)); 1241 if (pp->acr + pp->acw + pp->ace == 0) 1242 error = g_eli_destroy(sc, TRUE); 1243 else { 1244 sc->sc_flags |= G_ELI_FLAG_RW_DETACH; 1245 gp->access = g_eli_access; 1246 } 1247 } 1248 g_topology_unlock(); 1249 PICKUP_GIANT(); 1250 } 1251 1252 static void 1253 g_eli_init(struct g_class *mp) 1254 { 1255 1256 g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync, 1257 g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST); 1258 if (g_eli_pre_sync == NULL) 1259 G_ELI_DEBUG(0, "Warning! Cannot register shutdown event."); 1260 } 1261 1262 static void 1263 g_eli_fini(struct g_class *mp) 1264 { 1265 1266 if (g_eli_pre_sync != NULL) 1267 EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync); 1268 } 1269 1270 DECLARE_GEOM_CLASS(g_eli_class, g_eli); 1271 MODULE_DEPEND(g_eli, crypto, 1, 1, 1); 1272