Lines matching "hardware-accelerated" (query: +full:hardware +full:-accelerated) in FreeBSD's sys/geom/eli/g_eli.c

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2005-2019 Pawel Jakub Dawidek <pawel@dawidek.net>
85 0, "Number of times on-disk keys should be overwritten when destroying them");
126 if (error != 0 || req->newptr == NULL) in sysctl_g_eli_minbufs()
133 uma_prealloc(g_eli_uma, new - g_eli_minbufs); in sysctl_g_eli_minbufs()
142 * Passphrase cached during boot, in order to be more user-friendly if
186 for (i = 0; i < keybuf->kb_nents; i++) { in zero_geli_intake_keys()
187 if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) { in zero_geli_intake_keys()
188 explicit_bzero(keybuf->kb_ents[i].ke_data, in zero_geli_intake_keys()
189 sizeof(keybuf->kb_ents[i].ke_data)); in zero_geli_intake_keys()
190 keybuf->kb_ents[i].ke_type = KEYBUF_TYPE_NONE; in zero_geli_intake_keys()
230  * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
232  * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
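The two code-path comments above capture the essential ordering: for reads the ciphertext is fetched from the lower provider first and decrypted afterwards, while for writes the payload is encrypted before the request is passed down. A minimal userland sketch of just that ordering, with stub I/O and a toy XOR "cipher" standing in for the real crypto (every name below is invented for illustration, none of it is geli code):

    #include <stdio.h>
    #include <string.h>

    enum { BIO_READ, BIO_WRITE };

    struct bio {
        int  cmd;
        char data[16];
    };

    /* Toy stand-in for the symmetric cipher; real geli uses AES via OCF. */
    static void toy_crypt(char *buf, size_t len)
    {
        for (size_t i = 0; i < len; i++)
            buf[i] ^= 0x5a;
    }

    static char backing_store[16];          /* pretend lower provider */

    static void lower_io(struct bio *bp)    /* "g_io_request" stand-in */
    {
        if (bp->cmd == BIO_READ)
            memcpy(bp->data, backing_store, sizeof(bp->data));
        else
            memcpy(backing_store, bp->data, sizeof(bp->data));
    }

    /* WRITE path: encrypt first, then push the request down. */
    static void start_write(struct bio *bp)
    {
        toy_crypt(bp->data, sizeof(bp->data));   /* encrypt before I/O */
        lower_io(bp);
    }

    /* READ path: push the request down first, decrypt on completion. */
    static void start_read(struct bio *bp)
    {
        lower_io(bp);
        toy_crypt(bp->data, sizeof(bp->data));   /* decrypt after I/O */
    }

    int main(void)
    {
        struct bio w = { .cmd = BIO_WRITE }, r = { .cmd = BIO_READ };

        strcpy(w.data, "hello, geli");
        start_write(&w);                /* backing store now holds ciphertext */
        start_read(&r);
        printf("read back: %s\n", r.data);
        return 0;
    }

Only the ordering is modeled here; the real driver does the cipher work asynchronously in worker threads through the opencrypto framework, as the later g_eli_newsession() and g_eli_worker() lines show.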
248 bp = (struct bio *)crp->crp_opaque; in g_eli_crypto_rerun()
249 sc = bp->bio_to->geom->softc; in g_eli_crypto_rerun()
250 LIST_FOREACH(wr, &sc->sc_workers, w_next) { in g_eli_crypto_rerun()
251 if (wr->w_number == G_ELI_WORKER(bp->bio_pflags)) in g_eli_crypto_rerun()
255 G_ELI_WORKER(bp->bio_pflags))); in g_eli_crypto_rerun()
256 G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %p -> %p).", in g_eli_crypto_rerun()
257 bp->bio_cmd == BIO_READ ? "READ" : "WRITE", wr->w_sid, in g_eli_crypto_rerun()
258 crp->crp_session); in g_eli_crypto_rerun()
259 wr->w_sid = crp->crp_session; in g_eli_crypto_rerun()
260 crp->crp_etype = 0; in g_eli_crypto_rerun()
265 crp->crp_etype = error; in g_eli_crypto_rerun()
272 if (bp->bio_error == 0 && in g_eli_getattr_done()
273 !strcmp(bp->bio_attribute, "GEOM::physpath")) { in g_eli_getattr_done()
274 strlcat(bp->bio_data, "/eli", bp->bio_length); in g_eli_getattr_done()
282  * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
291 pbp = bp->bio_parent; in g_eli_read_done()
292 if (pbp->bio_error == 0 && bp->bio_error != 0) in g_eli_read_done()
293 pbp->bio_error = bp->bio_error; in g_eli_read_done()
298 pbp->bio_inbed++; in g_eli_read_done()
299 if (pbp->bio_inbed < pbp->bio_children) in g_eli_read_done()
301 sc = pbp->bio_to->geom->softc; in g_eli_read_done()
302 if (pbp->bio_error != 0) { in g_eli_read_done()
304 pbp->bio_error); in g_eli_read_done()
305 pbp->bio_completed = 0; in g_eli_read_done()
307 g_io_deliver(pbp, pbp->bio_error); in g_eli_read_done()
309 atomic_subtract_int(&sc->sc_inflight, 1); in g_eli_read_done()
312 mtx_lock(&sc->sc_queue_mtx); in g_eli_read_done()
313 bioq_insert_tail(&sc->sc_queue, pbp); in g_eli_read_done()
314 mtx_unlock(&sc->sc_queue_mtx); in g_eli_read_done()
321  * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
330 pbp = bp->bio_parent; in g_eli_write_done()
331 if (pbp->bio_error == 0 && bp->bio_error != 0) in g_eli_write_done()
332 pbp->bio_error = bp->bio_error; in g_eli_write_done()
337 pbp->bio_inbed++; in g_eli_write_done()
338 if (pbp->bio_inbed < pbp->bio_children) in g_eli_write_done()
340 sc = pbp->bio_to->geom->softc; in g_eli_write_done()
342 if (pbp->bio_error != 0) { in g_eli_write_done()
344 pbp->bio_error); in g_eli_write_done()
345 pbp->bio_completed = 0; in g_eli_write_done()
347 pbp->bio_completed = pbp->bio_length; in g_eli_write_done()
352 g_io_deliver(pbp, pbp->bio_error); in g_eli_write_done()
354 atomic_subtract_int(&sc->sc_inflight, 1); in g_eli_write_done()
358 * This function should never be called, but GEOM made as it set ->orphan()
365 panic("Function %s() called for %s.", __func__, cp->geom->name); in g_eli_orphan_spoil_assert()
374 sc = cp->geom->softc; in g_eli_orphan()
388 sc = cp->geom->softc; in g_eli_resize()
392 if ((sc->sc_flags & G_ELI_FLAG_AUTORESIZE) == 0) { in g_eli_resize()
394 (intmax_t)sc->sc_provsize); in g_eli_resize()
398 pp = cp->provider; in g_eli_resize()
400 if ((sc->sc_flags & G_ELI_FLAG_ONETIME) == 0) { in g_eli_resize()
407 error = g_eli_read_metadata_offset(cp->geom->class, pp, in g_eli_resize()
408 sc->sc_provsize - pp->sectorsize, &md); in g_eli_resize()
411 pp->name, error); in g_eli_resize()
415 md.md_provsize = pp->mediasize; in g_eli_resize()
417 sector = malloc(pp->sectorsize, M_ELI, M_WAITOK | M_ZERO); in g_eli_resize()
419 error = g_write_data(cp, pp->mediasize - pp->sectorsize, sector, in g_eli_resize()
420 pp->sectorsize); in g_eli_resize()
423 pp->name, error); in g_eli_resize()
426 explicit_bzero(sector, pp->sectorsize); in g_eli_resize()
427 error = g_write_data(cp, sc->sc_provsize - pp->sectorsize, in g_eli_resize()
428 sector, pp->sectorsize); in g_eli_resize()
431 pp->name, error); in g_eli_resize()
439 oldsize = sc->sc_mediasize; in g_eli_resize()
440 sc->sc_mediasize = eli_mediasize(sc, pp->mediasize, pp->sectorsize); in g_eli_resize()
442 sc->sc_provsize = pp->mediasize; in g_eli_resize()
444 epp = LIST_FIRST(&sc->sc_geom->provider); in g_eli_resize()
445 g_resize_provider(epp, sc->sc_mediasize); in g_eli_resize()
446 G_ELI_DEBUG(0, "Device %s size changed from %jd to %jd.", epp->name, in g_eli_resize()
447 (intmax_t)oldsize, (intmax_t)sc->sc_mediasize); in g_eli_resize()
452  * G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
454  * G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
463 sc = bp->bio_to->geom->softc; in g_eli_start()
466 bp->bio_to->error, bp->bio_to->name)); in g_eli_start()
469 switch (bp->bio_cmd) { in g_eli_start()
486 if (!(sc->sc_flags & G_ELI_FLAG_NODELETE)) in g_eli_start()
497 bp->bio_driver1 = cbp; in g_eli_start()
498 bp->bio_pflags = 0; in g_eli_start()
499 G_ELI_SET_NEW_BIO(bp->bio_pflags); in g_eli_start()
500 switch (bp->bio_cmd) { in g_eli_start()
502 if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) { in g_eli_start()
508 mtx_lock(&sc->sc_queue_mtx); in g_eli_start()
509 bioq_insert_tail(&sc->sc_queue, bp); in g_eli_start()
510 mtx_unlock(&sc->sc_queue_mtx); in g_eli_start()
518 if (bp->bio_cmd == BIO_GETATTR) in g_eli_start()
519 cbp->bio_done = g_eli_getattr_done; in g_eli_start()
521 cbp->bio_done = g_std_done; in g_eli_start()
522 cp = LIST_FIRST(&sc->sc_geom->consumer); in g_eli_start()
523 cbp->bio_to = cp->provider; in g_eli_start()
539 sc = wr->w_softc; in g_eli_newsession()
543 csp.csp_cipher_alg = sc->sc_ealgo; in g_eli_newsession()
544 csp.csp_ivlen = g_eli_ivlen(sc->sc_ealgo); in g_eli_newsession()
545 csp.csp_cipher_klen = sc->sc_ekeylen / 8; in g_eli_newsession()
546 if (sc->sc_ealgo == CRYPTO_AES_XTS) in g_eli_newsession()
548 if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) { in g_eli_newsession()
550 LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize); in g_eli_newsession()
554 csp.csp_cipher_key = sc->sc_ekey; in g_eli_newsession()
556 if (sc->sc_flags & G_ELI_FLAG_AUTH) { in g_eli_newsession()
558 csp.csp_auth_alg = sc->sc_aalgo; in g_eli_newsession()
562 switch (sc->sc_crypto) { in g_eli_newsession()
565 error = crypto_newsession(&wr->w_sid, &csp, in g_eli_newsession()
569 error = crypto_newsession(&wr->w_sid, &csp, in g_eli_newsession()
573 error = crypto_newsession(&wr->w_sid, &csp, in g_eli_newsession()
576 caps = crypto_ses2caps(wr->w_sid); in g_eli_newsession()
583 mtx_lock(&sc->sc_queue_mtx); in g_eli_newsession()
584 if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN) in g_eli_newsession()
585 sc->sc_crypto = new_crypto; in g_eli_newsession()
586 mtx_unlock(&sc->sc_queue_mtx); in g_eli_newsession()
593 if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) { in g_eli_newsession()
597 wr->w_first_key = key; in g_eli_newsession()
608 crypto_freesession(wr->w_sid); in g_eli_freesession()
609 if (wr->w_first_key != NULL) { in g_eli_freesession()
610 sc = wr->w_softc; in g_eli_freesession()
611 g_eli_key_drop(sc, wr->w_first_key); in g_eli_freesession()
612 wr->w_first_key = NULL; in g_eli_freesession()
621 mtx_assert(&sc->sc_queue_mtx, MA_OWNED); in g_eli_cancel()
623 while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) { in g_eli_cancel()
624 KASSERT(G_ELI_IS_NEW_BIO(bp->bio_pflags), in g_eli_cancel()
635 mtx_assert(&sc->sc_queue_mtx, MA_OWNED); in g_eli_takefirst()
637 if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND)) in g_eli_takefirst()
638 return (bioq_takefirst(&sc->sc_queue)); in g_eli_takefirst()
642 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { in g_eli_takefirst()
643 if (!G_ELI_IS_NEW_BIO(bp->bio_pflags)) in g_eli_takefirst()
647 bioq_remove(&sc->sc_queue, bp); in g_eli_takefirst()
653 * hardware acceleration and we have to do cryptography in software.
666 sc = wr->w_softc; in g_eli_worker()
668 MPASS(!sc->sc_cpubind || smp_started); in g_eli_worker()
670 /* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */ in g_eli_worker()
671 if (sc->sc_cpubind) { in g_eli_worker()
678 if (sc->sc_cpubind) in g_eli_worker()
679 sched_bind(curthread, wr->w_number % mp_ncpus); in g_eli_worker()
682 G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm); in g_eli_worker()
685 mtx_lock(&sc->sc_queue_mtx); in g_eli_worker()
689 if (sc->sc_flags & G_ELI_FLAG_DESTROY) { in g_eli_worker()
695 curthread->td_proc->p_comm); in g_eli_worker()
696 wakeup(&sc->sc_workers); in g_eli_worker()
697 mtx_unlock(&sc->sc_queue_mtx); in g_eli_worker()
700 while (sc->sc_flags & G_ELI_FLAG_SUSPEND) { in g_eli_worker()
701 if (sc->sc_inflight > 0) { in g_eli_worker()
703 sc->sc_inflight); in g_eli_worker()
708 msleep(sc, &sc->sc_queue_mtx, PRIBIO, in g_eli_worker()
716 if (wr->w_active) { in g_eli_worker()
718 wr->w_active = FALSE; in g_eli_worker()
720 wakeup(&sc->sc_workers); in g_eli_worker()
721 msleep(sc, &sc->sc_queue_mtx, PRIBIO, in g_eli_worker()
723 if (!wr->w_active && in g_eli_worker()
724 !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) { in g_eli_worker()
729 wr->w_active = TRUE; in g_eli_worker()
733 msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0); in g_eli_worker()
736 if (G_ELI_IS_NEW_BIO(bp->bio_pflags)) in g_eli_worker()
737 atomic_add_int(&sc->sc_inflight, 1); in g_eli_worker()
738 mtx_unlock(&sc->sc_queue_mtx); in g_eli_worker()
739 if (G_ELI_IS_NEW_BIO(bp->bio_pflags)) { in g_eli_worker()
740 G_ELI_SETWORKER(bp->bio_pflags, 0); in g_eli_worker()
741 if (sc->sc_flags & G_ELI_FLAG_AUTH) { in g_eli_worker()
742 if (bp->bio_cmd == BIO_READ) in g_eli_worker()
747 if (bp->bio_cmd == BIO_READ) in g_eli_worker()
753 if (sc->sc_flags & G_ELI_FLAG_AUTH) in g_eli_worker()
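The g_eli_worker() lines above show geli's worker-thread pattern: each worker loops taking bios from a queue protected by sc_queue_mtx, msleep()s on the queue when it is empty ("geli:w"), is woken by the request and completion paths, and exits when G_ELI_FLAG_DESTROY is set. A rough userland analogue of that loop using pthreads; everything below is invented for illustration and is not geli code:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct work {
        struct work *next;
        int          payload;
    };

    static struct work     *queue_head;
    static pthread_mutex_t  queue_mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t   queue_cv  = PTHREAD_COND_INITIALIZER; /* ~ wakeup()/msleep() pair */
    static bool             destroying;                           /* ~ G_ELI_FLAG_DESTROY */

    static void enqueue(struct work *w)
    {
        pthread_mutex_lock(&queue_mtx);
        w->next = queue_head;
        queue_head = w;
        pthread_cond_signal(&queue_cv);
        pthread_mutex_unlock(&queue_mtx);
    }

    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&queue_mtx);
        for (;;) {
            while (queue_head == NULL && !destroying)
                pthread_cond_wait(&queue_cv, &queue_mtx);  /* ~ msleep(..., "geli:w", ...) */
            if (destroying && queue_head == NULL)
                break;
            struct work *w = queue_head;
            queue_head = w->next;
            pthread_mutex_unlock(&queue_mtx);

            /* Encryption/decryption would happen here, outside the lock. */
            printf("processed item %d\n", w->payload);
            free(w);

            pthread_mutex_lock(&queue_mtx);
        }
        pthread_mutex_unlock(&queue_mtx);
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;
        pthread_create(&tid, NULL, worker, NULL);

        for (int i = 0; i < 3; i++) {
            struct work *w = malloc(sizeof(*w));
            w->payload = i;
            enqueue(w);
        }

        pthread_mutex_lock(&queue_mtx);
        destroying = true;                   /* ~ setting G_ELI_FLAG_DESTROY */
        pthread_cond_broadcast(&queue_cv);
        pthread_mutex_unlock(&queue_mtx);
        pthread_join(tid, NULL);
        return 0;
    }

The real code additionally binds workers to CPUs (sched_bind when sc_cpubind is set) and handles the suspend/resume states visible above, which this sketch omits.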
773 gp->start = g_eli_start; in g_eli_read_metadata_offset()
774 gp->access = g_std_access; in g_eli_read_metadata_offset()
780 gp->orphan = g_eli_orphan_spoil_assert; in g_eli_read_metadata_offset()
781 gp->spoiled = g_eli_orphan_spoil_assert; in g_eli_read_metadata_offset()
783 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; in g_eli_read_metadata_offset()
791 buf = g_read_data(cp, offset, pp->sectorsize, &error); in g_eli_read_metadata_offset()
801 if (cp->provider != NULL) { in g_eli_read_metadata_offset()
802 if (cp->acr == 1) in g_eli_read_metadata_offset()
803 g_access(cp, -1, 0, 0); in g_eli_read_metadata_offset()
817 pp->mediasize - pp->sectorsize, md)); in g_eli_read_metadata()
833 strlcpy(gpname, gp->name, sizeof(gpname)); in g_eli_last_close()
834 error = g_eli_destroy(gp->softc, TRUE); in g_eli_last_close()
846 gp = pp->geom; in g_eli_access()
847 sc = gp->softc; in g_eli_access()
850 if (sc->sc_flags & G_ELI_FLAG_RO) { in g_eli_access()
855 sc->sc_flags |= G_ELI_FLAG_WOPEN; in g_eli_access()
859 if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0) in g_eli_access()
865 if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) || in g_eli_access()
866 (sc->sc_flags & G_ELI_FLAG_WOPEN)) { in g_eli_access()
890 * Calculate the maximum-sized swap buffer we are in g_eli_init_uma()
900 /* Reserve and pre-allocate pages, as appropriate. */ in g_eli_init_uma()
938 if (atomic_fetchadd_int(&g_eli_devs, -1) == 1) in g_eli_fini_uma()
943 * Allocate a data buffer. If the size fits within our swap-sized buffers,
944 * try to allocate a swap-sized buffer from the UMA pool. Otherwise, fall
947 * Swap-related requests are special: they can only use the UMA pool, they
955 KASSERT(sz <= g_eli_alloc_sz || (bp->bio_flags & BIO_SWAP) == 0, in g_eli_alloc_data()
959 bp->bio_driver2 = uma_zalloc(g_eli_uma, M_NOWAIT | in g_eli_alloc_data()
960 ((bp->bio_flags & BIO_SWAP) != 0 ? M_USE_RESERVE : 0)); in g_eli_alloc_data()
961 if (bp->bio_driver2 != NULL) { in g_eli_alloc_data()
962 bp->bio_pflags |= G_ELI_UMA_ALLOC; in g_eli_alloc_data()
965 if (bp->bio_driver2 != NULL || (bp->bio_flags & BIO_SWAP) != 0) in g_eli_alloc_data()
966 return (bp->bio_driver2 != NULL); in g_eli_alloc_data()
968 bp->bio_pflags &= ~(G_ELI_UMA_ALLOC); in g_eli_alloc_data()
969 bp->bio_driver2 = malloc(sz, M_ELI, g_eli_blocking_malloc ? M_WAITOK : in g_eli_alloc_data()
971 return (bp->bio_driver2 != NULL); in g_eli_alloc_data()
975 * Free a buffer from bp->bio_driver2 which was allocated with
990 if (bp->bio_driver2 == NULL) in g_eli_free_data()
993 if ((bp->bio_pflags & G_ELI_UMA_ALLOC) != 0) { in g_eli_free_data()
994 uma_zfree(g_eli_uma, bp->bio_driver2); in g_eli_free_data()
995 if (atomic_fetchadd_int(&g_eli_umaoutstanding, -1) == 1 && in g_eli_free_data()
999 free(bp->bio_driver2, M_ELI); in g_eli_free_data()
1000 bp->bio_driver2 = NULL; in g_eli_free_data()
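The g_eli_alloc_data()/g_eli_free_data() lines above show the allocation policy: buffers that fit the precomputed size come from a UMA zone (with M_USE_RESERVE only for BIO_SWAP requests, which never fall back), everything else falls back to malloc(9), and a pflag remembers which allocator to free with. A simplified userland model of that policy, assuming a fixed-size pool with a small reserve (none of these names are geli's):

    #include <stdbool.h>
    #include <stdlib.h>

    #define POOL_BUF_SZ   (64 * 1024)  /* ~ g_eli_alloc_sz */
    #define POOL_TOTAL    8
    #define POOL_RESERVE  2            /* kept back for "swap" requests only */

    static void *pool[POOL_TOTAL];
    static int   pool_free;            /* buffers currently in the pool */

    struct buf_ref {
        void *ptr;
        bool  from_pool;               /* ~ the G_ELI_UMA_ALLOC pflag */
    };

    static bool pool_init(void)
    {
        for (pool_free = 0; pool_free < POOL_TOTAL; pool_free++)
            if ((pool[pool_free] = malloc(POOL_BUF_SZ)) == NULL)
                return false;
        return true;
    }

    static bool alloc_data(struct buf_ref *br, size_t sz, bool is_swap)
    {
        br->ptr = NULL;
        br->from_pool = false;
        if (sz <= POOL_BUF_SZ) {
            /* Ordinary requests must leave the reserve untouched. */
            int floor = is_swap ? 0 : POOL_RESERVE;
            if (pool_free > floor) {
                br->ptr = pool[--pool_free];
                br->from_pool = true;
                return true;
            }
        }
        if (is_swap)                   /* swap I/O may only use the pool */
            return false;
        br->ptr = malloc(sz);          /* ~ the malloc(9) fallback */
        return br->ptr != NULL;
    }

    static void free_data(struct buf_ref *br)
    {
        if (br->ptr == NULL)
            return;
        if (br->from_pool)
            pool[pool_free++] = br->ptr;
        else
            free(br->ptr);
        br->ptr = NULL;
    }

    int main(void)
    {
        struct buf_ref a;
        if (!pool_init())
            return 1;
        if (alloc_data(&a, 4096, false))
            free_data(&a);
        return 0;
    }

The reserve exists so that swap I/O, which runs precisely when the system is short on memory, does not fail the allocations it needs to make progress.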
1016 G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX); in g_eli_create()
1018 ("%s: unsupported crypto for %s", __func__, bpp->name)); in g_eli_create()
1020 gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX); in g_eli_create()
1022 gp->start = g_eli_start; in g_eli_create()
1027 gp->spoiled = g_eli_orphan; in g_eli_create()
1028 gp->orphan = g_eli_orphan; in g_eli_create()
1029 gp->resize = g_eli_resize; in g_eli_create()
1030 gp->dumpconf = g_eli_dumpconf; in g_eli_create()
1032 * If detach-on-last-close feature is not enabled and we don't operate in g_eli_create()
1033 * on read-only provider, we can simply use g_std_access(). in g_eli_create()
1035 if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO)) in g_eli_create()
1036 gp->access = g_eli_access; in g_eli_create()
1038 gp->access = g_std_access; in g_eli_create()
1040 eli_metadata_softc(sc, md, bpp->sectorsize, bpp->mediasize); in g_eli_create()
1041 sc->sc_nkey = nkey; in g_eli_create()
1043 gp->softc = sc; in g_eli_create()
1044 sc->sc_geom = gp; in g_eli_create()
1046 bioq_init(&sc->sc_queue); in g_eli_create()
1047 mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF); in g_eli_create()
1048 mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF); in g_eli_create()
1053 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; in g_eli_create()
1059 bpp->name, error); in g_eli_create()
1062 bpp->name, error); in g_eli_create()
1070 * We don't open provider for writing only when user requested read-only in g_eli_create()
1073 dcw = (sc->sc_flags & G_ELI_FLAG_RO) ? 0 : 1; in g_eli_create()
1078 bpp->name, error); in g_eli_create()
1081 bpp->name, error); in g_eli_create()
1091 LIST_INIT(&sc->sc_workers); in g_eli_create()
1096 sc->sc_cpubind = (mp_ncpus > 1 && threads == mp_ncpus); in g_eli_create()
1100 bpp->name, i); in g_eli_create()
1104 wr->w_softc = sc; in g_eli_create()
1105 wr->w_number = i; in g_eli_create()
1106 wr->w_active = TRUE; in g_eli_create()
1113 "for %s (error=%d).", bpp->name, error); in g_eli_create()
1116 "for %s (error=%d).", bpp->name, error); in g_eli_create()
1121 error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0, in g_eli_create()
1122 "g_eli[%u] %s", i, bpp->name); in g_eli_create()
1128 "for %s (error=%d).", bpp->name, error); in g_eli_create()
1131 "for %s (error=%d).", bpp->name, error); in g_eli_create()
1135 LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next); in g_eli_create()
1141 pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX); in g_eli_create()
1142 pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE; in g_eli_create()
1149 if ((sc->sc_flags & G_ELI_FLAG_AUTH) == 0) in g_eli_create()
1150 pp->flags |= G_PF_ACCEPT_UNMAPPED; in g_eli_create()
1152 pp->mediasize = sc->sc_mediasize; in g_eli_create()
1153 pp->sectorsize = sc->sc_sectorsize; in g_eli_create()
1154 LIST_FOREACH(gap, &bpp->aliases, ga_next) in g_eli_create()
1155 g_provider_add_alias(pp, "%s%s", gap->ga_alias, G_ELI_SUFFIX); in g_eli_create()
1159 G_ELI_DEBUG(0, "Device %s created.", pp->name); in g_eli_create()
1160 G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo), in g_eli_create()
1161 sc->sc_ekeylen); in g_eli_create()
1162 if (sc->sc_flags & G_ELI_FLAG_AUTH) in g_eli_create()
1163 G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo)); in g_eli_create()
1165 sc->sc_crypto == G_ELI_CRYPTO_SW_ACCEL ? "accelerated software" : in g_eli_create()
1166 sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware"); in g_eli_create()
1170 mtx_lock(&sc->sc_queue_mtx); in g_eli_create()
1171 sc->sc_flags |= G_ELI_FLAG_DESTROY; in g_eli_create()
1176 while (!LIST_EMPTY(&sc->sc_workers)) { in g_eli_create()
1177 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO, in g_eli_create()
1180 mtx_destroy(&sc->sc_queue_mtx); in g_eli_create()
1181 if (cp->provider != NULL) { in g_eli_create()
1182 if (cp->acr == 1) in g_eli_create()
1183 g_access(cp, -1, -dcw, -1); in g_eli_create()
1205 gp = sc->sc_geom; in g_eli_destroy()
1206 pp = LIST_FIRST(&gp->provider); in g_eli_destroy()
1207 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { in g_eli_destroy()
1210 "cannot be definitely removed.", pp->name); in g_eli_destroy()
1211 sc->sc_flags |= G_ELI_FLAG_RW_DETACH; in g_eli_destroy()
1212 gp->access = g_eli_access; in g_eli_destroy()
1217 "Device %s is still open (r%dw%de%d).", pp->name, in g_eli_destroy()
1218 pp->acr, pp->acw, pp->ace); in g_eli_destroy()
1223 mtx_lock(&sc->sc_queue_mtx); in g_eli_destroy()
1224 sc->sc_flags |= G_ELI_FLAG_DESTROY; in g_eli_destroy()
1226 while (!LIST_EMPTY(&sc->sc_workers)) { in g_eli_destroy()
1227 msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO, in g_eli_destroy()
1230 mtx_destroy(&sc->sc_queue_mtx); in g_eli_destroy()
1231 gp->softc = NULL; in g_eli_destroy()
1236 G_ELI_DEBUG(0, "Device %s destroyed.", gp->name); in g_eli_destroy()
1248 sc = gp->softc; in g_eli_destroy_geom()
1332 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); in g_eli_taste()
1338 G_ELI_DEBUG(3, "Tasting %s.", pp->name); in g_eli_taste()
1349 pp->name); in g_eli_taste()
1352 if (md.md_provsize != pp->mediasize) in g_eli_taste()
1359 G_ELI_DEBUG(0, "No valid keys on %s.", pp->name); in g_eli_taste()
1364 pp->name); in g_eli_taste()
1367 if (md.md_iterations == -1) { in g_eli_taste()
1377 for (i = 0; i < keybuf->kb_nents; i++) { in g_eli_taste()
1378 if (keybuf->kb_ents[i].ke_type == KEYBUF_TYPE_GELI) { in g_eli_taste()
1379 memcpy(key, keybuf->kb_ents[i].ke_data, in g_eli_taste()
1397 nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name); in g_eli_taste()
1399 if (nkeyfiles == 0 && md.md_iterations == -1) { in g_eli_taste()
1409 pp->name); in g_eli_taste()
1422 printf("Enter passphrase for %s: ", pp->name); in g_eli_taste()
1434 * Prepare Derived-Key from the user passphrase. in g_eli_taste()
1455 * Decrypt Master-Key. in g_eli_taste()
1459 if (error == -1) { in g_eli_taste()
1463 pp->name); in g_eli_taste()
1464 g_eli_keyfiles_clear(pp->name); in g_eli_taste()
1470 pp->name, tries - i); in g_eli_taste()
1477 pp->name, error); in g_eli_taste()
1478 g_eli_keyfiles_clear(pp->name); in g_eli_taste()
1481 g_eli_keyfiles_clear(pp->name); in g_eli_taste()
1482 G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name); in g_eli_taste()
1494 G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name, in g_eli_taste()
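The g_eli_taste() lines above outline attach-time key recovery: keyfiles and/or a typed passphrase are folded into a Derived-Key (geli uses an HMAC-based PKCS#5v2-style derivation when a passphrase is in use), that key is used to try to decrypt one of the Master-Key copies in the metadata, and on a wrong passphrase the user gets a limited number of retries. A compressed userland illustration of that retry flow, using OpenSSL's PBKDF2 for the derivation step and a stub for the metadata decryption (the helper names, iteration count, and fixed salt below are made up for the example):

    #include <openssl/evp.h>
    #include <stdio.h>
    #include <string.h>

    #define DKLEN 64

    /*
     * Stub standing in for the real Master-Key decryption, which walks the
     * encrypted Master-Key copies in the on-disk metadata.  Returns 0 on
     * success, -1 if no copy decrypts (i.e. wrong passphrase).
     */
    static int try_decrypt_master_key(const unsigned char *dkey, size_t dkeylen)
    {
        (void)dkey; (void)dkeylen;
        return -1;                 /* always "wrong passphrase" in this sketch */
    }

    int main(void)
    {
        const unsigned char salt[] = "example-salt"; /* real geli reads the salt from its metadata */
        unsigned char dkey[DKLEN];
        char pass[256];
        int tries = 3;

        for (int i = 0; i < tries; i++) {
            printf("Enter passphrase: ");
            if (fgets(pass, sizeof(pass), stdin) == NULL)
                return 1;
            pass[strcspn(pass, "\n")] = '\0';

            /* "Prepare Derived-Key from the user passphrase." */
            if (PKCS5_PBKDF2_HMAC(pass, (int)strlen(pass),
                salt, (int)sizeof(salt) - 1, 100000, EVP_sha512(),
                DKLEN, dkey) != 1)
                return 1;

            /* "Decrypt Master-Key." */
            if (try_decrypt_master_key(dkey, sizeof(dkey)) == 0) {
                printf("Master Key recovered.\n");
                return 0;
            }
            printf("Wrong key (%d tries left).\n", tries - i - 1);
        }
        return 1;
    }

In the listing, the successful path continues into g_eli_create() a few lines later, which is handed the recovered Master Key.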
1508 sc = gp->softc; in g_eli_dumpconf()
1515 (uintmax_t)sc->sc_ekeys_total); in g_eli_dumpconf()
1517 (uintmax_t)sc->sc_ekeys_allocated); in g_eli_dumpconf()
1519 if (sc->sc_flags == 0) in g_eli_dumpconf()
1525 if (sc->sc_flags & (flag)) { \ in g_eli_dumpconf()
1534 ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY"); in g_eli_dumpconf()
1535 ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER"); in g_eli_dumpconf()
1538 ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH"); in g_eli_dumpconf()
1539 ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH"); in g_eli_dumpconf()
1541 ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN"); in g_eli_dumpconf()
1543 ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY"); in g_eli_dumpconf()
1552 if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) { in g_eli_dumpconf()
1554 sc->sc_nkey); in g_eli_dumpconf()
1556 sbuf_printf(sb, "%s<Version>%u</Version>\n", indent, sc->sc_version); in g_eli_dumpconf()
1558 switch (sc->sc_crypto) { in g_eli_dumpconf()
1560 sbuf_cat(sb, "hardware"); in g_eli_dumpconf()
1566 sbuf_cat(sb, "accelerated software"); in g_eli_dumpconf()
1573 if (sc->sc_flags & G_ELI_FLAG_AUTH) { in g_eli_dumpconf()
1576 indent, g_eli_algo2str(sc->sc_aalgo)); in g_eli_dumpconf()
1579 sc->sc_ekeylen); in g_eli_dumpconf()
1581 indent, g_eli_algo2str(sc->sc_ealgo)); in g_eli_dumpconf()
1583 (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE"); in g_eli_dumpconf()
1596 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { in g_eli_shutdown_pre_sync()
1597 sc = gp->softc; in g_eli_shutdown_pre_sync()
1600 pp = LIST_FIRST(&gp->provider); in g_eli_shutdown_pre_sync()
1601 KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name)); in g_eli_shutdown_pre_sync()
1602 if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0 || in g_eli_shutdown_pre_sync()
1605 sc->sc_flags |= G_ELI_FLAG_RW_DETACH; in g_eli_shutdown_pre_sync()
1606 gp->access = g_eli_access; in g_eli_shutdown_pre_sync()