xref: /freebsd/sys/geom/eli/g_eli.c (revision 6574b8ed19b093f0af09501d2c9676c28993cb97)
1 /*-
2  * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/cons.h>
33 #include <sys/kernel.h>
34 #include <sys/linker.h>
35 #include <sys/module.h>
36 #include <sys/lock.h>
37 #include <sys/mutex.h>
38 #include <sys/bio.h>
39 #include <sys/sbuf.h>
40 #include <sys/sysctl.h>
41 #include <sys/malloc.h>
42 #include <sys/eventhandler.h>
43 #include <sys/kthread.h>
44 #include <sys/proc.h>
45 #include <sys/sched.h>
46 #include <sys/smp.h>
47 #include <sys/uio.h>
48 #include <sys/vnode.h>
49 
50 #include <vm/uma.h>
51 
52 #include <geom/geom.h>
53 #include <geom/eli/g_eli.h>
54 #include <geom/eli/pkcs5v2.h>
55 
56 FEATURE(geom_eli, "GEOM crypto module");
57 
58 MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");
59 
60 SYSCTL_DECL(_kern_geom);
61 SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
62 static int g_eli_version = G_ELI_VERSION;
63 SYSCTL_INT(_kern_geom_eli, OID_AUTO, version, CTLFLAG_RD, &g_eli_version, 0,
64     "GELI version");
65 int g_eli_debug = 0;
66 SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RWTUN, &g_eli_debug, 0,
67     "Debug level");
68 static u_int g_eli_tries = 3;
69 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RWTUN, &g_eli_tries, 0,
70     "Number of tries for entering the passphrase");
71 static u_int g_eli_visible_passphrase = GETS_NOECHO;
72 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RWTUN,
73     &g_eli_visible_passphrase, 0,
74     "Visibility of passphrase prompt (0 = invisible, 1 = visible, 2 = asterisk)");
75 u_int g_eli_overwrites = G_ELI_OVERWRITES;
76 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RWTUN, &g_eli_overwrites,
77     0, "Number of times on-disk keys should be overwritten when destroying them");
78 static u_int g_eli_threads = 0;
79 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RWTUN, &g_eli_threads, 0,
80     "Number of threads doing crypto work");
81 u_int g_eli_batch = 0;
82 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RWTUN, &g_eli_batch, 0,
83     "Use crypto operations batching");
84 
85 /*
86  * Passphrase cached during boot, in order to be more user-friendly if
87  * there are multiple providers using the same passphrase.
88  */
89 static char cached_passphrase[256];
90 static u_int g_eli_boot_passcache = 1;
91 TUNABLE_INT("kern.geom.eli.boot_passcache", &g_eli_boot_passcache);
92 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, boot_passcache, CTLFLAG_RD,
93     &g_eli_boot_passcache, 0,
94     "Passphrases are cached during boot process for possible reuse");
95 static void
96 zero_boot_passcache(void * dummy)
97 {
98 
99 	memset(cached_passphrase, 0, sizeof(cached_passphrase));
100 }
101 EVENTHANDLER_DEFINE(mountroot, zero_boot_passcache, NULL, 0);
102 
103 static eventhandler_tag g_eli_pre_sync = NULL;
104 
105 static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
106     struct g_geom *gp);
107 static void g_eli_init(struct g_class *mp);
108 static void g_eli_fini(struct g_class *mp);
109 
110 static g_taste_t g_eli_taste;
111 static g_dumpconf_t g_eli_dumpconf;
112 
113 struct g_class g_eli_class = {
114 	.name = G_ELI_CLASS_NAME,
115 	.version = G_VERSION,
116 	.ctlreq = g_eli_config,
117 	.taste = g_eli_taste,
118 	.destroy_geom = g_eli_destroy_geom,
119 	.init = g_eli_init,
120 	.fini = g_eli_fini
121 };
122 
123 
124 /*
125  * Code paths:
126  * BIO_READ:
127  *	g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
128  * BIO_WRITE:
129  *	g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
130  */
131 
132 
133 /*
134  * EAGAIN from crypto(9) means that the request was probably rebalanced to
135  * a different crypto accelerator or that a similar backend change occurred.
136  * The function updates the SID and reruns the operation.
137  */
138 int
139 g_eli_crypto_rerun(struct cryptop *crp)
140 {
141 	struct g_eli_softc *sc;
142 	struct g_eli_worker *wr;
143 	struct bio *bp;
144 	int error;
145 
146 	bp = (struct bio *)crp->crp_opaque;
147 	sc = bp->bio_to->geom->softc;
148 	LIST_FOREACH(wr, &sc->sc_workers, w_next) {
149 		if (wr->w_number == bp->bio_pflags)
150 			break;
151 	}
152 	KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
153 	G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).",
154 	    bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid,
155 	    (uintmax_t)crp->crp_sid);
156 	wr->w_sid = crp->crp_sid;
157 	crp->crp_etype = 0;
158 	error = crypto_dispatch(crp);
159 	if (error == 0)
160 		return (0);
161 	G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
162 	crp->crp_etype = error;
163 	return (error);
164 }
165 
166 /*
167  * The function is called after reading encrypted data from the provider.
168  *
169  * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
170  */
171 void
172 g_eli_read_done(struct bio *bp)
173 {
174 	struct g_eli_softc *sc;
175 	struct bio *pbp;
176 
177 	G_ELI_LOGREQ(2, bp, "Request done.");
178 	pbp = bp->bio_parent;
179 	if (pbp->bio_error == 0)
180 		pbp->bio_error = bp->bio_error;
181 	g_destroy_bio(bp);
182 	/*
183 	 * Do we have all sectors already?
184 	 */
185 	pbp->bio_inbed++;
186 	if (pbp->bio_inbed < pbp->bio_children)
187 		return;
188 	sc = pbp->bio_to->geom->softc;
189 	if (pbp->bio_error != 0) {
190 		G_ELI_LOGREQ(0, pbp, "%s() failed", __func__);
191 		pbp->bio_completed = 0;
192 		if (pbp->bio_driver2 != NULL) {
193 			free(pbp->bio_driver2, M_ELI);
194 			pbp->bio_driver2 = NULL;
195 		}
196 		g_io_deliver(pbp, pbp->bio_error);
197 		atomic_subtract_int(&sc->sc_inflight, 1);
198 		return;
199 	}
200 	mtx_lock(&sc->sc_queue_mtx);
201 	bioq_insert_tail(&sc->sc_queue, pbp);
202 	mtx_unlock(&sc->sc_queue_mtx);
203 	wakeup(sc);
204 }
205 
206 /*
207  * The function is called after we encrypt and write data.
208  *
209  * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
210  */
211 void
212 g_eli_write_done(struct bio *bp)
213 {
214 	struct g_eli_softc *sc;
215 	struct bio *pbp;
216 
217 	G_ELI_LOGREQ(2, bp, "Request done.");
218 	pbp = bp->bio_parent;
219 	if (pbp->bio_error == 0) {
220 		if (bp->bio_error != 0)
221 			pbp->bio_error = bp->bio_error;
222 	}
223 	g_destroy_bio(bp);
224 	/*
225 	 * Do we have all sectors already?
226 	 */
227 	pbp->bio_inbed++;
228 	if (pbp->bio_inbed < pbp->bio_children)
229 		return;
230 	free(pbp->bio_driver2, M_ELI);
231 	pbp->bio_driver2 = NULL;
232 	if (pbp->bio_error != 0) {
233 		G_ELI_LOGREQ(0, pbp, "Crypto WRITE request failed (error=%d).",
234 		    pbp->bio_error);
235 		pbp->bio_completed = 0;
236 	} else {
237 		/*
238 		 * Write is finished, send it up.
239 		 */
240 		pbp->bio_completed = pbp->bio_length;
241 	}
241 	sc = pbp->bio_to->geom->softc;
242 	g_io_deliver(pbp, pbp->bio_error);
243 	atomic_subtract_int(&sc->sc_inflight, 1);
244 }
245 
246 /*
247  * This function should never be called, but GEOM requires that an
248  * ->orphan() method be set for every geom.
249  */
250 static void
251 g_eli_orphan_spoil_assert(struct g_consumer *cp)
252 {
253 
254 	panic("Function %s() called for %s.", __func__, cp->geom->name);
255 }
256 
257 static void
258 g_eli_orphan(struct g_consumer *cp)
259 {
260 	struct g_eli_softc *sc;
261 
262 	g_topology_assert();
263 	sc = cp->geom->softc;
264 	if (sc == NULL)
265 		return;
266 	g_eli_destroy(sc, TRUE);
267 }
268 
269 /*
270  * BIO_READ:
271  *	G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
272  * BIO_WRITE:
273  *	G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
274  */
275 static void
276 g_eli_start(struct bio *bp)
277 {
278 	struct g_eli_softc *sc;
279 	struct g_consumer *cp;
280 	struct bio *cbp;
281 
282 	sc = bp->bio_to->geom->softc;
283 	KASSERT(sc != NULL,
284 	    ("Provider's error should be set (error=%d)(device=%s).",
285 	    bp->bio_to->error, bp->bio_to->name));
286 	G_ELI_LOGREQ(2, bp, "Request received.");
287 
288 	switch (bp->bio_cmd) {
289 	case BIO_READ:
290 	case BIO_WRITE:
291 	case BIO_GETATTR:
292 	case BIO_FLUSH:
293 		break;
294 	case BIO_DELETE:
295 		/*
296 		 * We could eventually support BIO_DELETE requests.
297 		 * It could be done by overwriting the requested sectors with
298 		 * random data g_eli_overwrites number of times.
299 		 */
300 	default:
301 		g_io_deliver(bp, EOPNOTSUPP);
302 		return;
303 	}
304 	cbp = g_clone_bio(bp);
305 	if (cbp == NULL) {
306 		g_io_deliver(bp, ENOMEM);
307 		return;
308 	}
309 	bp->bio_driver1 = cbp;
310 	bp->bio_pflags = G_ELI_NEW_BIO;
311 	switch (bp->bio_cmd) {
312 	case BIO_READ:
313 		if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
314 			g_eli_crypto_read(sc, bp, 0);
315 			break;
316 		}
317 		/* FALLTHROUGH */
318 	case BIO_WRITE:
319 		mtx_lock(&sc->sc_queue_mtx);
320 		bioq_insert_tail(&sc->sc_queue, bp);
321 		mtx_unlock(&sc->sc_queue_mtx);
322 		wakeup(sc);
323 		break;
324 	case BIO_GETATTR:
325 	case BIO_FLUSH:
326 		cbp->bio_done = g_std_done;
327 		cp = LIST_FIRST(&sc->sc_geom->consumer);
328 		cbp->bio_to = cp->provider;
329 		G_ELI_LOGREQ(2, cbp, "Sending request.");
330 		g_io_request(cbp, cp);
331 		break;
332 	}
333 }
334 
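/*
 * Allocate a crypto(9) session for the given worker.  The encryption (and,
 * with G_ELI_FLAG_AUTH, authentication) algorithm and key come from the
 * softc.  While the backend is still G_ELI_CRYPTO_UNKNOWN we first try a
 * hardware session and fall back to software, remembering the result.
 */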
335 static int
336 g_eli_newsession(struct g_eli_worker *wr)
337 {
338 	struct g_eli_softc *sc;
339 	struct cryptoini crie, cria;
340 	int error;
341 
342 	sc = wr->w_softc;
343 
344 	bzero(&crie, sizeof(crie));
345 	crie.cri_alg = sc->sc_ealgo;
346 	crie.cri_klen = sc->sc_ekeylen;
347 	if (sc->sc_ealgo == CRYPTO_AES_XTS)
348 		crie.cri_klen <<= 1;
349 	if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) {
350 		crie.cri_key = g_eli_key_hold(sc, 0,
351 		    LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize);
352 	} else {
353 		crie.cri_key = sc->sc_ekey;
354 	}
355 	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
356 		bzero(&cria, sizeof(cria));
357 		cria.cri_alg = sc->sc_aalgo;
358 		cria.cri_klen = sc->sc_akeylen;
359 		cria.cri_key = sc->sc_akey;
360 		crie.cri_next = &cria;
361 	}
362 
363 	switch (sc->sc_crypto) {
364 	case G_ELI_CRYPTO_SW:
365 		error = crypto_newsession(&wr->w_sid, &crie,
366 		    CRYPTOCAP_F_SOFTWARE);
367 		break;
368 	case G_ELI_CRYPTO_HW:
369 		error = crypto_newsession(&wr->w_sid, &crie,
370 		    CRYPTOCAP_F_HARDWARE);
371 		break;
372 	case G_ELI_CRYPTO_UNKNOWN:
373 		error = crypto_newsession(&wr->w_sid, &crie,
374 		    CRYPTOCAP_F_HARDWARE);
375 		if (error == 0) {
376 			mtx_lock(&sc->sc_queue_mtx);
377 			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
378 				sc->sc_crypto = G_ELI_CRYPTO_HW;
379 			mtx_unlock(&sc->sc_queue_mtx);
380 		} else {
381 			error = crypto_newsession(&wr->w_sid, &crie,
382 			    CRYPTOCAP_F_SOFTWARE);
383 			mtx_lock(&sc->sc_queue_mtx);
384 			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
385 				sc->sc_crypto = G_ELI_CRYPTO_SW;
386 			mtx_unlock(&sc->sc_queue_mtx);
387 		}
388 		break;
389 	default:
390 		panic("%s: invalid condition", __func__);
391 	}
392 
393 	if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0)
394 		g_eli_key_drop(sc, crie.cri_key);
395 
396 	return (error);
397 }
398 
399 static void
400 g_eli_freesession(struct g_eli_worker *wr)
401 {
402 
403 	crypto_freesession(wr->w_sid);
404 }
405 
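/*
 * Drop all not-yet-started bios from the queue, completing them with ENXIO.
 * Called with the queue mutex held when the device is going away.
 */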
406 static void
407 g_eli_cancel(struct g_eli_softc *sc)
408 {
409 	struct bio *bp;
410 
411 	mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
412 
413 	while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
414 		KASSERT(bp->bio_pflags == G_ELI_NEW_BIO,
415 		    ("Not new bio when canceling (bp=%p).", bp));
416 		g_io_deliver(bp, ENXIO);
417 	}
418 }
419 
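/*
 * Pick the next bio to process.  While the device is suspended we only
 * return bios that are already in progress and skip new (G_ELI_NEW_BIO)
 * requests, so they stay queued until the device is resumed.
 */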
420 static struct bio *
421 g_eli_takefirst(struct g_eli_softc *sc)
422 {
423 	struct bio *bp;
424 
425 	mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
426 
427 	if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND))
428 		return (bioq_takefirst(&sc->sc_queue));
429 	/*
430 	 * Device suspended, so we skip new I/O requests.
431 	 */
432 	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
433 		if (bp->bio_pflags != G_ELI_NEW_BIO)
434 			break;
435 	}
436 	if (bp != NULL)
437 		bioq_remove(&sc->sc_queue, bp);
438 	return (bp);
439 }
440 
441 /*
442  * This is the main function of the kernel worker thread when we don't have
443  * hardware acceleration and have to do cryptography in software.
444  * A dedicated thread is needed so we don't slow down the g_up/g_down GEOM
445  * threads with crypto work.
446  */
447 static void
448 g_eli_worker(void *arg)
449 {
450 	struct g_eli_softc *sc;
451 	struct g_eli_worker *wr;
452 	struct bio *bp;
453 	int error;
454 
455 	wr = arg;
456 	sc = wr->w_softc;
457 #ifdef SMP
458 	/* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
459 	if (sc->sc_cpubind) {
460 		while (!smp_started)
461 			tsleep(wr, 0, "geli:smp", hz / 4);
462 	}
463 #endif
464 	thread_lock(curthread);
465 	sched_prio(curthread, PUSER);
466 	if (sc->sc_cpubind)
467 		sched_bind(curthread, wr->w_number % mp_ncpus);
468 	thread_unlock(curthread);
469 
470 	G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);
471 
472 	for (;;) {
473 		mtx_lock(&sc->sc_queue_mtx);
474 again:
475 		bp = g_eli_takefirst(sc);
476 		if (bp == NULL) {
477 			if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
478 				g_eli_cancel(sc);
479 				LIST_REMOVE(wr, w_next);
480 				g_eli_freesession(wr);
481 				free(wr, M_ELI);
482 				G_ELI_DEBUG(1, "Thread %s exiting.",
483 				    curthread->td_proc->p_comm);
484 				wakeup(&sc->sc_workers);
485 				mtx_unlock(&sc->sc_queue_mtx);
486 				kproc_exit(0);
487 			}
488 			while (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
489 				if (sc->sc_inflight > 0) {
490 					G_ELI_DEBUG(0, "inflight=%d",
491 					    sc->sc_inflight);
492 					/*
493 					 * We still have inflight BIOs, so
494 					 * sleep and retry.
495 					 */
496 					msleep(sc, &sc->sc_queue_mtx, PRIBIO,
497 					    "geli:inf", hz / 5);
498 					goto again;
499 				}
500 				/*
501 				 * Suspend requested, mark the worker as
502 				 * suspended and go to sleep.
503 				 */
504 				if (wr->w_active) {
505 					g_eli_freesession(wr);
506 					wr->w_active = FALSE;
507 				}
508 				wakeup(&sc->sc_workers);
509 				msleep(sc, &sc->sc_queue_mtx, PRIBIO,
510 				    "geli:suspend", 0);
511 				if (!wr->w_active &&
512 				    !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) {
513 					error = g_eli_newsession(wr);
514 					KASSERT(error == 0,
515 					    ("g_eli_newsession() failed on resume (error=%d)",
516 					    error));
517 					wr->w_active = TRUE;
518 				}
519 				goto again;
520 			}
521 			msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0);
522 			continue;
523 		}
524 		if (bp->bio_pflags == G_ELI_NEW_BIO)
525 			atomic_add_int(&sc->sc_inflight, 1);
526 		mtx_unlock(&sc->sc_queue_mtx);
527 		if (bp->bio_pflags == G_ELI_NEW_BIO) {
528 			bp->bio_pflags = 0;
529 			if (sc->sc_flags & G_ELI_FLAG_AUTH) {
530 				if (bp->bio_cmd == BIO_READ)
531 					g_eli_auth_read(sc, bp);
532 				else
533 					g_eli_auth_run(wr, bp);
534 			} else {
535 				if (bp->bio_cmd == BIO_READ)
536 					g_eli_crypto_read(sc, bp, 1);
537 				else
538 					g_eli_crypto_run(wr, bp);
539 			}
540 		} else {
541 			if (sc->sc_flags & G_ELI_FLAG_AUTH)
542 				g_eli_auth_run(wr, bp);
543 			else
544 				g_eli_crypto_run(wr, bp);
545 		}
546 	}
547 }
548 
549 /*
550  * Here we generate the IV. It is unique for every sector.
551  */
552 void
553 g_eli_crypto_ivgen(struct g_eli_softc *sc, off_t offset, u_char *iv,
554     size_t size)
555 {
556 	uint8_t off[8];
557 
558 	if ((sc->sc_flags & G_ELI_FLAG_NATIVE_BYTE_ORDER) != 0)
559 		bcopy(&offset, off, sizeof(off));
560 	else
561 		le64enc(off, (uint64_t)offset);
562 
563 	switch (sc->sc_ealgo) {
564 	case CRYPTO_AES_XTS:
565 		bcopy(off, iv, sizeof(off));
566 		bzero(iv + sizeof(off), size - sizeof(off));
567 		break;
568 	default:
569 	    {
570 		u_char hash[SHA256_DIGEST_LENGTH];
571 		SHA256_CTX ctx;
572 
573 		/* Copy precalculated SHA256 context for IV-Key. */
574 		bcopy(&sc->sc_ivctx, &ctx, sizeof(ctx));
575 		SHA256_Update(&ctx, off, sizeof(off));
576 		SHA256_Final(hash, &ctx);
577 		bcopy(hash, iv, MIN(sizeof(hash), size));
578 		break;
579 	    }
580 	}
581 }
582 
583 int
584 g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
585     struct g_eli_metadata *md)
586 {
587 	struct g_geom *gp;
588 	struct g_consumer *cp;
589 	u_char *buf = NULL;
590 	int error;
591 
592 	g_topology_assert();
593 
594 	gp = g_new_geomf(mp, "eli:taste");
595 	gp->start = g_eli_start;
596 	gp->access = g_std_access;
597 	/*
598 	 * g_eli_read_metadata() is always called from the event thread.
599 	 * Our geom is created and destroyed within the same event, so no
600 	 * orphan or spoil event can occur in the meantime.
601 	 */
602 	gp->orphan = g_eli_orphan_spoil_assert;
603 	gp->spoiled = g_eli_orphan_spoil_assert;
604 	cp = g_new_consumer(gp);
605 	error = g_attach(cp, pp);
606 	if (error != 0)
607 		goto end;
608 	error = g_access(cp, 1, 0, 0);
609 	if (error != 0)
610 		goto end;
611 	g_topology_unlock();
612 	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
613 	    &error);
614 	g_topology_lock();
615 	if (buf == NULL)
616 		goto end;
617 	eli_metadata_decode(buf, md);
618 end:
619 	if (buf != NULL)
620 		g_free(buf);
621 	if (cp->provider != NULL) {
622 		if (cp->acr == 1)
623 			g_access(cp, -1, 0, 0);
624 		g_detach(cp);
625 	}
626 	g_destroy_consumer(cp);
627 	g_destroy_geom(gp);
628 	return (error);
629 }
630 
631 /*
632  * The function is called on the last close of the provider if the user
633  * requested that the device be detached in that situation.
634  */
635 static void
636 g_eli_last_close(void *arg, int flags __unused)
637 {
638 	struct g_geom *gp;
639 	char gpname[64];
640 	int error;
641 
642 	g_topology_assert();
643 	gp = arg;
644 	strlcpy(gpname, gp->name, sizeof(gpname));
645 	error = g_eli_destroy(gp->softc, TRUE);
646 	KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
647 	    gpname, error));
648 	G_ELI_DEBUG(0, "Detached %s on last close.", gpname);
649 }
650 
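/*
 * Access method used when the device is read-only or configured to detach
 * on last close.  Write opens are refused on read-only devices and recorded
 * otherwise; the last close may schedule detaching the device.
 */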
651 int
652 g_eli_access(struct g_provider *pp, int dr, int dw, int de)
653 {
654 	struct g_eli_softc *sc;
655 	struct g_geom *gp;
656 
657 	gp = pp->geom;
658 	sc = gp->softc;
659 
660 	if (dw > 0) {
661 		if (sc->sc_flags & G_ELI_FLAG_RO) {
662 			/* Deny write attempts. */
663 			return (EROFS);
664 		}
665 		/* Someone is opening us for write, we need to remember that. */
666 		sc->sc_flags |= G_ELI_FLAG_WOPEN;
667 		return (0);
668 	}
669 	/* Is this the last close? */
670 	if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
671 		return (0);
672 
673 	/*
674 	 * Automatically detach on last close if requested.
675 	 */
676 	if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
677 	    (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
678 		g_post_event(g_eli_last_close, gp, M_WAITOK, NULL);
679 	}
680 	return (0);
681 }
682 
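/*
 * Return non-zero if the given CPU is halted, so no worker thread is
 * created for it.
 */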
683 static int
684 g_eli_cpu_is_disabled(int cpu)
685 {
686 #ifdef SMP
687 	return (CPU_ISSET(cpu, &hlt_cpus_mask));
688 #else
689 	return (0);
690 #endif
691 }
692 
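/*
 * Create an encrypted device on top of provider bpp using the already
 * decrypted Master Key mkey.  This attaches to the provider, starts one
 * worker thread per CPU (or g_eli_threads) with its own crypto session,
 * and finally announces the decrypted provider.
 */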
693 struct g_geom *
694 g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
695     const struct g_eli_metadata *md, const u_char *mkey, int nkey)
696 {
697 	struct g_eli_softc *sc;
698 	struct g_eli_worker *wr;
699 	struct g_geom *gp;
700 	struct g_provider *pp;
701 	struct g_consumer *cp;
702 	u_int i, threads;
703 	int error;
704 
705 	G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);
706 
707 	gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
708 	sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
709 	gp->start = g_eli_start;
710 	/*
711 	 * Spoiling cannot actually happen, because we keep the provider open
712 	 * for writing all the time or the provider is read-only.
713 	 */
714 	gp->spoiled = g_eli_orphan_spoil_assert;
715 	gp->orphan = g_eli_orphan;
716 	gp->dumpconf = g_eli_dumpconf;
717 	/*
718 	 * If the detach-on-last-close feature is not enabled and we don't
719 	 * operate on a read-only provider, we can simply use g_std_access().
720 	 */
721 	if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
722 		gp->access = g_eli_access;
723 	else
724 		gp->access = g_std_access;
725 
726 	sc->sc_version = md->md_version;
727 	sc->sc_inflight = 0;
728 	sc->sc_crypto = G_ELI_CRYPTO_UNKNOWN;
729 	sc->sc_flags = md->md_flags;
730 	/* Backward compatibility. */
731 	if (md->md_version < G_ELI_VERSION_04)
732 		sc->sc_flags |= G_ELI_FLAG_NATIVE_BYTE_ORDER;
733 	if (md->md_version < G_ELI_VERSION_05)
734 		sc->sc_flags |= G_ELI_FLAG_SINGLE_KEY;
735 	if (md->md_version < G_ELI_VERSION_06 &&
736 	    (sc->sc_flags & G_ELI_FLAG_AUTH) != 0) {
737 		sc->sc_flags |= G_ELI_FLAG_FIRST_KEY;
738 	}
739 	if (md->md_version < G_ELI_VERSION_07)
740 		sc->sc_flags |= G_ELI_FLAG_ENC_IVKEY;
741 	sc->sc_ealgo = md->md_ealgo;
742 	sc->sc_nkey = nkey;
743 
744 	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
745 		sc->sc_akeylen = sizeof(sc->sc_akey) * 8;
746 		sc->sc_aalgo = md->md_aalgo;
747 		sc->sc_alen = g_eli_hashlen(sc->sc_aalgo);
748 
749 		sc->sc_data_per_sector = bpp->sectorsize - sc->sc_alen;
750 		/*
751 		 * Some hash functions (like SHA1 and RIPEMD160) generate hashes
752 		 * whose length is not a multiple of 128 bits, but we want the
753 		 * data length to be a multiple of 128 bits, so we can encrypt
754 		 * without padding. The line below rounds the data length down
755 		 * to a multiple of 128 bits.
756 		 */
757 		sc->sc_data_per_sector -= sc->sc_data_per_sector % 16;
758 
759 		sc->sc_bytes_per_sector =
760 		    (md->md_sectorsize - 1) / sc->sc_data_per_sector + 1;
761 		sc->sc_bytes_per_sector *= bpp->sectorsize;
762 	}
763 
764 	gp->softc = sc;
765 	sc->sc_geom = gp;
766 
767 	bioq_init(&sc->sc_queue);
768 	mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);
769 	mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF);
770 
771 	pp = NULL;
772 	cp = g_new_consumer(gp);
773 	error = g_attach(cp, bpp);
774 	if (error != 0) {
775 		if (req != NULL) {
776 			gctl_error(req, "Cannot attach to %s (error=%d).",
777 			    bpp->name, error);
778 		} else {
779 			G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
780 			    bpp->name, error);
781 		}
782 		goto failed;
783 	}
784 	/*
785 	 * Keep the provider open all the time, so we can run critical tasks,
786 	 * like Master Keys deletion, without wondering if we can open the
787 	 * provider or not.
788 	 * We skip opening the provider for writing only when the user
789 	 * requested read-only access.
790 	 */
791 	if (sc->sc_flags & G_ELI_FLAG_RO)
792 		error = g_access(cp, 1, 0, 1);
793 	else
794 		error = g_access(cp, 1, 1, 1);
795 	if (error != 0) {
796 		if (req != NULL) {
797 			gctl_error(req, "Cannot access %s (error=%d).",
798 			    bpp->name, error);
799 		} else {
800 			G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
801 			    bpp->name, error);
802 		}
803 		goto failed;
804 	}
805 
806 	sc->sc_sectorsize = md->md_sectorsize;
807 	sc->sc_mediasize = bpp->mediasize;
808 	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME))
809 		sc->sc_mediasize -= bpp->sectorsize;
810 	if (!(sc->sc_flags & G_ELI_FLAG_AUTH))
811 		sc->sc_mediasize -= (sc->sc_mediasize % sc->sc_sectorsize);
812 	else {
813 		sc->sc_mediasize /= sc->sc_bytes_per_sector;
814 		sc->sc_mediasize *= sc->sc_sectorsize;
815 	}
816 
817 	/*
818 	 * Remember the keys in our softc structure.
819 	 */
820 	g_eli_mkey_propagate(sc, mkey);
821 	sc->sc_ekeylen = md->md_keylen;
822 
823 	LIST_INIT(&sc->sc_workers);
824 
825 	threads = g_eli_threads;
826 	if (threads == 0)
827 		threads = mp_ncpus;
828 	sc->sc_cpubind = (mp_ncpus > 1 && threads == mp_ncpus);
829 	for (i = 0; i < threads; i++) {
830 		if (g_eli_cpu_is_disabled(i)) {
831 			G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
832 			    bpp->name, i);
833 			continue;
834 		}
835 		wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
836 		wr->w_softc = sc;
837 		wr->w_number = i;
838 		wr->w_active = TRUE;
839 
840 		error = g_eli_newsession(wr);
841 		if (error != 0) {
842 			free(wr, M_ELI);
843 			if (req != NULL) {
844 				gctl_error(req, "Cannot set up crypto session "
845 				    "for %s (error=%d).", bpp->name, error);
846 			} else {
847 				G_ELI_DEBUG(1, "Cannot set up crypto session "
848 				    "for %s (error=%d).", bpp->name, error);
849 			}
850 			goto failed;
851 		}
852 
853 		error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
854 		    "g_eli[%u] %s", i, bpp->name);
855 		if (error != 0) {
856 			g_eli_freesession(wr);
857 			free(wr, M_ELI);
858 			if (req != NULL) {
859 				gctl_error(req, "Cannot create kernel thread "
860 				    "for %s (error=%d).", bpp->name, error);
861 			} else {
862 				G_ELI_DEBUG(1, "Cannot create kernel thread "
863 				    "for %s (error=%d).", bpp->name, error);
864 			}
865 			goto failed;
866 		}
867 		LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
868 	}
869 
870 	/*
871 	 * Create decrypted provider.
872 	 */
873 	pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
874 	pp->mediasize = sc->sc_mediasize;
875 	pp->sectorsize = sc->sc_sectorsize;
876 
877 	g_error_provider(pp, 0);
878 
879 	G_ELI_DEBUG(0, "Device %s created.", pp->name);
880 	G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
881 	    sc->sc_ekeylen);
882 	if (sc->sc_flags & G_ELI_FLAG_AUTH)
883 		G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
884 	G_ELI_DEBUG(0, "    Crypto: %s",
885 	    sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
886 	return (gp);
887 failed:
888 	mtx_lock(&sc->sc_queue_mtx);
889 	sc->sc_flags |= G_ELI_FLAG_DESTROY;
890 	wakeup(sc);
891 	/*
892 	 * Wait for the kernel threads to destroy themselves.
893 	 */
894 	while (!LIST_EMPTY(&sc->sc_workers)) {
895 		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
896 		    "geli:destroy", 0);
897 	}
898 	mtx_destroy(&sc->sc_queue_mtx);
899 	if (cp->provider != NULL) {
900 		if (cp->acr == 1)
901 			g_access(cp, -1, -1, -1);
902 		g_detach(cp);
903 	}
904 	g_destroy_consumer(cp);
905 	g_destroy_geom(gp);
906 	g_eli_key_destroy(sc);
907 	bzero(sc, sizeof(*sc));
908 	free(sc, M_ELI);
909 	return (NULL);
910 }
911 
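/*
 * Tear the device down: tell the worker threads to exit, wait for them,
 * wipe the keys and the softc, and wither the geom.  If the provider is
 * still open this fails with EBUSY; with force it is additionally marked
 * for detach on last close.
 */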
912 int
913 g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
914 {
915 	struct g_geom *gp;
916 	struct g_provider *pp;
917 
918 	g_topology_assert();
919 
920 	if (sc == NULL)
921 		return (ENXIO);
922 
923 	gp = sc->sc_geom;
924 	pp = LIST_FIRST(&gp->provider);
925 	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
926 		if (force) {
927 			G_ELI_DEBUG(1, "Device %s is still open, so it "
928 			    "cannot be removed immediately.", pp->name);
929 			sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
930 			gp->access = g_eli_access;
931 			g_wither_provider(pp, ENXIO);
932 			return (EBUSY);
933 		} else {
934 			G_ELI_DEBUG(1,
935 			    "Device %s is still open (r%dw%de%d).", pp->name,
936 			    pp->acr, pp->acw, pp->ace);
937 			return (EBUSY);
938 		}
939 	}
940 
941 	mtx_lock(&sc->sc_queue_mtx);
942 	sc->sc_flags |= G_ELI_FLAG_DESTROY;
943 	wakeup(sc);
944 	while (!LIST_EMPTY(&sc->sc_workers)) {
945 		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
946 		    "geli:destroy", 0);
947 	}
948 	mtx_destroy(&sc->sc_queue_mtx);
949 	gp->softc = NULL;
950 	g_eli_key_destroy(sc);
951 	bzero(sc, sizeof(*sc));
952 	free(sc, M_ELI);
953 
954 	if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
955 		G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
956 	g_wither_geom_close(gp, ENXIO);
957 
958 	return (0);
959 }
960 
961 static int
962 g_eli_destroy_geom(struct gctl_req *req __unused,
963     struct g_class *mp __unused, struct g_geom *gp)
964 {
965 	struct g_eli_softc *sc;
966 
967 	sc = gp->softc;
968 	return (g_eli_destroy(sc, FALSE));
969 }
970 
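/*
 * Feed key files preloaded by the loader (the "<provider>:geli_keyfile<N>"
 * entries) into the given HMAC context.  Returns the number of key files
 * consumed, or 0 on error.
 */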
971 static int
972 g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
973 {
974 	u_char *keyfile, *data;
975 	char *file, name[64];
976 	size_t size;
977 	int i;
978 
979 	for (i = 0; ; i++) {
980 		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
981 		keyfile = preload_search_by_type(name);
982 		if (keyfile == NULL)
983 			return (i);	/* Return number of loaded keyfiles. */
984 		data = preload_fetch_addr(keyfile);
985 		if (data == NULL) {
986 			G_ELI_DEBUG(0, "Cannot find key file data for %s.",
987 			    name);
988 			return (0);
989 		}
990 		size = preload_fetch_size(keyfile);
991 		if (size == 0) {
992 			G_ELI_DEBUG(0, "Cannot find key file size for %s.",
993 			    name);
994 			return (0);
995 		}
996 		file = preload_search_info(keyfile, MODINFO_NAME);
997 		if (file == NULL) {
998 			G_ELI_DEBUG(0, "Cannot find key file name for %s.",
999 			    name);
1000 			return (0);
1001 		}
1002 		G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
1003 		    provider, name);
1004 		g_eli_crypto_hmac_update(ctx, data, size);
1005 	}
1006 }
1007 
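/*
 * Zero the contents of all preloaded key files for the given provider so
 * the key material does not linger in memory.
 */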
1008 static void
1009 g_eli_keyfiles_clear(const char *provider)
1010 {
1011 	u_char *keyfile, *data;
1012 	char name[64];
1013 	size_t size;
1014 	int i;
1015 
1016 	for (i = 0; ; i++) {
1017 		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
1018 		keyfile = preload_search_by_type(name);
1019 		if (keyfile == NULL)
1020 			return;
1021 		data = preload_fetch_addr(keyfile);
1022 		size = preload_fetch_size(keyfile);
1023 		if (data != NULL && size != 0)
1024 			bzero(data, size);
1025 	}
1026 }
1027 
1028 /*
1029  * Tasting is only done at boot time.
1030  * We detect providers which should be attached before root is mounted.
1031  */
1032 static struct g_geom *
1033 g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
1034 {
1035 	struct g_eli_metadata md;
1036 	struct g_geom *gp;
1037 	struct hmac_ctx ctx;
1038 	char passphrase[256];
1039 	u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
1040 	u_int i, nkey, nkeyfiles, tries;
1041 	int error;
1042 
1043 	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
1044 	g_topology_assert();
1045 
1046 	if (root_mounted() || g_eli_tries == 0)
1047 		return (NULL);
1048 
1049 	G_ELI_DEBUG(3, "Tasting %s.", pp->name);
1050 
1051 	error = g_eli_read_metadata(mp, pp, &md);
1052 	if (error != 0)
1053 		return (NULL);
1054 	gp = NULL;
1055 
1056 	if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
1057 		return (NULL);
1058 	if (md.md_version > G_ELI_VERSION) {
1059 		printf("geom_eli.ko module is too old to handle %s.\n",
1060 		    pp->name);
1061 		return (NULL);
1062 	}
1063 	if (md.md_provsize != pp->mediasize)
1064 		return (NULL);
1065 	/* Should we attach it on boot? */
1066 	if (!(md.md_flags & G_ELI_FLAG_BOOT))
1067 		return (NULL);
1068 	if (md.md_keys == 0x00) {
1069 		G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
1070 		return (NULL);
1071 	}
1072 	if (md.md_iterations == -1) {
1073 		/* If there is no passphrase, we try only once. */
1074 		tries = 1;
1075 	} else {
1076 		/* Ask for the passphrase no more than g_eli_tries times. */
1077 		tries = g_eli_tries;
1078 	}
1079 
1080 	for (i = 0; i <= tries; i++) {
1081 		g_eli_crypto_hmac_init(&ctx, NULL, 0);
1082 
1083 		/*
1084 		 * Load all key files.
1085 		 */
1086 		nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);
1087 
1088 		if (nkeyfiles == 0 && md.md_iterations == -1) {
1089 			/*
1090 			 * No key files and no passphrase, something is
1091 			 * definitely wrong here.
1092 			 * geli(8) doesn't allow such a situation, so assume
1093 			 * that there really was no passphrase and that the
1094 			 * key files are not properly defined in loader.conf.
1095 			 */
1096 			G_ELI_DEBUG(0,
1097 			    "Found no key files in loader.conf for %s.",
1098 			    pp->name);
1099 			return (NULL);
1100 		}
1101 
1102 		/* Ask for the passphrase if defined. */
1103 		if (md.md_iterations >= 0) {
1104 			/* Try first with cached passphrase. */
1105 			if (i == 0) {
1106 				if (!g_eli_boot_passcache)
1107 					continue;
1108 				memcpy(passphrase, cached_passphrase,
1109 				    sizeof(passphrase));
1110 			} else {
1111 				printf("Enter passphrase for %s: ", pp->name);
1112 				cngets(passphrase, sizeof(passphrase),
1113 				    g_eli_visible_passphrase);
1114 				memcpy(cached_passphrase, passphrase,
1115 				    sizeof(passphrase));
1116 			}
1117 		}
1118 
1119 		/*
1120 		 * Prepare Derived-Key from the user passphrase.
1121 		 */
1122 		if (md.md_iterations == 0) {
1123 			g_eli_crypto_hmac_update(&ctx, md.md_salt,
1124 			    sizeof(md.md_salt));
1125 			g_eli_crypto_hmac_update(&ctx, passphrase,
1126 			    strlen(passphrase));
1127 			bzero(passphrase, sizeof(passphrase));
1128 		} else if (md.md_iterations > 0) {
1129 			u_char dkey[G_ELI_USERKEYLEN];
1130 
1131 			pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
1132 			    sizeof(md.md_salt), passphrase, md.md_iterations);
1133 			bzero(passphrase, sizeof(passphrase));
1134 			g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
1135 			bzero(dkey, sizeof(dkey));
1136 		}
1137 
1138 		g_eli_crypto_hmac_final(&ctx, key, 0);
1139 
1140 		/*
1141 		 * Decrypt Master-Key.
1142 		 */
1143 		error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
1144 		bzero(key, sizeof(key));
1145 		if (error == -1) {
1146 			if (i == tries) {
1147 				G_ELI_DEBUG(0,
1148 				    "Wrong key for %s. No tries left.",
1149 				    pp->name);
1150 				g_eli_keyfiles_clear(pp->name);
1151 				return (NULL);
1152 			}
1153 			if (i > 0) {
1154 				G_ELI_DEBUG(0,
1155 				    "Wrong key for %s. Tries left: %u.",
1156 				    pp->name, tries - i);
1157 			}
1158 			/* Try again. */
1159 			continue;
1160 		} else if (error > 0) {
1161 			G_ELI_DEBUG(0,
1162 			    "Cannot decrypt Master Key for %s (error=%d).",
1163 			    pp->name, error);
1164 			g_eli_keyfiles_clear(pp->name);
1165 			return (NULL);
1166 		}
1167 		g_eli_keyfiles_clear(pp->name);
1168 		G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
1169 		break;
1170 	}
1171 
1172 	/*
1173 	 * We have the correct key, let's attach the provider.
1174 	 */
1175 	gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
1176 	bzero(mkey, sizeof(mkey));
1177 	bzero(&md, sizeof(md));
1178 	if (gp == NULL) {
1179 		G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
1180 		    G_ELI_SUFFIX);
1181 		return (NULL);
1182 	}
1183 	return (gp);
1184 }
1185 
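/*
 * Export GELI-specific state (keys, flags, algorithms, crypto backend) as
 * XML elements in the GEOM configuration dump.
 */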
1186 static void
1187 g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1188     struct g_consumer *cp, struct g_provider *pp)
1189 {
1190 	struct g_eli_softc *sc;
1191 
1192 	g_topology_assert();
1193 	sc = gp->softc;
1194 	if (sc == NULL)
1195 		return;
1196 	if (pp != NULL || cp != NULL)
1197 		return;	/* Nothing here. */
1198 
1199 	sbuf_printf(sb, "%s<KeysTotal>%ju</KeysTotal>\n", indent,
1200 	    (uintmax_t)sc->sc_ekeys_total);
1201 	sbuf_printf(sb, "%s<KeysAllocated>%ju</KeysAllocated>\n", indent,
1202 	    (uintmax_t)sc->sc_ekeys_allocated);
1203 	sbuf_printf(sb, "%s<Flags>", indent);
1204 	if (sc->sc_flags == 0)
1205 		sbuf_printf(sb, "NONE");
1206 	else {
1207 		int first = 1;
1208 
1209 #define ADD_FLAG(flag, name)	do {					\
1210 	if (sc->sc_flags & (flag)) {					\
1211 		if (!first)						\
1212 			sbuf_printf(sb, ", ");				\
1213 		else							\
1214 			first = 0;					\
1215 		sbuf_printf(sb, name);					\
1216 	}								\
1217 } while (0)
1218 		ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND");
1219 		ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY");
1220 		ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
1221 		ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
1222 		ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
1223 		ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
1224 		ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
1225 		ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
1226 		ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
1227 		ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
1228 		ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
1229 #undef  ADD_FLAG
1230 	}
1231 	sbuf_printf(sb, "</Flags>\n");
1232 
1233 	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
1234 		sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
1235 		    sc->sc_nkey);
1236 	}
1237 	sbuf_printf(sb, "%s<Version>%u</Version>\n", indent, sc->sc_version);
1238 	sbuf_printf(sb, "%s<Crypto>", indent);
1239 	switch (sc->sc_crypto) {
1240 	case G_ELI_CRYPTO_HW:
1241 		sbuf_printf(sb, "hardware");
1242 		break;
1243 	case G_ELI_CRYPTO_SW:
1244 		sbuf_printf(sb, "software");
1245 		break;
1246 	default:
1247 		sbuf_printf(sb, "UNKNOWN");
1248 		break;
1249 	}
1250 	sbuf_printf(sb, "</Crypto>\n");
1251 	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
1252 		sbuf_printf(sb,
1253 		    "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
1254 		    indent, g_eli_algo2str(sc->sc_aalgo));
1255 	}
1256 	sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
1257 	    sc->sc_ekeylen);
1258 	sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n",
1259 	    indent, g_eli_algo2str(sc->sc_ealgo));
1260 	sbuf_printf(sb, "%s<State>%s</State>\n", indent,
1261 	    (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE");
1262 }
1263 
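/*
 * Shutdown hook, run before filesystems are synced: destroy GELI devices
 * that are no longer open and mark the remaining ones to detach on last
 * close, so keys are wiped before the system goes down.
 */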
1264 static void
1265 g_eli_shutdown_pre_sync(void *arg, int howto)
1266 {
1267 	struct g_class *mp;
1268 	struct g_geom *gp, *gp2;
1269 	struct g_provider *pp;
1270 	struct g_eli_softc *sc;
1271 	int error;
1272 
1273 	mp = arg;
1274 	DROP_GIANT();
1275 	g_topology_lock();
1276 	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
1277 		sc = gp->softc;
1278 		if (sc == NULL)
1279 			continue;
1280 		pp = LIST_FIRST(&gp->provider);
1281 		KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
1282 		if (pp->acr + pp->acw + pp->ace == 0)
1283 			error = g_eli_destroy(sc, TRUE);
1284 		else {
1285 			sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
1286 			gp->access = g_eli_access;
1287 		}
1288 	}
1289 	g_topology_unlock();
1290 	PICKUP_GIANT();
1291 }
1292 
1293 static void
1294 g_eli_init(struct g_class *mp)
1295 {
1296 
1297 	g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
1298 	    g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
1299 	if (g_eli_pre_sync == NULL)
1300 		G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
1301 }
1302 
1303 static void
1304 g_eli_fini(struct g_class *mp)
1305 {
1306 
1307 	if (g_eli_pre_sync != NULL)
1308 		EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
1309 }
1310 
1311 DECLARE_GEOM_CLASS(g_eli_class, g_eli);
1312 MODULE_DEPEND(g_eli, crypto, 1, 1, 1);
1313