xref: /freebsd/sys/geom/eli/g_eli.c (revision 884a2a699669ec61e2366e3e358342dbc94be24a)
1 /*-
2  * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/linker.h>
34 #include <sys/module.h>
35 #include <sys/lock.h>
36 #include <sys/mutex.h>
37 #include <sys/bio.h>
38 #include <sys/sysctl.h>
39 #include <sys/malloc.h>
40 #include <sys/eventhandler.h>
41 #include <sys/kthread.h>
42 #include <sys/proc.h>
43 #include <sys/sched.h>
44 #include <sys/smp.h>
45 #include <sys/uio.h>
46 #include <sys/vnode.h>
47 
48 #include <vm/uma.h>
49 
50 #include <geom/geom.h>
51 #include <geom/eli/g_eli.h>
52 #include <geom/eli/pkcs5v2.h>
53 
54 FEATURE(geom_eli, "GEOM crypto module");
55 
56 MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");
57 
58 SYSCTL_DECL(_kern_geom);
59 SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
60 static int g_eli_version = G_ELI_VERSION;
61 SYSCTL_INT(_kern_geom_eli, OID_AUTO, version, CTLFLAG_RD, &g_eli_version, 0,
62     "GELI version");
63 int g_eli_debug = 0;
64 TUNABLE_INT("kern.geom.eli.debug", &g_eli_debug);
65 SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RW, &g_eli_debug, 0,
66     "Debug level");
67 static u_int g_eli_tries = 3;
68 TUNABLE_INT("kern.geom.eli.tries", &g_eli_tries);
69 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RW, &g_eli_tries, 0,
70     "Number of tries for entering the passphrase");
71 static u_int g_eli_visible_passphrase = GETS_NOECHO;
72 TUNABLE_INT("kern.geom.eli.visible_passphrase", &g_eli_visible_passphrase);
73 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RW,
74     &g_eli_visible_passphrase, 0,
75     "Visibility of passphrase prompt (0 = invisible, 1 = visible, 2 = asterisk)");
76 u_int g_eli_overwrites = G_ELI_OVERWRITES;
77 TUNABLE_INT("kern.geom.eli.overwrites", &g_eli_overwrites);
78 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RW, &g_eli_overwrites,
79     0, "Number of times on-disk keys should be overwritten when destroying them");
80 static u_int g_eli_threads = 0;
81 TUNABLE_INT("kern.geom.eli.threads", &g_eli_threads);
82 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RW, &g_eli_threads, 0,
83     "Number of threads doing crypto work");
84 u_int g_eli_batch = 0;
85 TUNABLE_INT("kern.geom.eli.batch", &g_eli_batch);
86 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RW, &g_eli_batch, 0,
87     "Use crypto operations batching");
88 
89 static eventhandler_tag g_eli_pre_sync = NULL;
90 
91 static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
92     struct g_geom *gp);
93 static void g_eli_init(struct g_class *mp);
94 static void g_eli_fini(struct g_class *mp);
95 
96 static g_taste_t g_eli_taste;
97 static g_dumpconf_t g_eli_dumpconf;
98 
99 struct g_class g_eli_class = {
100 	.name = G_ELI_CLASS_NAME,
101 	.version = G_VERSION,
102 	.ctlreq = g_eli_config,
103 	.taste = g_eli_taste,
104 	.destroy_geom = g_eli_destroy_geom,
105 	.init = g_eli_init,
106 	.fini = g_eli_fini
107 };
108 
109 
110 /*
111  * Code paths:
112  * BIO_READ:
113  *	g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
114  * BIO_WRITE:
115  *	g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
116  */
117 
118 
119 /*
120  * EAGAIN from crypto(9) means that the request was probably rebalanced to
121  * another crypto accelerator or something like this.
122  * The function updates the SID and reruns the operation.
123  */
124 int
125 g_eli_crypto_rerun(struct cryptop *crp)
126 {
127 	struct g_eli_softc *sc;
128 	struct g_eli_worker *wr;
129 	struct bio *bp;
130 	int error;
131 
132 	bp = (struct bio *)crp->crp_opaque;
133 	sc = bp->bio_to->geom->softc;
134 	LIST_FOREACH(wr, &sc->sc_workers, w_next) {
135 		if (wr->w_number == bp->bio_pflags)
136 			break;
137 	}
138 	KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
139 	G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).",
140 	    bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid,
141 	    (uintmax_t)crp->crp_sid);
142 	wr->w_sid = crp->crp_sid;
143 	crp->crp_etype = 0;
144 	error = crypto_dispatch(crp);
145 	if (error == 0)
146 		return (0);
147 	G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
148 	crp->crp_etype = error;
149 	return (error);
150 }
151 
152 /*
153  * The function is called after reading encrypted data from the provider.
154  *
155  * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
156  */
157 void
158 g_eli_read_done(struct bio *bp)
159 {
160 	struct g_eli_softc *sc;
161 	struct bio *pbp;
162 
163 	G_ELI_LOGREQ(2, bp, "Request done.");
164 	pbp = bp->bio_parent;
165 	if (pbp->bio_error == 0)
166 		pbp->bio_error = bp->bio_error;
167 	g_destroy_bio(bp);
168 	/*
169 	 * Do we have all sectors already?
170 	 */
171 	pbp->bio_inbed++;
172 	if (pbp->bio_inbed < pbp->bio_children)
173 		return;
174 	sc = pbp->bio_to->geom->softc;
175 	if (pbp->bio_error != 0) {
176 		G_ELI_LOGREQ(0, pbp, "%s() failed", __func__);
177 		pbp->bio_completed = 0;
178 		if (pbp->bio_driver2 != NULL) {
179 			free(pbp->bio_driver2, M_ELI);
180 			pbp->bio_driver2 = NULL;
181 		}
182 		g_io_deliver(pbp, pbp->bio_error);
183 		atomic_subtract_int(&sc->sc_inflight, 1);
184 		return;
185 	}
186 	mtx_lock(&sc->sc_queue_mtx);
187 	bioq_insert_tail(&sc->sc_queue, pbp);
188 	mtx_unlock(&sc->sc_queue_mtx);
189 	wakeup(sc);
190 }
191 
192 /*
193  * The function is called after we encrypt and write data.
194  *
195  * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
196  */
197 void
198 g_eli_write_done(struct bio *bp)
199 {
200 	struct g_eli_softc *sc;
201 	struct bio *pbp;
202 
203 	G_ELI_LOGREQ(2, bp, "Request done.");
204 	pbp = bp->bio_parent;
205 	if (pbp->bio_error == 0) {
206 		if (bp->bio_error != 0)
207 			pbp->bio_error = bp->bio_error;
208 	}
209 	g_destroy_bio(bp);
210 	/*
211 	 * Do we have all sectors already?
212 	 */
213 	pbp->bio_inbed++;
214 	if (pbp->bio_inbed < pbp->bio_children)
215 		return;
216 	free(pbp->bio_driver2, M_ELI);
217 	pbp->bio_driver2 = NULL;
218 	if (pbp->bio_error != 0) {
219 		G_ELI_LOGREQ(0, pbp, "Crypto WRITE request failed (error=%d).",
220 		    pbp->bio_error);
221 		pbp->bio_completed = 0;
222 	} else {
223 		/* Write is finished, send it up. */
224 		pbp->bio_completed = pbp->bio_length;
225 	}
227 	sc = pbp->bio_to->geom->softc;
228 	g_io_deliver(pbp, pbp->bio_error);
229 	atomic_subtract_int(&sc->sc_inflight, 1);
230 }
231 
232 /*
233  * This function should never be called, but GEOM requires that the
234  * ->orphan() method is set for every geom.
235  */
236 static void
237 g_eli_orphan_spoil_assert(struct g_consumer *cp)
238 {
239 
240 	panic("Function %s() called for %s.", __func__, cp->geom->name);
241 }
242 
243 static void
244 g_eli_orphan(struct g_consumer *cp)
245 {
246 	struct g_eli_softc *sc;
247 
248 	g_topology_assert();
249 	sc = cp->geom->softc;
250 	if (sc == NULL)
251 		return;
252 	g_eli_destroy(sc, TRUE);
253 }
254 
255 /*
256  * BIO_READ:
257  *	G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
258  * BIO_WRITE:
259  *	G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
260  */
261 static void
262 g_eli_start(struct bio *bp)
263 {
264 	struct g_eli_softc *sc;
265 	struct g_consumer *cp;
266 	struct bio *cbp;
267 
268 	sc = bp->bio_to->geom->softc;
269 	KASSERT(sc != NULL,
270 	    ("Provider's error should be set (error=%d)(device=%s).",
271 	    bp->bio_to->error, bp->bio_to->name));
272 	G_ELI_LOGREQ(2, bp, "Request received.");
273 
274 	switch (bp->bio_cmd) {
275 	case BIO_READ:
276 	case BIO_WRITE:
277 	case BIO_GETATTR:
278 	case BIO_FLUSH:
279 		break;
280 	case BIO_DELETE:
281 		/*
282 		 * We could eventually support BIO_DELETE requests.
283 		 * It could be done by overwriting the requested sectors with
284 		 * random data g_eli_overwrites number of times.
285 		 */
286 	default:
287 		g_io_deliver(bp, EOPNOTSUPP);
288 		return;
289 	}
290 	cbp = g_clone_bio(bp);
291 	if (cbp == NULL) {
292 		g_io_deliver(bp, ENOMEM);
293 		return;
294 	}
295 	bp->bio_driver1 = cbp;
296 	bp->bio_pflags = G_ELI_NEW_BIO;
297 	switch (bp->bio_cmd) {
298 	case BIO_READ:
299 		if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
300 			g_eli_crypto_read(sc, bp, 0);
301 			break;
302 		}
303 		/* FALLTHROUGH */
304 	case BIO_WRITE:
305 		mtx_lock(&sc->sc_queue_mtx);
306 		bioq_insert_tail(&sc->sc_queue, bp);
307 		mtx_unlock(&sc->sc_queue_mtx);
308 		wakeup(sc);
309 		break;
310 	case BIO_GETATTR:
311 	case BIO_FLUSH:
312 		cbp->bio_done = g_std_done;
313 		cp = LIST_FIRST(&sc->sc_geom->consumer);
314 		cbp->bio_to = cp->provider;
315 		G_ELI_LOGREQ(2, cbp, "Sending request.");
316 		g_io_request(cbp, cp);
317 		break;
318 	}
319 }
320 
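/*
 * Allocate a crypto(9) session for the given worker thread.  For
 * G_ELI_CRYPTO_UNKNOWN a hardware session is tried first with a fallback to
 * software, and the chosen backend is remembered in sc_crypto.
 */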
321 static int
322 g_eli_newsession(struct g_eli_worker *wr)
323 {
324 	struct g_eli_softc *sc;
325 	struct cryptoini crie, cria;
326 	int error;
327 
328 	sc = wr->w_softc;
329 
330 	bzero(&crie, sizeof(crie));
331 	crie.cri_alg = sc->sc_ealgo;
332 	crie.cri_klen = sc->sc_ekeylen;
333 	if (sc->sc_ealgo == CRYPTO_AES_XTS)
334 		crie.cri_klen <<= 1;
335 	if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) {
336 		crie.cri_key = g_eli_key_hold(sc, 0,
337 		    LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize);
338 	} else {
339 		crie.cri_key = sc->sc_ekey;
340 	}
341 	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
342 		bzero(&cria, sizeof(cria));
343 		cria.cri_alg = sc->sc_aalgo;
344 		cria.cri_klen = sc->sc_akeylen;
345 		cria.cri_key = sc->sc_akey;
346 		crie.cri_next = &cria;
347 	}
348 
349 	switch (sc->sc_crypto) {
350 	case G_ELI_CRYPTO_SW:
351 		error = crypto_newsession(&wr->w_sid, &crie,
352 		    CRYPTOCAP_F_SOFTWARE);
353 		break;
354 	case G_ELI_CRYPTO_HW:
355 		error = crypto_newsession(&wr->w_sid, &crie,
356 		    CRYPTOCAP_F_HARDWARE);
357 		break;
358 	case G_ELI_CRYPTO_UNKNOWN:
359 		error = crypto_newsession(&wr->w_sid, &crie,
360 		    CRYPTOCAP_F_HARDWARE);
361 		if (error == 0) {
362 			mtx_lock(&sc->sc_queue_mtx);
363 			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
364 				sc->sc_crypto = G_ELI_CRYPTO_HW;
365 			mtx_unlock(&sc->sc_queue_mtx);
366 		} else {
367 			error = crypto_newsession(&wr->w_sid, &crie,
368 			    CRYPTOCAP_F_SOFTWARE);
369 			mtx_lock(&sc->sc_queue_mtx);
370 			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
371 				sc->sc_crypto = G_ELI_CRYPTO_SW;
372 			mtx_unlock(&sc->sc_queue_mtx);
373 		}
374 		break;
375 	default:
376 		panic("%s: invalid condition", __func__);
377 	}
378 
379 	if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0)
380 		g_eli_key_drop(sc, crie.cri_key);
381 
382 	return (error);
383 }
384 
385 static void
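/*
 * Release the worker's crypto(9) session.
 */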
386 g_eli_freesession(struct g_eli_worker *wr)
387 {
388 
389 	crypto_freesession(wr->w_sid);
390 }
391 
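/*
 * Cancel all queued requests that have not been started yet.  Called with the
 * queue lock held when the device is going away; every request is completed
 * with ENXIO.
 */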
392 static void
393 g_eli_cancel(struct g_eli_softc *sc)
394 {
395 	struct bio *bp;
396 
397 	mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
398 
399 	while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
400 		KASSERT(bp->bio_pflags == G_ELI_NEW_BIO,
401 		    ("Not new bio when canceling (bp=%p).", bp));
402 		g_io_deliver(bp, ENXIO);
403 	}
404 }
405 
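/*
 * Dequeue the next request to work on.  While the device is suspended, new
 * requests are left on the queue and only requests that are already in
 * progress are returned.
 */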
406 static struct bio *
407 g_eli_takefirst(struct g_eli_softc *sc)
408 {
409 	struct bio *bp;
410 
411 	mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
412 
413 	if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND))
414 		return (bioq_takefirst(&sc->sc_queue));
415 	/*
416 	 * Device suspended, so we skip new I/O requests.
417 	 */
418 	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
419 		if (bp->bio_pflags != G_ELI_NEW_BIO)
420 			break;
421 	}
422 	if (bp != NULL)
423 		bioq_remove(&sc->sc_queue, bp);
424 	return (bp);
425 }
426 
427 /*
428  * This is the main function for the kernel worker threads, used when we
429  * don't have hardware acceleration and have to do the cryptography in
430  * software.  Dedicated threads are needed so that we don't slow down the
431  * g_up/g_down GEOM threads with crypto work.
432  */
433 static void
434 g_eli_worker(void *arg)
435 {
436 	struct g_eli_softc *sc;
437 	struct g_eli_worker *wr;
438 	struct bio *bp;
439 	int error;
440 
441 	wr = arg;
442 	sc = wr->w_softc;
443 #ifdef SMP
444 	/* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
445 	if (mp_ncpus > 1 && sc->sc_crypto == G_ELI_CRYPTO_SW &&
446 	    g_eli_threads == 0) {
447 		while (!smp_started)
448 			tsleep(wr, 0, "geli:smp", hz / 4);
449 	}
450 #endif
451 	thread_lock(curthread);
452 	sched_prio(curthread, PUSER);
453 	if (sc->sc_crypto == G_ELI_CRYPTO_SW && g_eli_threads == 0)
454 		sched_bind(curthread, wr->w_number);
455 	thread_unlock(curthread);
456 
457 	G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);
458 
459 	for (;;) {
460 		mtx_lock(&sc->sc_queue_mtx);
461 again:
462 		bp = g_eli_takefirst(sc);
463 		if (bp == NULL) {
464 			if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
465 				g_eli_cancel(sc);
466 				LIST_REMOVE(wr, w_next);
467 				g_eli_freesession(wr);
468 				free(wr, M_ELI);
469 				G_ELI_DEBUG(1, "Thread %s exiting.",
470 				    curthread->td_proc->p_comm);
471 				wakeup(&sc->sc_workers);
472 				mtx_unlock(&sc->sc_queue_mtx);
473 				kproc_exit(0);
474 			}
475 			while (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
476 				if (sc->sc_inflight > 0) {
477 					G_ELI_DEBUG(0, "inflight=%d", sc->sc_inflight);
478 					/*
479 					 * We still have inflight BIOs, so
480 					 * sleep and retry.
481 					 */
482 					msleep(sc, &sc->sc_queue_mtx, PRIBIO,
483 					    "geli:inf", hz / 5);
484 					goto again;
485 				}
486 				/*
487 				 * Suspend requested, mark the worker as
488 				 * suspended and go to sleep.
489 				 */
490 				if (wr->w_active) {
491 					g_eli_freesession(wr);
492 					wr->w_active = FALSE;
493 				}
494 				wakeup(&sc->sc_workers);
495 				msleep(sc, &sc->sc_queue_mtx, PRIBIO,
496 				    "geli:suspend", 0);
497 				if (!wr->w_active &&
498 				    !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) {
499 					error = g_eli_newsession(wr);
500 					KASSERT(error == 0,
501 					    ("g_eli_newsession() failed on resume (error=%d)",
502 					    error));
503 					wr->w_active = TRUE;
504 				}
505 				goto again;
506 			}
507 			msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0);
508 			continue;
509 		}
510 		if (bp->bio_pflags == G_ELI_NEW_BIO)
511 			atomic_add_int(&sc->sc_inflight, 1);
512 		mtx_unlock(&sc->sc_queue_mtx);
513 		if (bp->bio_pflags == G_ELI_NEW_BIO) {
514 			bp->bio_pflags = 0;
515 			if (sc->sc_flags & G_ELI_FLAG_AUTH) {
516 				if (bp->bio_cmd == BIO_READ)
517 					g_eli_auth_read(sc, bp);
518 				else
519 					g_eli_auth_run(wr, bp);
520 			} else {
521 				if (bp->bio_cmd == BIO_READ)
522 					g_eli_crypto_read(sc, bp, 1);
523 				else
524 					g_eli_crypto_run(wr, bp);
525 			}
526 		} else {
527 			if (sc->sc_flags & G_ELI_FLAG_AUTH)
528 				g_eli_auth_run(wr, bp);
529 			else
530 				g_eli_crypto_run(wr, bp);
531 		}
532 	}
533 }
534 
535 /*
536  * Here we generate the IV. It is unique for every sector.
537  */
538 void
539 g_eli_crypto_ivgen(struct g_eli_softc *sc, off_t offset, u_char *iv,
540     size_t size)
541 {
542 	uint8_t off[8];
543 
544 	if ((sc->sc_flags & G_ELI_FLAG_NATIVE_BYTE_ORDER) != 0)
545 		bcopy(&offset, off, sizeof(off));
546 	else
547 		le64enc(off, (uint64_t)offset);
548 
549 	switch (sc->sc_ealgo) {
550 	case CRYPTO_AES_XTS:
551 		bcopy(off, iv, sizeof(off));
552 		bzero(iv + sizeof(off), size - sizeof(off));
553 		break;
554 	default:
555 	    {
556 		u_char hash[SHA256_DIGEST_LENGTH];
557 		SHA256_CTX ctx;
558 
559 		/* Copy precalculated SHA256 context for IV-Key. */
560 		bcopy(&sc->sc_ivctx, &ctx, sizeof(ctx));
561 		SHA256_Update(&ctx, off, sizeof(off));
562 		SHA256_Final(hash, &ctx);
563 		bcopy(hash, iv, MIN(sizeof(hash), size));
564 		break;
565 	    }
566 	}
567 }
568 
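/*
 * Read and decode the GELI metadata stored in the provider's last sector.
 * A temporary geom and consumer are created just for the duration of the read.
 */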
569 int
570 g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
571     struct g_eli_metadata *md)
572 {
573 	struct g_geom *gp;
574 	struct g_consumer *cp;
575 	u_char *buf = NULL;
576 	int error;
577 
578 	g_topology_assert();
579 
580 	gp = g_new_geomf(mp, "eli:taste");
581 	gp->start = g_eli_start;
582 	gp->access = g_std_access;
583 	/*
584 	 * g_eli_read_metadata() is always called from the event thread.
585  * Our geom is created and destroyed in the same event, so no orphan
586  * or spoil event can occur in the meantime.
587 	 */
588 	gp->orphan = g_eli_orphan_spoil_assert;
589 	gp->spoiled = g_eli_orphan_spoil_assert;
590 	cp = g_new_consumer(gp);
591 	error = g_attach(cp, pp);
592 	if (error != 0)
593 		goto end;
594 	error = g_access(cp, 1, 0, 0);
595 	if (error != 0)
596 		goto end;
597 	g_topology_unlock();
598 	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
599 	    &error);
600 	g_topology_lock();
601 	if (buf == NULL)
602 		goto end;
603 	eli_metadata_decode(buf, md);
604 end:
605 	if (buf != NULL)
606 		g_free(buf);
607 	if (cp->provider != NULL) {
608 		if (cp->acr == 1)
609 			g_access(cp, -1, 0, 0);
610 		g_detach(cp);
611 	}
612 	g_destroy_consumer(cp);
613 	g_destroy_geom(gp);
614 	return (error);
615 }
616 
617 /*
618  * The function is called on the last close of the provider when the user has
619  * requested that it be detached in this situation.
620  */
621 static void
622 g_eli_last_close(struct g_eli_softc *sc)
623 {
624 	struct g_geom *gp;
625 	struct g_provider *pp;
626 	char ppname[64];
627 	int error;
628 
629 	g_topology_assert();
630 	gp = sc->sc_geom;
631 	pp = LIST_FIRST(&gp->provider);
632 	strlcpy(ppname, pp->name, sizeof(ppname));
633 	error = g_eli_destroy(sc, TRUE);
634 	KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
635 	    ppname, error));
636 	G_ELI_DEBUG(0, "Detached %s on last close.", ppname);
637 }
638 
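/*
 * Access method used when the detach-on-last-close feature is enabled or the
 * provider is read-only.  Write opens of read-only devices are refused;
 * otherwise write opens are recorded and the device is detached on last close
 * when that was requested.
 */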
639 int
640 g_eli_access(struct g_provider *pp, int dr, int dw, int de)
641 {
642 	struct g_eli_softc *sc;
643 	struct g_geom *gp;
644 
645 	gp = pp->geom;
646 	sc = gp->softc;
647 
648 	if (dw > 0) {
649 		if (sc->sc_flags & G_ELI_FLAG_RO) {
650 			/* Deny write attempts. */
651 			return (EROFS);
652 		}
653 		/* Someone is opening us for write; we need to remember that. */
654 		sc->sc_flags |= G_ELI_FLAG_WOPEN;
655 		return (0);
656 	}
657 	/* Is this the last close? */
658 	if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
659 		return (0);
660 
661 	/*
662 	 * Automatically detach on last close if requested.
663 	 */
664 	if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
665 	    (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
666 		g_eli_last_close(sc);
667 	}
668 	return (0);
669 }
670 
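/*
 * Return non-zero if the given CPU is halted (see hlt_cpus_mask), so that no
 * worker thread gets bound to it.
 */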
671 static int
672 g_eli_cpu_is_disabled(int cpu)
673 {
674 #ifdef SMP
675 	return ((hlt_cpus_mask & (1 << cpu)) != 0);
676 #else
677 	return (0);
678 #endif
679 }
680 
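/*
 * Create the encrypted device <provider>.eli: initialize the softc from the
 * metadata, attach to and open the underlying provider, start one worker
 * thread per usable CPU (a single one is enough with hardware crypto) and
 * finally announce the decrypted provider.
 */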
681 struct g_geom *
682 g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
683     const struct g_eli_metadata *md, const u_char *mkey, int nkey)
684 {
685 	struct g_eli_softc *sc;
686 	struct g_eli_worker *wr;
687 	struct g_geom *gp;
688 	struct g_provider *pp;
689 	struct g_consumer *cp;
690 	u_int i, threads;
691 	int error;
692 
693 	G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);
694 
695 	gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
696 	sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
697 	gp->start = g_eli_start;
698 	/*
699 	 * Spoiling cannot actually happen, because we either keep the provider
700 	 * open for writing all the time or the provider is read-only.
701 	 */
702 	gp->spoiled = g_eli_orphan_spoil_assert;
703 	gp->orphan = g_eli_orphan;
704 	gp->dumpconf = g_eli_dumpconf;
705 	/*
706 	 * If the detach-on-last-close feature is not enabled and we don't operate
707 	 * on a read-only provider, we can simply use g_std_access().
708 	 */
709 	if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
710 		gp->access = g_eli_access;
711 	else
712 		gp->access = g_std_access;
713 
714 	sc->sc_inflight = 0;
715 	sc->sc_crypto = G_ELI_CRYPTO_UNKNOWN;
716 	sc->sc_flags = md->md_flags;
717 	/* Backward compatibility. */
718 	if (md->md_version < 4)
719 		sc->sc_flags |= G_ELI_FLAG_NATIVE_BYTE_ORDER;
720 	if (md->md_version < 5)
721 		sc->sc_flags |= G_ELI_FLAG_SINGLE_KEY;
722 	if (md->md_version < 6 && (sc->sc_flags & G_ELI_FLAG_AUTH) != 0)
723 		sc->sc_flags |= G_ELI_FLAG_FIRST_KEY;
724 	sc->sc_ealgo = md->md_ealgo;
725 	sc->sc_nkey = nkey;
726 
727 	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
728 		sc->sc_akeylen = sizeof(sc->sc_akey) * 8;
729 		sc->sc_aalgo = md->md_aalgo;
730 		sc->sc_alen = g_eli_hashlen(sc->sc_aalgo);
731 
732 		sc->sc_data_per_sector = bpp->sectorsize - sc->sc_alen;
733 		/*
734 		 * Some hash functions (like SHA1 and RIPEMD160) generate hashes
735 		 * whose length is not a multiple of 128 bits, but we want the data
736 		 * length to be a multiple of 128 bits, so we can encrypt without
737 		 * padding. The line below rounds the data length down to a
738 		 * multiple of 128 bits.
739 		 */
740 		sc->sc_data_per_sector -= sc->sc_data_per_sector % 16;
741 
742 		sc->sc_bytes_per_sector =
743 		    (md->md_sectorsize - 1) / sc->sc_data_per_sector + 1;
744 		sc->sc_bytes_per_sector *= bpp->sectorsize;
745 	}
746 
747 	gp->softc = sc;
748 	sc->sc_geom = gp;
749 
750 	bioq_init(&sc->sc_queue);
751 	mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);
752 	mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF);
753 
754 	pp = NULL;
755 	cp = g_new_consumer(gp);
756 	error = g_attach(cp, bpp);
757 	if (error != 0) {
758 		if (req != NULL) {
759 			gctl_error(req, "Cannot attach to %s (error=%d).",
760 			    bpp->name, error);
761 		} else {
762 			G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
763 			    bpp->name, error);
764 		}
765 		goto failed;
766 	}
767 	/*
768 	 * Keep the provider open all the time, so we can run critical tasks,
769 	 * like Master Key deletion, without wondering whether we can open the
770 	 * provider or not.
771 	 * We only skip opening the provider for writing when the user requested
772 	 * read-only access.
773 	 */
774 	if (sc->sc_flags & G_ELI_FLAG_RO)
775 		error = g_access(cp, 1, 0, 1);
776 	else
777 		error = g_access(cp, 1, 1, 1);
778 	if (error != 0) {
779 		if (req != NULL) {
780 			gctl_error(req, "Cannot access %s (error=%d).",
781 			    bpp->name, error);
782 		} else {
783 			G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
784 			    bpp->name, error);
785 		}
786 		goto failed;
787 	}
788 
789 	sc->sc_sectorsize = md->md_sectorsize;
790 	sc->sc_mediasize = bpp->mediasize;
791 	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME))
792 		sc->sc_mediasize -= bpp->sectorsize;
793 	if (!(sc->sc_flags & G_ELI_FLAG_AUTH))
794 		sc->sc_mediasize -= (sc->sc_mediasize % sc->sc_sectorsize);
795 	else {
796 		sc->sc_mediasize /= sc->sc_bytes_per_sector;
797 		sc->sc_mediasize *= sc->sc_sectorsize;
798 	}
799 
800 	/*
801 	 * Remember the keys in our softc structure.
802 	 */
803 	g_eli_mkey_propagate(sc, mkey);
804 	sc->sc_ekeylen = md->md_keylen;
805 
806 	LIST_INIT(&sc->sc_workers);
807 
808 	threads = g_eli_threads;
809 	if (threads == 0)
810 		threads = mp_ncpus;
811 	else if (threads > mp_ncpus) {
812 		/* There is really no need for too many worker threads. */
813 		threads = mp_ncpus;
814 		G_ELI_DEBUG(0, "Reducing number of threads to %u.", threads);
815 	}
816 	for (i = 0; i < threads; i++) {
817 		if (g_eli_cpu_is_disabled(i)) {
818 			G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
819 			    bpp->name, i);
820 			continue;
821 		}
822 		wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
823 		wr->w_softc = sc;
824 		wr->w_number = i;
825 		wr->w_active = TRUE;
826 
827 		error = g_eli_newsession(wr);
828 		if (error != 0) {
829 			free(wr, M_ELI);
830 			if (req != NULL) {
831 				gctl_error(req, "Cannot set up crypto session "
832 				    "for %s (error=%d).", bpp->name, error);
833 			} else {
834 				G_ELI_DEBUG(1, "Cannot set up crypto session "
835 				    "for %s (error=%d).", bpp->name, error);
836 			}
837 			goto failed;
838 		}
839 
840 		error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
841 		    "g_eli[%u] %s", i, bpp->name);
842 		if (error != 0) {
843 			g_eli_freesession(wr);
844 			free(wr, M_ELI);
845 			if (req != NULL) {
846 				gctl_error(req, "Cannot create kernel thread "
847 				    "for %s (error=%d).", bpp->name, error);
848 			} else {
849 				G_ELI_DEBUG(1, "Cannot create kernel thread "
850 				    "for %s (error=%d).", bpp->name, error);
851 			}
852 			goto failed;
853 		}
854 		LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
855 		/* If we have hardware support, one thread is enough. */
856 		if (sc->sc_crypto == G_ELI_CRYPTO_HW)
857 			break;
858 	}
859 
860 	/*
861 	 * Create decrypted provider.
862 	 */
863 	pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
864 	pp->mediasize = sc->sc_mediasize;
865 	pp->sectorsize = sc->sc_sectorsize;
866 
867 	g_error_provider(pp, 0);
868 
869 	G_ELI_DEBUG(0, "Device %s created.", pp->name);
870 	G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
871 	    sc->sc_ekeylen);
872 	if (sc->sc_flags & G_ELI_FLAG_AUTH)
873 		G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
874 	G_ELI_DEBUG(0, "    Crypto: %s",
875 	    sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
876 	return (gp);
877 failed:
878 	mtx_lock(&sc->sc_queue_mtx);
879 	sc->sc_flags |= G_ELI_FLAG_DESTROY;
880 	wakeup(sc);
881 	/*
882 	 * Wait for the kernel threads to self-destruct.
883 	 */
884 	while (!LIST_EMPTY(&sc->sc_workers)) {
885 		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
886 		    "geli:destroy", 0);
887 	}
888 	mtx_destroy(&sc->sc_queue_mtx);
889 	if (cp->provider != NULL) {
890 		if (cp->acr == 1)
891 			g_access(cp, -1, -1, -1);
892 		g_detach(cp);
893 	}
894 	g_destroy_consumer(cp);
895 	g_destroy_geom(gp);
896 	g_eli_key_destroy(sc);
897 	bzero(sc, sizeof(*sc));
898 	free(sc, M_ELI);
899 	return (NULL);
900 }
901 
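/*
 * Destroy the given device: refuse to do so while it is still open unless the
 * destruction is forced, then ask the worker threads to exit and wait for
 * them, scrub the keys and wither the geom.
 */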
902 int
903 g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
904 {
905 	struct g_geom *gp;
906 	struct g_provider *pp;
907 
908 	g_topology_assert();
909 
910 	if (sc == NULL)
911 		return (ENXIO);
912 
913 	gp = sc->sc_geom;
914 	pp = LIST_FIRST(&gp->provider);
915 	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
916 		if (force) {
917 			G_ELI_DEBUG(1, "Device %s is still open, so it "
918 			    "cannot be definitely removed.", pp->name);
919 		} else {
920 			G_ELI_DEBUG(1,
921 			    "Device %s is still open (r%dw%de%d).", pp->name,
922 			    pp->acr, pp->acw, pp->ace);
923 			return (EBUSY);
924 		}
925 	}
926 
927 	mtx_lock(&sc->sc_queue_mtx);
928 	sc->sc_flags |= G_ELI_FLAG_DESTROY;
929 	wakeup(sc);
930 	while (!LIST_EMPTY(&sc->sc_workers)) {
931 		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
932 		    "geli:destroy", 0);
933 	}
934 	mtx_destroy(&sc->sc_queue_mtx);
935 	gp->softc = NULL;
936 	g_eli_key_destroy(sc);
937 	bzero(sc, sizeof(*sc));
938 	free(sc, M_ELI);
939 
940 	if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
941 		G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
942 	g_wither_geom_close(gp, ENXIO);
943 
944 	return (0);
945 }
946 
947 static int
948 g_eli_destroy_geom(struct gctl_req *req __unused,
949     struct g_class *mp __unused, struct g_geom *gp)
950 {
951 	struct g_eli_softc *sc;
952 
953 	sc = gp->softc;
954 	return (g_eli_destroy(sc, FALSE));
955 }
956 
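/*
 * Feed all key files preloaded by the loader for the given provider
 * ("<provider>:geli_keyfileN") into the HMAC context.  Returns the number of
 * key files loaded, or 0 if none were found or one of them was incomplete.
 */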
957 static int
958 g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
959 {
960 	u_char *keyfile, *data;
961 	char *file, name[64];
962 	size_t size;
963 	int i;
964 
965 	for (i = 0; ; i++) {
966 		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
967 		keyfile = preload_search_by_type(name);
968 		if (keyfile == NULL)
969 			return (i);	/* Return number of loaded keyfiles. */
970 		data = preload_fetch_addr(keyfile);
971 		if (data == NULL) {
972 			G_ELI_DEBUG(0, "Cannot find key file data for %s.",
973 			    name);
974 			return (0);
975 		}
976 		size = preload_fetch_size(keyfile);
977 		if (size == 0) {
978 			G_ELI_DEBUG(0, "Cannot find key file size for %s.",
979 			    name);
980 			return (0);
981 		}
982 		file = preload_search_info(keyfile, MODINFO_NAME);
983 		if (file == NULL) {
984 			G_ELI_DEBUG(0, "Cannot find key file name for %s.",
985 			    name);
986 			return (0);
987 		}
988 		G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
989 		    provider, name);
990 		g_eli_crypto_hmac_update(ctx, data, size);
991 	}
992 }
993 
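/*
 * Zero out the contents of all preloaded key files for the given provider so
 * the key material doesn't linger in memory.
 */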
994 static void
995 g_eli_keyfiles_clear(const char *provider)
996 {
997 	u_char *keyfile, *data;
998 	char name[64];
999 	size_t size;
1000 	int i;
1001 
1002 	for (i = 0; ; i++) {
1003 		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
1004 		keyfile = preload_search_by_type(name);
1005 		if (keyfile == NULL)
1006 			return;
1007 		data = preload_fetch_addr(keyfile);
1008 		size = preload_fetch_size(keyfile);
1009 		if (data != NULL && size != 0)
1010 			bzero(data, size);
1011 	}
1012 }
1013 
1014 /*
1015  * Tasting is only done at boot.
1016  * We detect providers which should be attached before root is mounted.
1017  */
1018 static struct g_geom *
1019 g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
1020 {
1021 	struct g_eli_metadata md;
1022 	struct g_geom *gp;
1023 	struct hmac_ctx ctx;
1024 	char passphrase[256];
1025 	u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
1026 	u_int i, nkey, nkeyfiles, tries;
1027 	int error;
1028 
1029 	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
1030 	g_topology_assert();
1031 
1032 	if (root_mounted() || g_eli_tries == 0)
1033 		return (NULL);
1034 
1035 	G_ELI_DEBUG(3, "Tasting %s.", pp->name);
1036 
1037 	error = g_eli_read_metadata(mp, pp, &md);
1038 	if (error != 0)
1039 		return (NULL);
1040 	gp = NULL;
1041 
1042 	if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
1043 		return (NULL);
1044 	if (md.md_version > G_ELI_VERSION) {
1045 		printf("geom_eli.ko module is too old to handle %s.\n",
1046 		    pp->name);
1047 		return (NULL);
1048 	}
1049 	if (md.md_provsize != pp->mediasize)
1050 		return (NULL);
1051 	/* Should we attach it on boot? */
1052 	if (!(md.md_flags & G_ELI_FLAG_BOOT))
1053 		return (NULL);
1054 	if (md.md_keys == 0x00) {
1055 		G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
1056 		return (NULL);
1057 	}
1058 	if (md.md_iterations == -1) {
1059 		/* If there is no passphrase, we try only once. */
1060 		tries = 1;
1061 	} else {
1062 		/* Ask for the passphrase no more than g_eli_tries times. */
1063 		tries = g_eli_tries;
1064 	}
1065 
1066 	for (i = 0; i < tries; i++) {
1067 		g_eli_crypto_hmac_init(&ctx, NULL, 0);
1068 
1069 		/*
1070 		 * Load all key files.
1071 		 */
1072 		nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);
1073 
1074 		if (nkeyfiles == 0 && md.md_iterations == -1) {
1075 			/*
1076 			 * No key files and no passphrase: something is
1077 			 * definitely wrong here.
1078 			 * geli(8) doesn't allow such a situation, so assume
1079 			 * that there was really no passphrase and that in this
1080 			 * case the key files are not properly defined in loader.conf.
1081 			 */
1082 			G_ELI_DEBUG(0,
1083 			    "Found no key files in loader.conf for %s.",
1084 			    pp->name);
1085 			return (NULL);
1086 		}
1087 
1088 		/* Ask for the passphrase if defined. */
1089 		if (md.md_iterations >= 0) {
1090 			printf("Enter passphrase for %s: ", pp->name);
1091 			gets(passphrase, sizeof(passphrase),
1092 			    g_eli_visible_passphrase);
1093 		}
1094 
1095 		/*
1096 		 * Prepare Derived-Key from the user passphrase.
1097 		 */
1098 		if (md.md_iterations == 0) {
1099 			g_eli_crypto_hmac_update(&ctx, md.md_salt,
1100 			    sizeof(md.md_salt));
1101 			g_eli_crypto_hmac_update(&ctx, passphrase,
1102 			    strlen(passphrase));
1103 			bzero(passphrase, sizeof(passphrase));
1104 		} else if (md.md_iterations > 0) {
1105 			u_char dkey[G_ELI_USERKEYLEN];
1106 
1107 			pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
1108 			    sizeof(md.md_salt), passphrase, md.md_iterations);
1109 			bzero(passphrase, sizeof(passphrase));
1110 			g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
1111 			bzero(dkey, sizeof(dkey));
1112 		}
1113 
1114 		g_eli_crypto_hmac_final(&ctx, key, 0);
1115 
1116 		/*
1117 		 * Decrypt Master-Key.
1118 		 */
1119 		error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
1120 		bzero(key, sizeof(key));
1121 		if (error == -1) {
1122 			if (i == tries - 1) {
1123 				G_ELI_DEBUG(0,
1124 				    "Wrong key for %s. No tries left.",
1125 				    pp->name);
1126 				g_eli_keyfiles_clear(pp->name);
1127 				return (NULL);
1128 			}
1129 			G_ELI_DEBUG(0, "Wrong key for %s. Tries left: %u.",
1130 			    pp->name, tries - i - 1);
1131 			/* Try again. */
1132 			continue;
1133 		} else if (error > 0) {
1134 			G_ELI_DEBUG(0, "Cannot decrypt Master Key for %s (error=%d).",
1135 			    pp->name, error);
1136 			g_eli_keyfiles_clear(pp->name);
1137 			return (NULL);
1138 		}
1139 		G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
1140 		break;
1141 	}
1142 
1143 	/*
1144 	 * We have the correct key, let's attach the provider.
1145 	 */
1146 	gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
1147 	bzero(mkey, sizeof(mkey));
1148 	bzero(&md, sizeof(md));
1149 	if (gp == NULL) {
1150 		G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
1151 		    G_ELI_SUFFIX);
1152 		return (NULL);
1153 	}
1154 	return (gp);
1155 }
1156 
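/*
 * Dump device-specific configuration (key counters, flags, crypto backend,
 * algorithms and state) into the GEOM XML configuration.
 */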
1157 static void
1158 g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1159     struct g_consumer *cp, struct g_provider *pp)
1160 {
1161 	struct g_eli_softc *sc;
1162 
1163 	g_topology_assert();
1164 	sc = gp->softc;
1165 	if (sc == NULL)
1166 		return;
1167 	if (pp != NULL || cp != NULL)
1168 		return;	/* Nothing here. */
1169 
1170 	sbuf_printf(sb, "%s<KeysTotal>%ju</KeysTotal>", indent,
1171 	    (uintmax_t)sc->sc_ekeys_total);
1172 	sbuf_printf(sb, "%s<KeysAllocated>%ju</KeysAllocated>", indent,
1173 	    (uintmax_t)sc->sc_ekeys_allocated);
1174 	sbuf_printf(sb, "%s<Flags>", indent);
1175 	if (sc->sc_flags == 0)
1176 		sbuf_printf(sb, "NONE");
1177 	else {
1178 		int first = 1;
1179 
1180 #define ADD_FLAG(flag, name)	do {					\
1181 	if (sc->sc_flags & (flag)) {					\
1182 		if (!first)						\
1183 			sbuf_printf(sb, ", ");				\
1184 		else							\
1185 			first = 0;					\
1186 		sbuf_printf(sb, name);					\
1187 	}								\
1188 } while (0)
1189 		ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND");
1190 		ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY");
1191 		ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
1192 		ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
1193 		ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
1194 		ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
1195 		ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
1196 		ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
1197 		ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
1198 		ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
1199 		ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
1200 #undef  ADD_FLAG
1201 	}
1202 	sbuf_printf(sb, "</Flags>\n");
1203 
1204 	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
1205 		sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
1206 		    sc->sc_nkey);
1207 	}
1208 	sbuf_printf(sb, "%s<Crypto>", indent);
1209 	switch (sc->sc_crypto) {
1210 	case G_ELI_CRYPTO_HW:
1211 		sbuf_printf(sb, "hardware");
1212 		break;
1213 	case G_ELI_CRYPTO_SW:
1214 		sbuf_printf(sb, "software");
1215 		break;
1216 	default:
1217 		sbuf_printf(sb, "UNKNOWN");
1218 		break;
1219 	}
1220 	sbuf_printf(sb, "</Crypto>\n");
1221 	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
1222 		sbuf_printf(sb,
1223 		    "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
1224 		    indent, g_eli_algo2str(sc->sc_aalgo));
1225 	}
1226 	sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
1227 	    sc->sc_ekeylen);
1228 	sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n", indent,
1229 	    g_eli_algo2str(sc->sc_ealgo));
1230 	sbuf_printf(sb, "%s<State>%s</State>\n", indent,
1231 	    (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE");
1232 }
1233 
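/*
 * shutdown_pre_sync event handler: detach devices that are no longer open and
 * mark the still-open ones for detach on last close, so the keys are destroyed
 * before the system goes down.
 */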
1234 static void
1235 g_eli_shutdown_pre_sync(void *arg, int howto)
1236 {
1237 	struct g_class *mp;
1238 	struct g_geom *gp, *gp2;
1239 	struct g_provider *pp;
1240 	struct g_eli_softc *sc;
1241 	int error;
1242 
1243 	mp = arg;
1244 	DROP_GIANT();
1245 	g_topology_lock();
1246 	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
1247 		sc = gp->softc;
1248 		if (sc == NULL)
1249 			continue;
1250 		pp = LIST_FIRST(&gp->provider);
1251 		KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
1252 		if (pp->acr + pp->acw + pp->ace == 0)
1253 			error = g_eli_destroy(sc, TRUE);
1254 		else {
1255 			sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
1256 			gp->access = g_eli_access;
1257 		}
1258 	}
1259 	g_topology_unlock();
1260 	PICKUP_GIANT();
1261 }
1262 
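/*
 * Class initialization: register the shutdown_pre_sync event handler.
 */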
1263 static void
1264 g_eli_init(struct g_class *mp)
1265 {
1266 
1267 	g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
1268 	    g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
1269 	if (g_eli_pre_sync == NULL)
1270 		G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
1271 }
1272 
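/*
 * Class teardown: deregister the shutdown_pre_sync event handler.
 */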
1273 static void
1274 g_eli_fini(struct g_class *mp)
1275 {
1276 
1277 	if (g_eli_pre_sync != NULL)
1278 		EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
1279 }
1280 
1281 DECLARE_GEOM_CLASS(g_eli_class, g_eli);
1282 MODULE_DEPEND(g_eli, crypto, 1, 1, 1);
1283