xref: /freebsd/sys/geom/eli/g_eli.c (revision 9ecd54f24fe9fa373e07c9fd7c052deb2188f545)
1 /*-
2  * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/cons.h>
33 #include <sys/kernel.h>
34 #include <sys/linker.h>
35 #include <sys/module.h>
36 #include <sys/lock.h>
37 #include <sys/mutex.h>
38 #include <sys/bio.h>
39 #include <sys/sbuf.h>
40 #include <sys/sysctl.h>
41 #include <sys/malloc.h>
42 #include <sys/eventhandler.h>
43 #include <sys/kthread.h>
44 #include <sys/proc.h>
45 #include <sys/sched.h>
46 #include <sys/smp.h>
47 #include <sys/uio.h>
48 #include <sys/vnode.h>
49 
50 #include <vm/uma.h>
51 
52 #include <geom/geom.h>
53 #include <geom/eli/g_eli.h>
54 #include <geom/eli/pkcs5v2.h>
55 
56 FEATURE(geom_eli, "GEOM crypto module");
57 
58 MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");
59 
60 SYSCTL_DECL(_kern_geom);
61 SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
62 static int g_eli_version = G_ELI_VERSION;
63 SYSCTL_INT(_kern_geom_eli, OID_AUTO, version, CTLFLAG_RD, &g_eli_version, 0,
64     "GELI version");
65 int g_eli_debug = 0;
66 TUNABLE_INT("kern.geom.eli.debug", &g_eli_debug);
67 SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RW, &g_eli_debug, 0,
68     "Debug level");
69 static u_int g_eli_tries = 3;
70 TUNABLE_INT("kern.geom.eli.tries", &g_eli_tries);
71 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RW, &g_eli_tries, 0,
72     "Number of tries for entering the passphrase");
73 static u_int g_eli_visible_passphrase = GETS_NOECHO;
74 TUNABLE_INT("kern.geom.eli.visible_passphrase", &g_eli_visible_passphrase);
75 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RW,
76     &g_eli_visible_passphrase, 0,
77     "Visibility of passphrase prompt (0 = invisible, 1 = visible, 2 = asterisk)");
78 u_int g_eli_overwrites = G_ELI_OVERWRITES;
79 TUNABLE_INT("kern.geom.eli.overwrites", &g_eli_overwrites);
80 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RW, &g_eli_overwrites,
81     0, "Number of times on-disk keys should be overwritten when destroying them");
82 static u_int g_eli_threads = 0;
83 TUNABLE_INT("kern.geom.eli.threads", &g_eli_threads);
84 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RW, &g_eli_threads, 0,
85     "Number of threads doing crypto work");
86 u_int g_eli_batch = 0;
87 TUNABLE_INT("kern.geom.eli.batch", &g_eli_batch);
88 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RW, &g_eli_batch, 0,
89     "Use crypto operations batching");
90 
91 static eventhandler_tag g_eli_pre_sync = NULL;
92 
93 static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
94     struct g_geom *gp);
95 static void g_eli_init(struct g_class *mp);
96 static void g_eli_fini(struct g_class *mp);
97 
98 static g_taste_t g_eli_taste;
99 static g_dumpconf_t g_eli_dumpconf;
100 
101 struct g_class g_eli_class = {
102 	.name = G_ELI_CLASS_NAME,
103 	.version = G_VERSION,
104 	.ctlreq = g_eli_config,
105 	.taste = g_eli_taste,
106 	.destroy_geom = g_eli_destroy_geom,
107 	.init = g_eli_init,
108 	.fini = g_eli_fini
109 };
110 
111 
112 /*
113  * Code paths:
114  * BIO_READ:
115  *	g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
116  * BIO_WRITE:
117  *	g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
118  */
119 
120 
121 /*
122  * EAGAIN from crypto(9) means that we were probably rebalanced to another
123  * crypto accelerator or something similar.
124  * The function updates the SID and reruns the operation.
125  */
126 int
127 g_eli_crypto_rerun(struct cryptop *crp)
128 {
129 	struct g_eli_softc *sc;
130 	struct g_eli_worker *wr;
131 	struct bio *bp;
132 	int error;
133 
134 	bp = (struct bio *)crp->crp_opaque;
135 	sc = bp->bio_to->geom->softc;
136 	LIST_FOREACH(wr, &sc->sc_workers, w_next) {
137 		if (wr->w_number == bp->bio_pflags)
138 			break;
139 	}
140 	KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
141 	G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).",
142 	    bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid,
143 	    (uintmax_t)crp->crp_sid);
144 	wr->w_sid = crp->crp_sid;
145 	crp->crp_etype = 0;
146 	error = crypto_dispatch(crp);
147 	if (error == 0)
148 		return (0);
149 	G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
150 	crp->crp_etype = error;
151 	return (error);
152 }
153 
154 /*
155  * This function is called after reading encrypted data from the provider.
156  *
157  * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
158  */
159 void
160 g_eli_read_done(struct bio *bp)
161 {
162 	struct g_eli_softc *sc;
163 	struct bio *pbp;
164 
165 	G_ELI_LOGREQ(2, bp, "Request done.");
166 	pbp = bp->bio_parent;
167 	if (pbp->bio_error == 0)
168 		pbp->bio_error = bp->bio_error;
169 	g_destroy_bio(bp);
170 	/*
171 	 * Do we have all sectors already?
172 	 */
173 	pbp->bio_inbed++;
174 	if (pbp->bio_inbed < pbp->bio_children)
175 		return;
176 	sc = pbp->bio_to->geom->softc;
177 	if (pbp->bio_error != 0) {
178 		G_ELI_LOGREQ(0, pbp, "%s() failed", __func__);
179 		pbp->bio_completed = 0;
180 		if (pbp->bio_driver2 != NULL) {
181 			free(pbp->bio_driver2, M_ELI);
182 			pbp->bio_driver2 = NULL;
183 		}
184 		g_io_deliver(pbp, pbp->bio_error);
185 		atomic_subtract_int(&sc->sc_inflight, 1);
186 		return;
187 	}
188 	mtx_lock(&sc->sc_queue_mtx);
189 	bioq_insert_tail(&sc->sc_queue, pbp);
190 	mtx_unlock(&sc->sc_queue_mtx);
191 	wakeup(sc);
192 }
193 
194 /*
195  * The function is called after we encrypt and write data.
196  *
197  * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
198  */
199 void
200 g_eli_write_done(struct bio *bp)
201 {
202 	struct g_eli_softc *sc;
203 	struct bio *pbp;
204 
205 	G_ELI_LOGREQ(2, bp, "Request done.");
206 	pbp = bp->bio_parent;
207 	if (pbp->bio_error == 0) {
208 		if (bp->bio_error != 0)
209 			pbp->bio_error = bp->bio_error;
210 	}
211 	g_destroy_bio(bp);
212 	/*
213 	 * Do we have all sectors already?
214 	 */
215 	pbp->bio_inbed++;
216 	if (pbp->bio_inbed < pbp->bio_children)
217 		return;
218 	free(pbp->bio_driver2, M_ELI);
219 	pbp->bio_driver2 = NULL;
220 	if (pbp->bio_error != 0) {
221 		G_ELI_LOGREQ(0, pbp, "Crypto WRITE request failed (error=%d).",
222 		    pbp->bio_error);
223 		pbp->bio_completed = 0;
224 	} else
225 		/*
226 		 * Write is finished, send it up.
227 		 */
228 		pbp->bio_completed = pbp->bio_length;
229 	sc = pbp->bio_to->geom->softc;
230 	g_io_deliver(pbp, pbp->bio_error);
231 	atomic_subtract_int(&sc->sc_inflight, 1);
232 }
233 
234 /*
235  * This function should never be called, but GEOM insists that an ->orphan()
236  * method is set for every geom.
237  */
238 static void
239 g_eli_orphan_spoil_assert(struct g_consumer *cp)
240 {
241 
242 	panic("Function %s() called for %s.", __func__, cp->geom->name);
243 }
244 
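/*
 * Called when the underlying provider goes away; destroy our device as well.
 */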
245 static void
246 g_eli_orphan(struct g_consumer *cp)
247 {
248 	struct g_eli_softc *sc;
249 
250 	g_topology_assert();
251 	sc = cp->geom->softc;
252 	if (sc == NULL)
253 		return;
254 	g_eli_destroy(sc, TRUE);
255 }
256 
257 /*
258  * BIO_READ:
259  *	G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
260  * BIO_WRITE:
261  *	G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
262  */
263 static void
264 g_eli_start(struct bio *bp)
265 {
266 	struct g_eli_softc *sc;
267 	struct g_consumer *cp;
268 	struct bio *cbp;
269 
270 	sc = bp->bio_to->geom->softc;
271 	KASSERT(sc != NULL,
272 	    ("Provider's error should be set (error=%d)(device=%s).",
273 	    bp->bio_to->error, bp->bio_to->name));
274 	G_ELI_LOGREQ(2, bp, "Request received.");
275 
276 	switch (bp->bio_cmd) {
277 	case BIO_READ:
278 	case BIO_WRITE:
279 	case BIO_GETATTR:
280 	case BIO_FLUSH:
281 		break;
282 	case BIO_DELETE:
283 		/*
284 		 * We could eventually support BIO_DELETE request.
285 		 * It could be done by overwriting the requested sectors with
286 		 * random data g_eli_overwrites times.
287 		 */
288 	default:
289 		g_io_deliver(bp, EOPNOTSUPP);
290 		return;
291 	}
292 	cbp = g_clone_bio(bp);
293 	if (cbp == NULL) {
294 		g_io_deliver(bp, ENOMEM);
295 		return;
296 	}
297 	bp->bio_driver1 = cbp;
298 	bp->bio_pflags = G_ELI_NEW_BIO;
299 	switch (bp->bio_cmd) {
300 	case BIO_READ:
301 		if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
302 			g_eli_crypto_read(sc, bp, 0);
303 			break;
304 		}
305 		/* FALLTHROUGH */
306 	case BIO_WRITE:
307 		mtx_lock(&sc->sc_queue_mtx);
308 		bioq_insert_tail(&sc->sc_queue, bp);
309 		mtx_unlock(&sc->sc_queue_mtx);
310 		wakeup(sc);
311 		break;
312 	case BIO_GETATTR:
313 	case BIO_FLUSH:
314 		cbp->bio_done = g_std_done;
315 		cp = LIST_FIRST(&sc->sc_geom->consumer);
316 		cbp->bio_to = cp->provider;
317 		G_ELI_LOGREQ(2, cbp, "Sending request.");
318 		g_io_request(cbp, cp);
319 		break;
320 	}
321 }
322 
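/*
 * Allocate a crypto(9) session for the worker.  Depending on sc_crypto we
 * either request a software driver, a hardware driver, or probe for hardware
 * first and fall back to software, remembering the outcome for the other
 * workers.
 */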
323 static int
324 g_eli_newsession(struct g_eli_worker *wr)
325 {
326 	struct g_eli_softc *sc;
327 	struct cryptoini crie, cria;
328 	int error;
329 
330 	sc = wr->w_softc;
331 
332 	bzero(&crie, sizeof(crie));
333 	crie.cri_alg = sc->sc_ealgo;
334 	crie.cri_klen = sc->sc_ekeylen;
335 	if (sc->sc_ealgo == CRYPTO_AES_XTS)
336 		crie.cri_klen <<= 1;
337 	if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) {
338 		crie.cri_key = g_eli_key_hold(sc, 0,
339 		    LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize);
340 	} else {
341 		crie.cri_key = sc->sc_ekey;
342 	}
343 	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
344 		bzero(&cria, sizeof(cria));
345 		cria.cri_alg = sc->sc_aalgo;
346 		cria.cri_klen = sc->sc_akeylen;
347 		cria.cri_key = sc->sc_akey;
348 		crie.cri_next = &cria;
349 	}
350 
351 	switch (sc->sc_crypto) {
352 	case G_ELI_CRYPTO_SW:
353 		error = crypto_newsession(&wr->w_sid, &crie,
354 		    CRYPTOCAP_F_SOFTWARE);
355 		break;
356 	case G_ELI_CRYPTO_HW:
357 		error = crypto_newsession(&wr->w_sid, &crie,
358 		    CRYPTOCAP_F_HARDWARE);
359 		break;
360 	case G_ELI_CRYPTO_UNKNOWN:
361 		error = crypto_newsession(&wr->w_sid, &crie,
362 		    CRYPTOCAP_F_HARDWARE);
363 		if (error == 0) {
364 			mtx_lock(&sc->sc_queue_mtx);
365 			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
366 				sc->sc_crypto = G_ELI_CRYPTO_HW;
367 			mtx_unlock(&sc->sc_queue_mtx);
368 		} else {
369 			error = crypto_newsession(&wr->w_sid, &crie,
370 			    CRYPTOCAP_F_SOFTWARE);
371 			mtx_lock(&sc->sc_queue_mtx);
372 			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
373 				sc->sc_crypto = G_ELI_CRYPTO_SW;
374 			mtx_unlock(&sc->sc_queue_mtx);
375 		}
376 		break;
377 	default:
378 		panic("%s: invalid condition", __func__);
379 	}
380 
381 	if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0)
382 		g_eli_key_drop(sc, crie.cri_key);
383 
384 	return (error);
385 }
386 
387 static void
388 g_eli_freesession(struct g_eli_worker *wr)
389 {
390 
391 	crypto_freesession(wr->w_sid);
392 }
393 
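/*
 * Complete all queued BIOs that have not been started yet with ENXIO.
 * Called with sc_queue_mtx held when the device is going away.
 */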
394 static void
395 g_eli_cancel(struct g_eli_softc *sc)
396 {
397 	struct bio *bp;
398 
399 	mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
400 
401 	while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
402 		KASSERT(bp->bio_pflags == G_ELI_NEW_BIO,
403 		    ("Not new bio when canceling (bp=%p).", bp));
404 		g_io_deliver(bp, ENXIO);
405 	}
406 }
407 
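/*
 * Pick the next BIO to process.  While the device is suspended, only BIOs
 * that are already in flight are returned and brand new requests are skipped.
 */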
408 static struct bio *
409 g_eli_takefirst(struct g_eli_softc *sc)
410 {
411 	struct bio *bp;
412 
413 	mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
414 
415 	if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND))
416 		return (bioq_takefirst(&sc->sc_queue));
417 	/*
418 	 * The device is suspended, so skip new I/O requests.
419 	 */
420 	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
421 		if (bp->bio_pflags != G_ELI_NEW_BIO)
422 			break;
423 	}
424 	if (bp != NULL)
425 		bioq_remove(&sc->sc_queue, bp);
426 	return (bp);
427 }
428 
429 /*
430  * This is the main function of the kernel worker thread when we don't have
431  * hardware acceleration and have to do the cryptography in software.
432  * A dedicated thread is needed so we don't slow down the g_up/g_down GEOM
433  * threads with crypto work.
434  */
435 static void
436 g_eli_worker(void *arg)
437 {
438 	struct g_eli_softc *sc;
439 	struct g_eli_worker *wr;
440 	struct bio *bp;
441 	int error;
442 
443 	wr = arg;
444 	sc = wr->w_softc;
445 #ifdef SMP
446 	/* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
447 	if (sc->sc_cpubind) {
448 		while (!smp_started)
449 			tsleep(wr, 0, "geli:smp", hz / 4);
450 	}
451 #endif
452 	thread_lock(curthread);
453 	sched_prio(curthread, PUSER);
454 	if (sc->sc_cpubind)
455 		sched_bind(curthread, wr->w_number % mp_ncpus);
456 	thread_unlock(curthread);
457 
458 	G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);
459 
460 	for (;;) {
461 		mtx_lock(&sc->sc_queue_mtx);
462 again:
463 		bp = g_eli_takefirst(sc);
464 		if (bp == NULL) {
465 			if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
466 				g_eli_cancel(sc);
467 				LIST_REMOVE(wr, w_next);
468 				g_eli_freesession(wr);
469 				free(wr, M_ELI);
470 				G_ELI_DEBUG(1, "Thread %s exiting.",
471 				    curthread->td_proc->p_comm);
472 				wakeup(&sc->sc_workers);
473 				mtx_unlock(&sc->sc_queue_mtx);
474 				kproc_exit(0);
475 			}
476 			while (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
477 				if (sc->sc_inflight > 0) {
478 					G_ELI_DEBUG(0, "inflight=%d",
479 					    sc->sc_inflight);
480 					/*
481 					 * We still have inflight BIOs, so
482 					 * sleep and retry.
483 					 */
484 					msleep(sc, &sc->sc_queue_mtx, PRIBIO,
485 					    "geli:inf", hz / 5);
486 					goto again;
487 				}
488 				/*
489 				 * Suspend requested, mark the worker as
490 				 * suspended and go to sleep.
491 				 */
492 				if (wr->w_active) {
493 					g_eli_freesession(wr);
494 					wr->w_active = FALSE;
495 				}
496 				wakeup(&sc->sc_workers);
497 				msleep(sc, &sc->sc_queue_mtx, PRIBIO,
498 				    "geli:suspend", 0);
499 				if (!wr->w_active &&
500 				    !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) {
501 					error = g_eli_newsession(wr);
502 					KASSERT(error == 0,
503 					    ("g_eli_newsession() failed on resume (error=%d)",
504 					    error));
505 					wr->w_active = TRUE;
506 				}
507 				goto again;
508 			}
509 			msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0);
510 			continue;
511 		}
512 		if (bp->bio_pflags == G_ELI_NEW_BIO)
513 			atomic_add_int(&sc->sc_inflight, 1);
514 		mtx_unlock(&sc->sc_queue_mtx);
515 		if (bp->bio_pflags == G_ELI_NEW_BIO) {
516 			bp->bio_pflags = 0;
517 			if (sc->sc_flags & G_ELI_FLAG_AUTH) {
518 				if (bp->bio_cmd == BIO_READ)
519 					g_eli_auth_read(sc, bp);
520 				else
521 					g_eli_auth_run(wr, bp);
522 			} else {
523 				if (bp->bio_cmd == BIO_READ)
524 					g_eli_crypto_read(sc, bp, 1);
525 				else
526 					g_eli_crypto_run(wr, bp);
527 			}
528 		} else {
529 			if (sc->sc_flags & G_ELI_FLAG_AUTH)
530 				g_eli_auth_run(wr, bp);
531 			else
532 				g_eli_crypto_run(wr, bp);
533 		}
534 	}
535 }
536 
537 /*
538  * Here we generate the IV. It is unique for every sector.
539  */
540 void
541 g_eli_crypto_ivgen(struct g_eli_softc *sc, off_t offset, u_char *iv,
542     size_t size)
543 {
544 	uint8_t off[8];
545 
546 	if ((sc->sc_flags & G_ELI_FLAG_NATIVE_BYTE_ORDER) != 0)
547 		bcopy(&offset, off, sizeof(off));
548 	else
549 		le64enc(off, (uint64_t)offset);
550 
551 	switch (sc->sc_ealgo) {
552 	case CRYPTO_AES_XTS:
553 		bcopy(off, iv, sizeof(off));
554 		bzero(iv + sizeof(off), size - sizeof(off));
555 		break;
556 	default:
557 	    {
558 		u_char hash[SHA256_DIGEST_LENGTH];
559 		SHA256_CTX ctx;
560 
561 		/* Copy precalculated SHA256 context for IV-Key. */
562 		bcopy(&sc->sc_ivctx, &ctx, sizeof(ctx));
563 		SHA256_Update(&ctx, off, sizeof(off));
564 		SHA256_Final(hash, &ctx);
565 		bcopy(hash, iv, MIN(sizeof(hash), size));
566 		break;
567 	    }
568 	}
569 }
570 
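/*
 * Read and decode the GELI metadata stored in the provider's last sector,
 * using a temporary geom and consumer created just for this purpose.
 */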
571 int
572 g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
573     struct g_eli_metadata *md)
574 {
575 	struct g_geom *gp;
576 	struct g_consumer *cp;
577 	u_char *buf = NULL;
578 	int error;
579 
580 	g_topology_assert();
581 
582 	gp = g_new_geomf(mp, "eli:taste");
583 	gp->start = g_eli_start;
584 	gp->access = g_std_access;
585 	/*
586 	 * g_eli_read_metadata() is always called from the event thread.
587  * Our geom is created and destroyed in the same event, so there can be
588  * no orphan or spoil event in the meantime.
589 	 */
590 	gp->orphan = g_eli_orphan_spoil_assert;
591 	gp->spoiled = g_eli_orphan_spoil_assert;
592 	cp = g_new_consumer(gp);
593 	error = g_attach(cp, pp);
594 	if (error != 0)
595 		goto end;
596 	error = g_access(cp, 1, 0, 0);
597 	if (error != 0)
598 		goto end;
599 	g_topology_unlock();
600 	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
601 	    &error);
602 	g_topology_lock();
603 	if (buf == NULL)
604 		goto end;
605 	eli_metadata_decode(buf, md);
606 end:
607 	if (buf != NULL)
608 		g_free(buf);
609 	if (cp->provider != NULL) {
610 		if (cp->acr == 1)
611 			g_access(cp, -1, 0, 0);
612 		g_detach(cp);
613 	}
614 	g_destroy_consumer(cp);
615 	g_destroy_geom(gp);
616 	return (error);
617 }
618 
619 /*
620  * This function is called on the last close of the provider when the user
621  * has requested that the device be detached in that situation.
622  */
623 static void
624 g_eli_last_close(void *arg, int flags __unused)
625 {
626 	struct g_geom *gp;
627 	char gpname[64];
628 	int error;
629 
630 	g_topology_assert();
631 	gp = arg;
632 	strlcpy(gpname, gp->name, sizeof(gpname));
633 	error = g_eli_destroy(gp->softc, TRUE);
634 	KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
635 	    gpname, error));
636 	G_ELI_DEBUG(0, "Detached %s on last close.", gpname);
637 }
638 
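/*
 * Access method used when we need to track writers (so the device can be
 * detached on last close) or when the provider is read-only.
 */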
639 int
640 g_eli_access(struct g_provider *pp, int dr, int dw, int de)
641 {
642 	struct g_eli_softc *sc;
643 	struct g_geom *gp;
644 
645 	gp = pp->geom;
646 	sc = gp->softc;
647 
648 	if (dw > 0) {
649 		if (sc->sc_flags & G_ELI_FLAG_RO) {
650 			/* Deny write attempts. */
651 			return (EROFS);
652 		}
653 		/* Someone is opening us for write, we need to remember that. */
654 		sc->sc_flags |= G_ELI_FLAG_WOPEN;
655 		return (0);
656 	}
657 	/* Is this the last close? */
658 	if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
659 		return (0);
660 
661 	/*
662 	 * Automatically detach on last close if requested.
663 	 */
664 	if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
665 	    (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
666 		g_post_event(g_eli_last_close, gp, M_WAITOK, NULL);
667 	}
668 	return (0);
669 }
670 
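/* Check whether the given CPU is halted and should not run a worker thread. */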
671 static int
672 g_eli_cpu_is_disabled(int cpu)
673 {
674 #ifdef SMP
675 	return (CPU_ISSET(cpu, &hlt_cpus_mask));
676 #else
677 	return (0);
678 #endif
679 }
680 
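/*
 * Create the encrypted device on top of the given provider: set up the softc,
 * attach and open the backing provider, start the worker threads (one per CPU
 * unless configured otherwise) and finally announce the decrypted provider.
 */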
681 struct g_geom *
682 g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
683     const struct g_eli_metadata *md, const u_char *mkey, int nkey)
684 {
685 	struct g_eli_softc *sc;
686 	struct g_eli_worker *wr;
687 	struct g_geom *gp;
688 	struct g_provider *pp;
689 	struct g_consumer *cp;
690 	u_int i, threads;
691 	int error;
692 
693 	G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);
694 
695 	gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
696 	sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
697 	gp->start = g_eli_start;
698 	/*
699 	 * Spoiling cannot actually happen, because we either keep the provider
700 	 * open for writing all the time or the provider is read-only.
701 	 */
702 	gp->spoiled = g_eli_orphan_spoil_assert;
703 	gp->orphan = g_eli_orphan;
704 	gp->dumpconf = g_eli_dumpconf;
705 	/*
706 	 * If the detach-on-last-close feature is not enabled and we don't operate
707 	 * on a read-only provider, we can simply use g_std_access().
708 	 */
709 	if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
710 		gp->access = g_eli_access;
711 	else
712 		gp->access = g_std_access;
713 
714 	sc->sc_version = md->md_version;
715 	sc->sc_inflight = 0;
716 	sc->sc_crypto = G_ELI_CRYPTO_UNKNOWN;
717 	sc->sc_flags = md->md_flags;
718 	/* Backward compatibility. */
719 	if (md->md_version < G_ELI_VERSION_04)
720 		sc->sc_flags |= G_ELI_FLAG_NATIVE_BYTE_ORDER;
721 	if (md->md_version < G_ELI_VERSION_05)
722 		sc->sc_flags |= G_ELI_FLAG_SINGLE_KEY;
723 	if (md->md_version < G_ELI_VERSION_06 &&
724 	    (sc->sc_flags & G_ELI_FLAG_AUTH) != 0) {
725 		sc->sc_flags |= G_ELI_FLAG_FIRST_KEY;
726 	}
727 	if (md->md_version < G_ELI_VERSION_07)
728 		sc->sc_flags |= G_ELI_FLAG_ENC_IVKEY;
729 	sc->sc_ealgo = md->md_ealgo;
730 	sc->sc_nkey = nkey;
731 
732 	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
733 		sc->sc_akeylen = sizeof(sc->sc_akey) * 8;
734 		sc->sc_aalgo = md->md_aalgo;
735 		sc->sc_alen = g_eli_hashlen(sc->sc_aalgo);
736 
737 		sc->sc_data_per_sector = bpp->sectorsize - sc->sc_alen;
738 		/*
739 		 * Some hash functions (like SHA1 and RIPEMD160) generate a hash
740 		 * whose length is not a multiple of 128 bits, but we want the data
741 		 * length to be a multiple of 128 bits, so we can encrypt without
742 		 * padding. The line below rounds the data length down to a
743 		 * multiple of 128 bits.
744 		 */
745 		sc->sc_data_per_sector -= sc->sc_data_per_sector % 16;
746 
747 		sc->sc_bytes_per_sector =
748 		    (md->md_sectorsize - 1) / sc->sc_data_per_sector + 1;
749 		sc->sc_bytes_per_sector *= bpp->sectorsize;
750 	}
751 
752 	gp->softc = sc;
753 	sc->sc_geom = gp;
754 
755 	bioq_init(&sc->sc_queue);
756 	mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);
757 	mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF);
758 
759 	pp = NULL;
760 	cp = g_new_consumer(gp);
761 	error = g_attach(cp, bpp);
762 	if (error != 0) {
763 		if (req != NULL) {
764 			gctl_error(req, "Cannot attach to %s (error=%d).",
765 			    bpp->name, error);
766 		} else {
767 			G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
768 			    bpp->name, error);
769 		}
770 		goto failed;
771 	}
772 	/*
773 	 * Keep the provider open all the time, so we can run critical tasks,
774 	 * like Master Key deletion, without wondering whether we can open the
775 	 * provider or not.
776 	 * We only skip opening the provider for writing when the user requested
777 	 * read-only access.
778 	 */
779 	if (sc->sc_flags & G_ELI_FLAG_RO)
780 		error = g_access(cp, 1, 0, 1);
781 	else
782 		error = g_access(cp, 1, 1, 1);
783 	if (error != 0) {
784 		if (req != NULL) {
785 			gctl_error(req, "Cannot access %s (error=%d).",
786 			    bpp->name, error);
787 		} else {
788 			G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
789 			    bpp->name, error);
790 		}
791 		goto failed;
792 	}
793 
794 	sc->sc_sectorsize = md->md_sectorsize;
795 	sc->sc_mediasize = bpp->mediasize;
796 	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME))
797 		sc->sc_mediasize -= bpp->sectorsize;
798 	if (!(sc->sc_flags & G_ELI_FLAG_AUTH))
799 		sc->sc_mediasize -= (sc->sc_mediasize % sc->sc_sectorsize);
800 	else {
801 		sc->sc_mediasize /= sc->sc_bytes_per_sector;
802 		sc->sc_mediasize *= sc->sc_sectorsize;
803 	}
804 
805 	/*
806 	 * Remember the keys in our softc structure.
807 	 */
808 	g_eli_mkey_propagate(sc, mkey);
809 	sc->sc_ekeylen = md->md_keylen;
810 
811 	LIST_INIT(&sc->sc_workers);
812 
813 	threads = g_eli_threads;
814 	if (threads == 0)
815 		threads = mp_ncpus;
816 	sc->sc_cpubind = (mp_ncpus > 1 && threads == mp_ncpus);
817 	for (i = 0; i < threads; i++) {
818 		if (g_eli_cpu_is_disabled(i)) {
819 			G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
820 			    bpp->name, i);
821 			continue;
822 		}
823 		wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
824 		wr->w_softc = sc;
825 		wr->w_number = i;
826 		wr->w_active = TRUE;
827 
828 		error = g_eli_newsession(wr);
829 		if (error != 0) {
830 			free(wr, M_ELI);
831 			if (req != NULL) {
832 				gctl_error(req, "Cannot set up crypto session "
833 				    "for %s (error=%d).", bpp->name, error);
834 			} else {
835 				G_ELI_DEBUG(1, "Cannot set up crypto session "
836 				    "for %s (error=%d).", bpp->name, error);
837 			}
838 			goto failed;
839 		}
840 
841 		error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
842 		    "g_eli[%u] %s", i, bpp->name);
843 		if (error != 0) {
844 			g_eli_freesession(wr);
845 			free(wr, M_ELI);
846 			if (req != NULL) {
847 				gctl_error(req, "Cannot create kernel thread "
848 				    "for %s (error=%d).", bpp->name, error);
849 			} else {
850 				G_ELI_DEBUG(1, "Cannot create kernel thread "
851 				    "for %s (error=%d).", bpp->name, error);
852 			}
853 			goto failed;
854 		}
855 		LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
856 	}
857 
858 	/*
859 	 * Create decrypted provider.
860 	 */
861 	pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
862 	pp->mediasize = sc->sc_mediasize;
863 	pp->sectorsize = sc->sc_sectorsize;
864 
865 	g_error_provider(pp, 0);
866 
867 	G_ELI_DEBUG(0, "Device %s created.", pp->name);
868 	G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
869 	    sc->sc_ekeylen);
870 	if (sc->sc_flags & G_ELI_FLAG_AUTH)
871 		G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
872 	G_ELI_DEBUG(0, "    Crypto: %s",
873 	    sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
874 	return (gp);
875 failed:
876 	mtx_lock(&sc->sc_queue_mtx);
877 	sc->sc_flags |= G_ELI_FLAG_DESTROY;
878 	wakeup(sc);
879 	/*
880 	 * Wait for the kernel threads to self-destruct.
881 	 */
882 	while (!LIST_EMPTY(&sc->sc_workers)) {
883 		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
884 		    "geli:destroy", 0);
885 	}
886 	mtx_destroy(&sc->sc_queue_mtx);
887 	if (cp->provider != NULL) {
888 		if (cp->acr == 1)
889 			g_access(cp, -1, -1, -1);
890 		g_detach(cp);
891 	}
892 	g_destroy_consumer(cp);
893 	g_destroy_geom(gp);
894 	g_eli_key_destroy(sc);
895 	bzero(sc, sizeof(*sc));
896 	free(sc, M_ELI);
897 	return (NULL);
898 }
899 
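/*
 * Tear the device down.  If it is still open we either refuse (no force) or
 * wither the provider so it detaches on last close (force).  Otherwise tell
 * the worker threads to exit, destroy the keys and wither the geom.
 */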
900 int
901 g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
902 {
903 	struct g_geom *gp;
904 	struct g_provider *pp;
905 
906 	g_topology_assert();
907 
908 	if (sc == NULL)
909 		return (ENXIO);
910 
911 	gp = sc->sc_geom;
912 	pp = LIST_FIRST(&gp->provider);
913 	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
914 		if (force) {
915 			G_ELI_DEBUG(1, "Device %s is still open, so it "
916 			    "cannot be definitely removed.", pp->name);
917 			sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
918 			gp->access = g_eli_access;
919 			g_wither_provider(pp, ENXIO);
920 			return (EBUSY);
921 		} else {
922 			G_ELI_DEBUG(1,
923 			    "Device %s is still open (r%dw%de%d).", pp->name,
924 			    pp->acr, pp->acw, pp->ace);
925 			return (EBUSY);
926 		}
927 	}
928 
929 	mtx_lock(&sc->sc_queue_mtx);
930 	sc->sc_flags |= G_ELI_FLAG_DESTROY;
931 	wakeup(sc);
932 	while (!LIST_EMPTY(&sc->sc_workers)) {
933 		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
934 		    "geli:destroy", 0);
935 	}
936 	mtx_destroy(&sc->sc_queue_mtx);
937 	gp->softc = NULL;
938 	g_eli_key_destroy(sc);
939 	bzero(sc, sizeof(*sc));
940 	free(sc, M_ELI);
941 
942 	if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
943 		G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
944 	g_wither_geom_close(gp, ENXIO);
945 
946 	return (0);
947 }
948 
949 static int
950 g_eli_destroy_geom(struct gctl_req *req __unused,
951     struct g_class *mp __unused, struct g_geom *gp)
952 {
953 	struct g_eli_softc *sc;
954 
955 	sc = gp->softc;
956 	return (g_eli_destroy(sc, FALSE));
957 }
958 
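/*
 * Feed all key files preloaded by the loader for the given provider into the
 * HMAC context and clear them from memory.  Returns the number of key files
 * loaded, or 0 on error.
 */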
959 static int
960 g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
961 {
962 	u_char *keyfile, *data;
963 	char *file, name[64];
964 	size_t size;
965 	int i;
966 
967 	for (i = 0; ; i++) {
968 		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
969 		keyfile = preload_search_by_type(name);
970 		if (keyfile == NULL)
971 			return (i);	/* Return number of loaded keyfiles. */
972 		data = preload_fetch_addr(keyfile);
973 		if (data == NULL) {
974 			G_ELI_DEBUG(0, "Cannot find key file data for %s.",
975 			    name);
976 			return (0);
977 		}
978 		size = preload_fetch_size(keyfile);
979 		if (size == 0) {
980 			G_ELI_DEBUG(0, "Cannot find key file size for %s.",
981 			    name);
982 			return (0);
983 		}
984 		file = preload_search_info(keyfile, MODINFO_NAME);
985 		if (file == NULL) {
986 			G_ELI_DEBUG(0, "Cannot find key file name for %s.",
987 			    name);
988 			return (0);
989 		}
990 		G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
991 		    provider, name);
992 		g_eli_crypto_hmac_update(ctx, data, size);
993 		bzero(data, size);
994 	}
995 }
996 
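/*
 * Zero out the preloaded key file contents for the given provider so they
 * don't linger in memory.
 */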
997 static void
998 g_eli_keyfiles_clear(const char *provider)
999 {
1000 	u_char *keyfile, *data;
1001 	char name[64];
1002 	size_t size;
1003 	int i;
1004 
1005 	for (i = 0; ; i++) {
1006 		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
1007 		keyfile = preload_search_by_type(name);
1008 		if (keyfile == NULL)
1009 			return;
1010 		data = preload_fetch_addr(keyfile);
1011 		size = preload_fetch_size(keyfile);
1012 		if (data != NULL && size != 0)
1013 			bzero(data, size);
1014 	}
1015 }
1016 
1017 /*
1018  * Tasting is only done at boot time.
1019  * We detect providers which should be attached before root is mounted.
1020  */
1021 static struct g_geom *
1022 g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
1023 {
1024 	struct g_eli_metadata md;
1025 	struct g_geom *gp;
1026 	struct hmac_ctx ctx;
1027 	char passphrase[256];
1028 	u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
1029 	u_int i, nkey, nkeyfiles, tries;
1030 	int error;
1031 
1032 	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
1033 	g_topology_assert();
1034 
1035 	if (root_mounted() || g_eli_tries == 0)
1036 		return (NULL);
1037 
1038 	G_ELI_DEBUG(3, "Tasting %s.", pp->name);
1039 
1040 	error = g_eli_read_metadata(mp, pp, &md);
1041 	if (error != 0)
1042 		return (NULL);
1043 	gp = NULL;
1044 
1045 	if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
1046 		return (NULL);
1047 	if (md.md_version > G_ELI_VERSION) {
1048 		printf("geom_eli.ko module is too old to handle %s.\n",
1049 		    pp->name);
1050 		return (NULL);
1051 	}
1052 	if (md.md_provsize != pp->mediasize)
1053 		return (NULL);
1054 	/* Should we attach it on boot? */
1055 	if (!(md.md_flags & G_ELI_FLAG_BOOT))
1056 		return (NULL);
1057 	if (md.md_keys == 0x00) {
1058 		G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
1059 		return (NULL);
1060 	}
1061 	if (md.md_iterations == -1) {
1062 		/* If there is no passphrase, we try only once. */
1063 		tries = 1;
1064 	} else {
1065 		/* Ask for the passphrase no more than g_eli_tries times. */
1066 		tries = g_eli_tries;
1067 	}
1068 
1069 	for (i = 0; i < tries; i++) {
1070 		g_eli_crypto_hmac_init(&ctx, NULL, 0);
1071 
1072 		/*
1073 		 * Load all key files.
1074 		 */
1075 		nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);
1076 
1077 		if (nkeyfiles == 0 && md.md_iterations == -1) {
1078 			/*
1079 			 * No key files and no passphrase: something is
1080 			 * definitely wrong here.
1081 			 * geli(8) doesn't allow for such a situation, so assume
1082 			 * that there really was no passphrase and that the
1083 			 * key files are not properly defined in loader.conf.
1084 			 */
1085 			G_ELI_DEBUG(0,
1086 			    "Found no key files in loader.conf for %s.",
1087 			    pp->name);
1088 			return (NULL);
1089 		}
1090 
1091 		/* Ask for the passphrase if defined. */
1092 		if (md.md_iterations >= 0) {
1093 			printf("Enter passphrase for %s: ", pp->name);
1094 			cngets(passphrase, sizeof(passphrase),
1095 			    g_eli_visible_passphrase);
1096 		}
1097 
1098 		/*
1099 		 * Prepare Derived-Key from the user passphrase.
1100 		 */
1101 		if (md.md_iterations == 0) {
1102 			g_eli_crypto_hmac_update(&ctx, md.md_salt,
1103 			    sizeof(md.md_salt));
1104 			g_eli_crypto_hmac_update(&ctx, passphrase,
1105 			    strlen(passphrase));
1106 			bzero(passphrase, sizeof(passphrase));
1107 		} else if (md.md_iterations > 0) {
1108 			u_char dkey[G_ELI_USERKEYLEN];
1109 
1110 			pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
1111 			    sizeof(md.md_salt), passphrase, md.md_iterations);
1112 			bzero(passphrase, sizeof(passphrase));
1113 			g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
1114 			bzero(dkey, sizeof(dkey));
1115 		}
1116 
1117 		g_eli_crypto_hmac_final(&ctx, key, 0);
1118 
1119 		/*
1120 		 * Decrypt Master-Key.
1121 		 */
1122 		error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
1123 		bzero(key, sizeof(key));
1124 		if (error == -1) {
1125 			if (i == tries - 1) {
1126 				G_ELI_DEBUG(0,
1127 				    "Wrong key for %s. No tries left.",
1128 				    pp->name);
1129 				g_eli_keyfiles_clear(pp->name);
1130 				return (NULL);
1131 			}
1132 			G_ELI_DEBUG(0, "Wrong key for %s. Tries left: %u.",
1133 			    pp->name, tries - i - 1);
1134 			/* Try again. */
1135 			continue;
1136 		} else if (error > 0) {
1137 			G_ELI_DEBUG(0,
1138 			    "Cannot decrypt Master Key for %s (error=%d).",
1139 			    pp->name, error);
1140 			g_eli_keyfiles_clear(pp->name);
1141 			return (NULL);
1142 		}
1143 		G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
1144 		break;
1145 	}
1146 
1147 	/*
1148 	 * We have the correct key, let's attach the provider.
1149 	 */
1150 	gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
1151 	bzero(mkey, sizeof(mkey));
1152 	bzero(&md, sizeof(md));
1153 	if (gp == NULL) {
1154 		G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
1155 		    G_ELI_SUFFIX);
1156 		return (NULL);
1157 	}
1158 	return (gp);
1159 }
1160 
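/*
 * Dump the device configuration (flags, key usage, algorithms, state) into
 * the XML exported through the GEOM confxml interface.
 */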
1161 static void
1162 g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1163     struct g_consumer *cp, struct g_provider *pp)
1164 {
1165 	struct g_eli_softc *sc;
1166 
1167 	g_topology_assert();
1168 	sc = gp->softc;
1169 	if (sc == NULL)
1170 		return;
1171 	if (pp != NULL || cp != NULL)
1172 		return;	/* Nothing here. */
1173 
1174 	sbuf_printf(sb, "%s<KeysTotal>%ju</KeysTotal>\n", indent,
1175 	    (uintmax_t)sc->sc_ekeys_total);
1176 	sbuf_printf(sb, "%s<KeysAllocated>%ju</KeysAllocated>\n", indent,
1177 	    (uintmax_t)sc->sc_ekeys_allocated);
1178 	sbuf_printf(sb, "%s<Flags>", indent);
1179 	if (sc->sc_flags == 0)
1180 		sbuf_printf(sb, "NONE");
1181 	else {
1182 		int first = 1;
1183 
1184 #define ADD_FLAG(flag, name)	do {					\
1185 	if (sc->sc_flags & (flag)) {					\
1186 		if (!first)						\
1187 			sbuf_printf(sb, ", ");				\
1188 		else							\
1189 			first = 0;					\
1190 		sbuf_printf(sb, name);					\
1191 	}								\
1192 } while (0)
1193 		ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND");
1194 		ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY");
1195 		ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
1196 		ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
1197 		ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
1198 		ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
1199 		ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
1200 		ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
1201 		ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
1202 		ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
1203 		ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
1204 #undef  ADD_FLAG
1205 	}
1206 	sbuf_printf(sb, "</Flags>\n");
1207 
1208 	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
1209 		sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
1210 		    sc->sc_nkey);
1211 	}
1212 	sbuf_printf(sb, "%s<Version>%u</Version>\n", indent, sc->sc_version);
1213 	sbuf_printf(sb, "%s<Crypto>", indent);
1214 	switch (sc->sc_crypto) {
1215 	case G_ELI_CRYPTO_HW:
1216 		sbuf_printf(sb, "hardware");
1217 		break;
1218 	case G_ELI_CRYPTO_SW:
1219 		sbuf_printf(sb, "software");
1220 		break;
1221 	default:
1222 		sbuf_printf(sb, "UNKNOWN");
1223 		break;
1224 	}
1225 	sbuf_printf(sb, "</Crypto>\n");
1226 	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
1227 		sbuf_printf(sb,
1228 		    "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
1229 		    indent, g_eli_algo2str(sc->sc_aalgo));
1230 	}
1231 	sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
1232 	    sc->sc_ekeylen);
1233 	sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n",
1234 	    indent, g_eli_algo2str(sc->sc_ealgo));
1235 	sbuf_printf(sb, "%s<State>%s</State>\n", indent,
1236 	    (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE");
1237 }
1238 
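/*
 * On shutdown, detach all closed devices immediately and mark the open ones
 * to be detached on last close.
 */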
1239 static void
1240 g_eli_shutdown_pre_sync(void *arg, int howto)
1241 {
1242 	struct g_class *mp;
1243 	struct g_geom *gp, *gp2;
1244 	struct g_provider *pp;
1245 	struct g_eli_softc *sc;
1246 	int error;
1247 
1248 	mp = arg;
1249 	DROP_GIANT();
1250 	g_topology_lock();
1251 	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
1252 		sc = gp->softc;
1253 		if (sc == NULL)
1254 			continue;
1255 		pp = LIST_FIRST(&gp->provider);
1256 		KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
1257 		if (pp->acr + pp->acw + pp->ace == 0)
1258 			error = g_eli_destroy(sc, TRUE);
1259 		else {
1260 			sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
1261 			gp->access = g_eli_access;
1262 		}
1263 	}
1264 	g_topology_unlock();
1265 	PICKUP_GIANT();
1266 }
1267 
1268 static void
1269 g_eli_init(struct g_class *mp)
1270 {
1271 
1272 	g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
1273 	    g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
1274 	if (g_eli_pre_sync == NULL)
1275 		G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
1276 }
1277 
1278 static void
1279 g_eli_fini(struct g_class *mp)
1280 {
1281 
1282 	if (g_eli_pre_sync != NULL)
1283 		EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
1284 }
1285 
1286 DECLARE_GEOM_CLASS(g_eli_class, g_eli);
1287 MODULE_DEPEND(g_eli, crypto, 1, 1, 1);
1288