xref: /freebsd/sys/geom/eli/g_eli.c (revision 7431dfd4580e850375fe5478d92ec770344db098)
1 /*-
2  * Copyright (c) 2005-2011 Pawel Jakub Dawidek <pawel@dawidek.net>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/cons.h>
33 #include <sys/kernel.h>
34 #include <sys/linker.h>
35 #include <sys/module.h>
36 #include <sys/lock.h>
37 #include <sys/mutex.h>
38 #include <sys/bio.h>
39 #include <sys/sbuf.h>
40 #include <sys/sysctl.h>
41 #include <sys/malloc.h>
42 #include <sys/eventhandler.h>
43 #include <sys/kthread.h>
44 #include <sys/proc.h>
45 #include <sys/sched.h>
46 #include <sys/smp.h>
47 #include <sys/uio.h>
48 #include <sys/vnode.h>
49 
50 #include <vm/uma.h>
51 
52 #include <geom/geom.h>
53 #include <geom/eli/g_eli.h>
54 #include <geom/eli/pkcs5v2.h>
55 
56 FEATURE(geom_eli, "GEOM crypto module");
57 
58 MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data");
59 
60 SYSCTL_DECL(_kern_geom);
61 SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff");
62 static int g_eli_version = G_ELI_VERSION;
63 SYSCTL_INT(_kern_geom_eli, OID_AUTO, version, CTLFLAG_RD, &g_eli_version, 0,
64     "GELI version");
65 int g_eli_debug = 0;
66 SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RWTUN, &g_eli_debug, 0,
67     "Debug level");
68 static u_int g_eli_tries = 3;
69 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RWTUN, &g_eli_tries, 0,
70     "Number of tries for entering the passphrase");
71 static u_int g_eli_visible_passphrase = GETS_NOECHO;
72 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RWTUN,
73     &g_eli_visible_passphrase, 0,
74     "Visibility of passphrase prompt (0 = invisible, 1 = visible, 2 = asterisk)");
75 u_int g_eli_overwrites = G_ELI_OVERWRITES;
76 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RWTUN, &g_eli_overwrites,
77     0, "Number of times on-disk keys should be overwritten when destroying them");
78 static u_int g_eli_threads = 0;
79 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RWTUN, &g_eli_threads, 0,
80     "Number of threads doing crypto work");
81 u_int g_eli_batch = 0;
82 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RWTUN, &g_eli_batch, 0,
83     "Use crypto operations batching");
84 
85 /*
86  * The passphrase is cached during boot, in order to be more user-friendly
87  * when there are multiple providers using the same passphrase.
88  */
89 static char cached_passphrase[256];
90 static u_int g_eli_boot_passcache = 1;
91 TUNABLE_INT("kern.geom.eli.boot_passcache", &g_eli_boot_passcache);
92 SYSCTL_UINT(_kern_geom_eli, OID_AUTO, boot_passcache, CTLFLAG_RD,
93     &g_eli_boot_passcache, 0,
94     "Passphrases are cached during boot process for possible reuse");
95 static void
96 fetch_loader_passphrase(void * dummy)
97 {
98 	char * env_passphrase;
99 
100 	KASSERT(dynamic_kenv, ("need dynamic kenv"));
101 
102 	if ((env_passphrase = kern_getenv("kern.geom.eli.passphrase")) != NULL) {
103 		/* Extract passphrase from the environment. */
104 		strlcpy(cached_passphrase, env_passphrase,
105 		    sizeof(cached_passphrase));
106 		freeenv(env_passphrase);
107 
108 		/* Wipe the passphrase from the environment. */
109 		kern_unsetenv("kern.geom.eli.passphrase");
110 	}
111 }
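/*
 * A note on ordering (assumption based on the KASSERT above): this runs early
 * in boot, as soon as the dynamic kenv is available, so the loader-supplied
 * passphrase can be captured and wiped from the kernel environment as early
 * as possible.
 */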
112 SYSINIT(geli_fetch_loader_passphrase, SI_SUB_KMEM + 1, SI_ORDER_ANY,
113     fetch_loader_passphrase, NULL);
114 static void
115 zero_boot_passcache(void * dummy)
116 {
117 
118 	memset(cached_passphrase, 0, sizeof(cached_passphrase));
119 }
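/*
 * Wipe the cached boot passphrase once the root file system has been mounted
 * and it is no longer needed.
 */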
120 EVENTHANDLER_DEFINE(mountroot, zero_boot_passcache, NULL, 0);
121 
122 static eventhandler_tag g_eli_pre_sync = NULL;
123 
124 static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp,
125     struct g_geom *gp);
126 static void g_eli_init(struct g_class *mp);
127 static void g_eli_fini(struct g_class *mp);
128 
129 static g_taste_t g_eli_taste;
130 static g_dumpconf_t g_eli_dumpconf;
131 
132 struct g_class g_eli_class = {
133 	.name = G_ELI_CLASS_NAME,
134 	.version = G_VERSION,
135 	.ctlreq = g_eli_config,
136 	.taste = g_eli_taste,
137 	.destroy_geom = g_eli_destroy_geom,
138 	.init = g_eli_init,
139 	.fini = g_eli_fini
140 };
141 
142 
143 /*
144  * Code paths:
145  * BIO_READ:
146  *	g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
147  * BIO_WRITE:
148  *	g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
149  */
150 
151 
152 /*
153  * EAGAIN from crypto(9) means that we were probably rebalanced to another
154  * crypto accelerator or a similar device.
155  * The function updates the SID and reruns the operation.
156  */
157 int
158 g_eli_crypto_rerun(struct cryptop *crp)
159 {
160 	struct g_eli_softc *sc;
161 	struct g_eli_worker *wr;
162 	struct bio *bp;
163 	int error;
164 
165 	bp = (struct bio *)crp->crp_opaque;
166 	sc = bp->bio_to->geom->softc;
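	/*
	 * bio_pflags holds the number of the worker that issued this request;
	 * find that worker so its session ID can be updated.
	 */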
167 	LIST_FOREACH(wr, &sc->sc_workers, w_next) {
168 		if (wr->w_number == bp->bio_pflags)
169 			break;
170 	}
171 	KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags));
172 	G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).",
173 	    bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid,
174 	    (uintmax_t)crp->crp_sid);
175 	wr->w_sid = crp->crp_sid;
176 	crp->crp_etype = 0;
177 	error = crypto_dispatch(crp);
178 	if (error == 0)
179 		return (0);
180 	G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error);
181 	crp->crp_etype = error;
182 	return (error);
183 }
184 
185 /*
186  * This function is called after reading encrypted data from the provider.
187  *
188  * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
189  */
190 void
191 g_eli_read_done(struct bio *bp)
192 {
193 	struct g_eli_softc *sc;
194 	struct bio *pbp;
195 
196 	G_ELI_LOGREQ(2, bp, "Request done.");
197 	pbp = bp->bio_parent;
198 	if (pbp->bio_error == 0)
199 		pbp->bio_error = bp->bio_error;
200 	g_destroy_bio(bp);
201 	/*
202 	 * Do we have all sectors already?
203 	 */
204 	pbp->bio_inbed++;
205 	if (pbp->bio_inbed < pbp->bio_children)
206 		return;
207 	sc = pbp->bio_to->geom->softc;
208 	if (pbp->bio_error != 0) {
209 		G_ELI_LOGREQ(0, pbp, "%s() failed", __func__);
210 		pbp->bio_completed = 0;
211 		if (pbp->bio_driver2 != NULL) {
212 			free(pbp->bio_driver2, M_ELI);
213 			pbp->bio_driver2 = NULL;
214 		}
215 		g_io_deliver(pbp, pbp->bio_error);
216 		atomic_subtract_int(&sc->sc_inflight, 1);
217 		return;
218 	}
219 	mtx_lock(&sc->sc_queue_mtx);
220 	bioq_insert_tail(&sc->sc_queue, pbp);
221 	mtx_unlock(&sc->sc_queue_mtx);
222 	wakeup(sc);
223 }
224 
225 /*
226  * The function is called after we encrypt and write data.
227  *
228  * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver
229  */
230 void
231 g_eli_write_done(struct bio *bp)
232 {
233 	struct g_eli_softc *sc;
234 	struct bio *pbp;
235 
236 	G_ELI_LOGREQ(2, bp, "Request done.");
237 	pbp = bp->bio_parent;
238 	if (pbp->bio_error == 0) {
239 		if (bp->bio_error != 0)
240 			pbp->bio_error = bp->bio_error;
241 	}
242 	g_destroy_bio(bp);
243 	/*
244 	 * Do we have all sectors already?
245 	 */
246 	pbp->bio_inbed++;
247 	if (pbp->bio_inbed < pbp->bio_children)
248 		return;
249 	free(pbp->bio_driver2, M_ELI);
250 	pbp->bio_driver2 = NULL;
251 	if (pbp->bio_error != 0) {
252 		G_ELI_LOGREQ(0, pbp, "Crypto WRITE request failed (error=%d).",
253 		    pbp->bio_error);
254 		pbp->bio_completed = 0;
255 	}
256 	/*
257 	 * Write is finished, send it up.
258 	 */
259 	pbp->bio_completed = pbp->bio_length;
260 	sc = pbp->bio_to->geom->softc;
261 	g_io_deliver(pbp, pbp->bio_error);
262 	atomic_subtract_int(&sc->sc_inflight, 1);
263 }
264 
265 /*
266  * This function should never be called, but GEOM requires that an ->orphan()
267  * method be set for every geom.
268  */
269 static void
270 g_eli_orphan_spoil_assert(struct g_consumer *cp)
271 {
272 
273 	panic("Function %s() called for %s.", __func__, cp->geom->name);
274 }
275 
276 static void
277 g_eli_orphan(struct g_consumer *cp)
278 {
279 	struct g_eli_softc *sc;
280 
281 	g_topology_assert();
282 	sc = cp->geom->softc;
283 	if (sc == NULL)
284 		return;
285 	g_eli_destroy(sc, TRUE);
286 }
287 
288 /*
289  * BIO_READ:
290  *	G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
291  * BIO_WRITE:
292  *	G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver
293  */
294 static void
295 g_eli_start(struct bio *bp)
296 {
297 	struct g_eli_softc *sc;
298 	struct g_consumer *cp;
299 	struct bio *cbp;
300 
301 	sc = bp->bio_to->geom->softc;
302 	KASSERT(sc != NULL,
303 	    ("Provider's error should be set (error=%d)(device=%s).",
304 	    bp->bio_to->error, bp->bio_to->name));
305 	G_ELI_LOGREQ(2, bp, "Request received.");
306 
307 	switch (bp->bio_cmd) {
308 	case BIO_READ:
309 	case BIO_WRITE:
310 	case BIO_GETATTR:
311 	case BIO_FLUSH:
312 		break;
313 	case BIO_DELETE:
314 		/*
315 		 * We could eventually support BIO_DELETE requests.
316 		 * It could be done by overwriting the requested sectors with
317 		 * random data g_eli_overwrites number of times.
318 		 */
319 	default:
320 		g_io_deliver(bp, EOPNOTSUPP);
321 		return;
322 	}
323 	cbp = g_clone_bio(bp);
324 	if (cbp == NULL) {
325 		g_io_deliver(bp, ENOMEM);
326 		return;
327 	}
328 	bp->bio_driver1 = cbp;
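	/*
	 * Mark the bio as new: g_eli_takefirst() skips new bios while the
	 * device is suspended, and the worker thread uses the flag to account
	 * the bio as inflight and to start the first processing stage.
	 */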
329 	bp->bio_pflags = G_ELI_NEW_BIO;
330 	switch (bp->bio_cmd) {
331 	case BIO_READ:
332 		if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) {
333 			g_eli_crypto_read(sc, bp, 0);
334 			break;
335 		}
336 		/* FALLTHROUGH */
337 	case BIO_WRITE:
338 		mtx_lock(&sc->sc_queue_mtx);
339 		bioq_insert_tail(&sc->sc_queue, bp);
340 		mtx_unlock(&sc->sc_queue_mtx);
341 		wakeup(sc);
342 		break;
343 	case BIO_GETATTR:
344 	case BIO_FLUSH:
345 		cbp->bio_done = g_std_done;
346 		cp = LIST_FIRST(&sc->sc_geom->consumer);
347 		cbp->bio_to = cp->provider;
348 		G_ELI_LOGREQ(2, cbp, "Sending request.");
349 		g_io_request(cbp, cp);
350 		break;
351 	}
352 }
353 
354 static int
355 g_eli_newsession(struct g_eli_worker *wr)
356 {
357 	struct g_eli_softc *sc;
358 	struct cryptoini crie, cria;
359 	int error;
360 
361 	sc = wr->w_softc;
362 
363 	bzero(&crie, sizeof(crie));
364 	crie.cri_alg = sc->sc_ealgo;
365 	crie.cri_klen = sc->sc_ekeylen;
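	/* AES-XTS uses two keys, so twice the configured key length is passed. */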
366 	if (sc->sc_ealgo == CRYPTO_AES_XTS)
367 		crie.cri_klen <<= 1;
368 	if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) {
369 		crie.cri_key = g_eli_key_hold(sc, 0,
370 		    LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize);
371 	} else {
372 		crie.cri_key = sc->sc_ekey;
373 	}
374 	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
375 		bzero(&cria, sizeof(cria));
376 		cria.cri_alg = sc->sc_aalgo;
377 		cria.cri_klen = sc->sc_akeylen;
378 		cria.cri_key = sc->sc_akey;
379 		crie.cri_next = &cria;
380 	}
381 
382 	switch (sc->sc_crypto) {
383 	case G_ELI_CRYPTO_SW:
384 		error = crypto_newsession(&wr->w_sid, &crie,
385 		    CRYPTOCAP_F_SOFTWARE);
386 		break;
387 	case G_ELI_CRYPTO_HW:
388 		error = crypto_newsession(&wr->w_sid, &crie,
389 		    CRYPTOCAP_F_HARDWARE);
390 		break;
391 	case G_ELI_CRYPTO_UNKNOWN:
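		/*
		 * Backend not chosen yet: try hardware first, fall back to
		 * software, and remember the result for the other workers.
		 */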
392 		error = crypto_newsession(&wr->w_sid, &crie,
393 		    CRYPTOCAP_F_HARDWARE);
394 		if (error == 0) {
395 			mtx_lock(&sc->sc_queue_mtx);
396 			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
397 				sc->sc_crypto = G_ELI_CRYPTO_HW;
398 			mtx_unlock(&sc->sc_queue_mtx);
399 		} else {
400 			error = crypto_newsession(&wr->w_sid, &crie,
401 			    CRYPTOCAP_F_SOFTWARE);
402 			mtx_lock(&sc->sc_queue_mtx);
403 			if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
404 				sc->sc_crypto = G_ELI_CRYPTO_SW;
405 			mtx_unlock(&sc->sc_queue_mtx);
406 		}
407 		break;
408 	default:
409 		panic("%s: invalid condition", __func__);
410 	}
411 
412 	if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0)
413 		g_eli_key_drop(sc, crie.cri_key);
414 
415 	return (error);
416 }
417 
418 static void
419 g_eli_freesession(struct g_eli_worker *wr)
420 {
421 
422 	crypto_freesession(wr->w_sid);
423 }
424 
425 static void
426 g_eli_cancel(struct g_eli_softc *sc)
427 {
428 	struct bio *bp;
429 
430 	mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
431 
432 	while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
433 		KASSERT(bp->bio_pflags == G_ELI_NEW_BIO,
434 		    ("Not new bio when canceling (bp=%p).", bp));
435 		g_io_deliver(bp, ENXIO);
436 	}
437 }
438 
439 static struct bio *
440 g_eli_takefirst(struct g_eli_softc *sc)
441 {
442 	struct bio *bp;
443 
444 	mtx_assert(&sc->sc_queue_mtx, MA_OWNED);
445 
446 	if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND))
447 		return (bioq_takefirst(&sc->sc_queue));
448 	/*
449 	 * Device suspended, so we skip new I/O requests.
450 	 */
451 	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
452 		if (bp->bio_pflags != G_ELI_NEW_BIO)
453 			break;
454 	}
455 	if (bp != NULL)
456 		bioq_remove(&sc->sc_queue, bp);
457 	return (bp);
458 }
459 
460 /*
461  * This is the main function of the kernel worker thread used when we don't
462  * have hardware acceleration and have to do the cryptography in software.
463  * A dedicated thread is needed so we don't slow down the g_up/g_down GEOM
464  * threads with crypto work.
465  */
466 static void
467 g_eli_worker(void *arg)
468 {
469 	struct g_eli_softc *sc;
470 	struct g_eli_worker *wr;
471 	struct bio *bp;
472 	int error;
473 
474 	wr = arg;
475 	sc = wr->w_softc;
476 #ifdef SMP
477 	/* Before sched_bind() to a CPU, wait for all CPUs to go on-line. */
478 	if (sc->sc_cpubind) {
479 		while (!smp_started)
480 			tsleep(wr, 0, "geli:smp", hz / 4);
481 	}
482 #endif
483 	thread_lock(curthread);
484 	sched_prio(curthread, PUSER);
485 	if (sc->sc_cpubind)
486 		sched_bind(curthread, wr->w_number % mp_ncpus);
487 	thread_unlock(curthread);
488 
489 	G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm);
490 
491 	for (;;) {
492 		mtx_lock(&sc->sc_queue_mtx);
493 again:
494 		bp = g_eli_takefirst(sc);
495 		if (bp == NULL) {
496 			if (sc->sc_flags & G_ELI_FLAG_DESTROY) {
497 				g_eli_cancel(sc);
498 				LIST_REMOVE(wr, w_next);
499 				g_eli_freesession(wr);
500 				free(wr, M_ELI);
501 				G_ELI_DEBUG(1, "Thread %s exiting.",
502 				    curthread->td_proc->p_comm);
503 				wakeup(&sc->sc_workers);
504 				mtx_unlock(&sc->sc_queue_mtx);
505 				kproc_exit(0);
506 			}
507 			while (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
508 				if (sc->sc_inflight > 0) {
509 					G_ELI_DEBUG(0, "inflight=%d",
510 					    sc->sc_inflight);
511 					/*
512 					 * We still have inflight BIOs, so
513 					 * sleep and retry.
514 					 */
515 					msleep(sc, &sc->sc_queue_mtx, PRIBIO,
516 					    "geli:inf", hz / 5);
517 					goto again;
518 				}
519 				/*
520 				 * Suspend requested, mark the worker as
521 				 * suspended and go to sleep.
522 				 */
523 				if (wr->w_active) {
524 					g_eli_freesession(wr);
525 					wr->w_active = FALSE;
526 				}
527 				wakeup(&sc->sc_workers);
528 				msleep(sc, &sc->sc_queue_mtx, PRIBIO,
529 				    "geli:suspend", 0);
530 				if (!wr->w_active &&
531 				    !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) {
532 					error = g_eli_newsession(wr);
533 					KASSERT(error == 0,
534 					    ("g_eli_newsession() failed on resume (error=%d)",
535 					    error));
536 					wr->w_active = TRUE;
537 				}
538 				goto again;
539 			}
540 			msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0);
541 			continue;
542 		}
543 		if (bp->bio_pflags == G_ELI_NEW_BIO)
544 			atomic_add_int(&sc->sc_inflight, 1);
545 		mtx_unlock(&sc->sc_queue_mtx);
546 		if (bp->bio_pflags == G_ELI_NEW_BIO) {
547 			bp->bio_pflags = 0;
548 			if (sc->sc_flags & G_ELI_FLAG_AUTH) {
549 				if (bp->bio_cmd == BIO_READ)
550 					g_eli_auth_read(sc, bp);
551 				else
552 					g_eli_auth_run(wr, bp);
553 			} else {
554 				if (bp->bio_cmd == BIO_READ)
555 					g_eli_crypto_read(sc, bp, 1);
556 				else
557 					g_eli_crypto_run(wr, bp);
558 			}
559 		} else {
560 			if (sc->sc_flags & G_ELI_FLAG_AUTH)
561 				g_eli_auth_run(wr, bp);
562 			else
563 				g_eli_crypto_run(wr, bp);
564 		}
565 	}
566 }
567 
568 /*
569  * Here we generate the IV. It is unique for every sector.
570  */
571 void
572 g_eli_crypto_ivgen(struct g_eli_softc *sc, off_t offset, u_char *iv,
573     size_t size)
574 {
575 	uint8_t off[8];
576 
577 	if ((sc->sc_flags & G_ELI_FLAG_NATIVE_BYTE_ORDER) != 0)
578 		bcopy(&offset, off, sizeof(off));
579 	else
580 		le64enc(off, (uint64_t)offset);
581 
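	/*
	 * AES-XTS takes the encoded sector offset, zero-padded, as the IV;
	 * the other modes hash the IV-Key together with the offset and
	 * truncate the digest to the requested IV size.
	 */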
582 	switch (sc->sc_ealgo) {
583 	case CRYPTO_AES_XTS:
584 		bcopy(off, iv, sizeof(off));
585 		bzero(iv + sizeof(off), size - sizeof(off));
586 		break;
587 	default:
588 	    {
589 		u_char hash[SHA256_DIGEST_LENGTH];
590 		SHA256_CTX ctx;
591 
592 		/* Copy precalculated SHA256 context for IV-Key. */
593 		bcopy(&sc->sc_ivctx, &ctx, sizeof(ctx));
594 		SHA256_Update(&ctx, off, sizeof(off));
595 		SHA256_Final(hash, &ctx);
596 		bcopy(hash, iv, MIN(sizeof(hash), size));
597 		break;
598 	    }
599 	}
600 }
601 
602 int
603 g_eli_read_metadata(struct g_class *mp, struct g_provider *pp,
604     struct g_eli_metadata *md)
605 {
606 	struct g_geom *gp;
607 	struct g_consumer *cp;
608 	u_char *buf = NULL;
609 	int error;
610 
611 	g_topology_assert();
612 
613 	gp = g_new_geomf(mp, "eli:taste");
614 	gp->start = g_eli_start;
615 	gp->access = g_std_access;
616 	/*
617 	 * g_eli_read_metadata() is always called from the event thread.
618 	 * Our geom is created and destroyed within the same event, so no
619 	 * orphan or spoil event can occur in the meantime.
620 	 */
621 	gp->orphan = g_eli_orphan_spoil_assert;
622 	gp->spoiled = g_eli_orphan_spoil_assert;
623 	cp = g_new_consumer(gp);
624 	error = g_attach(cp, pp);
625 	if (error != 0)
626 		goto end;
627 	error = g_access(cp, 1, 0, 0);
628 	if (error != 0)
629 		goto end;
630 	g_topology_unlock();
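	/* The GELI metadata lives in the provider's last sector. */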
631 	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
632 	    &error);
633 	g_topology_lock();
634 	if (buf == NULL)
635 		goto end;
636 	eli_metadata_decode(buf, md);
637 end:
638 	if (buf != NULL)
639 		g_free(buf);
640 	if (cp->provider != NULL) {
641 		if (cp->acr == 1)
642 			g_access(cp, -1, 0, 0);
643 		g_detach(cp);
644 	}
645 	g_destroy_consumer(cp);
646 	g_destroy_geom(gp);
647 	return (error);
648 }
649 
650 /*
651  * This function is called on the last close of the provider, if the user
652  * requested that it be detached when this happens.
653  */
654 static void
655 g_eli_last_close(void *arg, int flags __unused)
656 {
657 	struct g_geom *gp;
658 	char gpname[64];
659 	int error;
660 
661 	g_topology_assert();
662 	gp = arg;
663 	strlcpy(gpname, gp->name, sizeof(gpname));
664 	error = g_eli_destroy(gp->softc, TRUE);
665 	KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).",
666 	    gpname, error));
667 	G_ELI_DEBUG(0, "Detached %s on last close.", gpname);
668 }
669 
670 int
671 g_eli_access(struct g_provider *pp, int dr, int dw, int de)
672 {
673 	struct g_eli_softc *sc;
674 	struct g_geom *gp;
675 
676 	gp = pp->geom;
677 	sc = gp->softc;
678 
679 	if (dw > 0) {
680 		if (sc->sc_flags & G_ELI_FLAG_RO) {
681 			/* Deny write attempts. */
682 			return (EROFS);
683 		}
684 		/* Someone is opening us for write, we need to remember that. */
685 		sc->sc_flags |= G_ELI_FLAG_WOPEN;
686 		return (0);
687 	}
688 	/* Is this the last close? */
689 	if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0)
690 		return (0);
691 
692 	/*
693 	 * Automatically detach on last close if requested.
694 	 */
695 	if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) ||
696 	    (sc->sc_flags & G_ELI_FLAG_WOPEN)) {
697 		g_post_event(g_eli_last_close, gp, M_WAITOK, NULL);
698 	}
699 	return (0);
700 }
701 
702 static int
703 g_eli_cpu_is_disabled(int cpu)
704 {
705 #ifdef SMP
706 	return (CPU_ISSET(cpu, &hlt_cpus_mask));
707 #else
708 	return (0);
709 #endif
710 }
711 
712 struct g_geom *
713 g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp,
714     const struct g_eli_metadata *md, const u_char *mkey, int nkey)
715 {
716 	struct g_eli_softc *sc;
717 	struct g_eli_worker *wr;
718 	struct g_geom *gp;
719 	struct g_provider *pp;
720 	struct g_consumer *cp;
721 	u_int i, threads;
722 	int error;
723 
724 	G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX);
725 
726 	gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX);
727 	sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO);
728 	gp->start = g_eli_start;
729 	/*
730 	 * Spoiling cannot actually happen, because we either keep the provider
731 	 * open for writing all the time or the provider is read-only.
732 	 */
733 	gp->spoiled = g_eli_orphan_spoil_assert;
734 	gp->orphan = g_eli_orphan;
735 	gp->dumpconf = g_eli_dumpconf;
736 	/*
737 	 * If the detach-on-last-close feature is not enabled and we don't
738 	 * operate on a read-only provider, we can simply use g_std_access().
739 	 */
740 	if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO))
741 		gp->access = g_eli_access;
742 	else
743 		gp->access = g_std_access;
744 
745 	sc->sc_version = md->md_version;
746 	sc->sc_inflight = 0;
747 	sc->sc_crypto = G_ELI_CRYPTO_UNKNOWN;
748 	sc->sc_flags = md->md_flags;
749 	/* Backward compatibility. */
750 	if (md->md_version < G_ELI_VERSION_04)
751 		sc->sc_flags |= G_ELI_FLAG_NATIVE_BYTE_ORDER;
752 	if (md->md_version < G_ELI_VERSION_05)
753 		sc->sc_flags |= G_ELI_FLAG_SINGLE_KEY;
754 	if (md->md_version < G_ELI_VERSION_06 &&
755 	    (sc->sc_flags & G_ELI_FLAG_AUTH) != 0) {
756 		sc->sc_flags |= G_ELI_FLAG_FIRST_KEY;
757 	}
758 	if (md->md_version < G_ELI_VERSION_07)
759 		sc->sc_flags |= G_ELI_FLAG_ENC_IVKEY;
760 	sc->sc_ealgo = md->md_ealgo;
761 	sc->sc_nkey = nkey;
762 
763 	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
764 		sc->sc_akeylen = sizeof(sc->sc_akey) * 8;
765 		sc->sc_aalgo = md->md_aalgo;
766 		sc->sc_alen = g_eli_hashlen(sc->sc_aalgo);
767 
768 		sc->sc_data_per_sector = bpp->sectorsize - sc->sc_alen;
769 		/*
770 		 * Some hash functions (like SHA1 and RIPEMD160) generate hashes
771 		 * whose length is not a multiple of 128 bits, but we want the
772 		 * data length to be a multiple of 128 bits, so we can encrypt
773 		 * without padding. The line below rounds the data length down
774 		 * to a multiple of 128 bits.
775 		 */
776 		sc->sc_data_per_sector -= sc->sc_data_per_sector % 16;
777 
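		/*
		 * Worked example (hypothetical numbers): with a 4096-byte
		 * logical sector, a 512-byte provider sector and a 16-byte
		 * HMAC, sc_data_per_sector = 512 - 16 = 496 (already a
		 * multiple of 16), so each logical sector needs
		 * ((4096 - 1) / 496 + 1) * 512 = 9 * 512 = 4608 bytes of
		 * provider space.
		 */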
778 		sc->sc_bytes_per_sector =
779 		    (md->md_sectorsize - 1) / sc->sc_data_per_sector + 1;
780 		sc->sc_bytes_per_sector *= bpp->sectorsize;
781 	}
782 
783 	gp->softc = sc;
784 	sc->sc_geom = gp;
785 
786 	bioq_init(&sc->sc_queue);
787 	mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF);
788 	mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF);
789 
790 	pp = NULL;
791 	cp = g_new_consumer(gp);
792 	error = g_attach(cp, bpp);
793 	if (error != 0) {
794 		if (req != NULL) {
795 			gctl_error(req, "Cannot attach to %s (error=%d).",
796 			    bpp->name, error);
797 		} else {
798 			G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).",
799 			    bpp->name, error);
800 		}
801 		goto failed;
802 	}
803 	/*
804 	 * Keep the provider open all the time, so we can run critical tasks,
805 	 * like Master Key deletion, without wondering whether we can open the
806 	 * provider or not.
807 	 * We only skip opening the provider for writing when the user
808 	 * requested read-only access.
809 	 */
810 	if (sc->sc_flags & G_ELI_FLAG_RO)
811 		error = g_access(cp, 1, 0, 1);
812 	else
813 		error = g_access(cp, 1, 1, 1);
814 	if (error != 0) {
815 		if (req != NULL) {
816 			gctl_error(req, "Cannot access %s (error=%d).",
817 			    bpp->name, error);
818 		} else {
819 			G_ELI_DEBUG(1, "Cannot access %s (error=%d).",
820 			    bpp->name, error);
821 		}
822 		goto failed;
823 	}
824 
825 	sc->sc_sectorsize = md->md_sectorsize;
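	/*
	 * Compute the size of the decrypted provider: drop the metadata
	 * sector (unless this is a one-time provider) and either round down
	 * to the logical sector size or, with authentication enabled,
	 * account for the per-sector integrity overhead.
	 */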
826 	sc->sc_mediasize = bpp->mediasize;
827 	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME))
828 		sc->sc_mediasize -= bpp->sectorsize;
829 	if (!(sc->sc_flags & G_ELI_FLAG_AUTH))
830 		sc->sc_mediasize -= (sc->sc_mediasize % sc->sc_sectorsize);
831 	else {
832 		sc->sc_mediasize /= sc->sc_bytes_per_sector;
833 		sc->sc_mediasize *= sc->sc_sectorsize;
834 	}
835 
836 	/*
837 	 * Remember the keys in our softc structure.
838 	 */
839 	g_eli_mkey_propagate(sc, mkey);
840 	sc->sc_ekeylen = md->md_keylen;
841 
842 	LIST_INIT(&sc->sc_workers);
843 
844 	threads = g_eli_threads;
845 	if (threads == 0)
846 		threads = mp_ncpus;
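	/* Bind workers to CPUs only on SMP machines with one worker per CPU. */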
847 	sc->sc_cpubind = (mp_ncpus > 1 && threads == mp_ncpus);
848 	for (i = 0; i < threads; i++) {
849 		if (g_eli_cpu_is_disabled(i)) {
850 			G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.",
851 			    bpp->name, i);
852 			continue;
853 		}
854 		wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO);
855 		wr->w_softc = sc;
856 		wr->w_number = i;
857 		wr->w_active = TRUE;
858 
859 		error = g_eli_newsession(wr);
860 		if (error != 0) {
861 			free(wr, M_ELI);
862 			if (req != NULL) {
863 				gctl_error(req, "Cannot set up crypto session "
864 				    "for %s (error=%d).", bpp->name, error);
865 			} else {
866 				G_ELI_DEBUG(1, "Cannot set up crypto session "
867 				    "for %s (error=%d).", bpp->name, error);
868 			}
869 			goto failed;
870 		}
871 
872 		error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0,
873 		    "g_eli[%u] %s", i, bpp->name);
874 		if (error != 0) {
875 			g_eli_freesession(wr);
876 			free(wr, M_ELI);
877 			if (req != NULL) {
878 				gctl_error(req, "Cannot create kernel thread "
879 				    "for %s (error=%d).", bpp->name, error);
880 			} else {
881 				G_ELI_DEBUG(1, "Cannot create kernel thread "
882 				    "for %s (error=%d).", bpp->name, error);
883 			}
884 			goto failed;
885 		}
886 		LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next);
887 	}
888 
889 	/*
890 	 * Create decrypted provider.
891 	 */
892 	pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX);
893 	pp->mediasize = sc->sc_mediasize;
894 	pp->sectorsize = sc->sc_sectorsize;
895 
896 	g_error_provider(pp, 0);
897 
898 	G_ELI_DEBUG(0, "Device %s created.", pp->name);
899 	G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo),
900 	    sc->sc_ekeylen);
901 	if (sc->sc_flags & G_ELI_FLAG_AUTH)
902 		G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo));
903 	G_ELI_DEBUG(0, "    Crypto: %s",
904 	    sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware");
905 	return (gp);
906 failed:
907 	mtx_lock(&sc->sc_queue_mtx);
908 	sc->sc_flags |= G_ELI_FLAG_DESTROY;
909 	wakeup(sc);
910 	/*
911 	 * Wait for the kernel threads to self-destruct.
912 	 */
913 	while (!LIST_EMPTY(&sc->sc_workers)) {
914 		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
915 		    "geli:destroy", 0);
916 	}
917 	mtx_destroy(&sc->sc_queue_mtx);
918 	if (cp->provider != NULL) {
919 		if (cp->acr == 1)
920 			g_access(cp, -1, -1, -1);
921 		g_detach(cp);
922 	}
923 	g_destroy_consumer(cp);
924 	g_destroy_geom(gp);
925 	g_eli_key_destroy(sc);
926 	bzero(sc, sizeof(*sc));
927 	free(sc, M_ELI);
928 	return (NULL);
929 }
930 
931 int
932 g_eli_destroy(struct g_eli_softc *sc, boolean_t force)
933 {
934 	struct g_geom *gp;
935 	struct g_provider *pp;
936 
937 	g_topology_assert();
938 
939 	if (sc == NULL)
940 		return (ENXIO);
941 
942 	gp = sc->sc_geom;
943 	pp = LIST_FIRST(&gp->provider);
944 	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
945 		if (force) {
946 			G_ELI_DEBUG(1, "Device %s is still open, so it "
947 			    "cannot be definitely removed.", pp->name);
948 			sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
949 			gp->access = g_eli_access;
950 			g_wither_provider(pp, ENXIO);
951 			return (EBUSY);
952 		} else {
953 			G_ELI_DEBUG(1,
954 			    "Device %s is still open (r%dw%de%d).", pp->name,
955 			    pp->acr, pp->acw, pp->ace);
956 			return (EBUSY);
957 		}
958 	}
959 
960 	mtx_lock(&sc->sc_queue_mtx);
961 	sc->sc_flags |= G_ELI_FLAG_DESTROY;
962 	wakeup(sc);
963 	while (!LIST_EMPTY(&sc->sc_workers)) {
964 		msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO,
965 		    "geli:destroy", 0);
966 	}
967 	mtx_destroy(&sc->sc_queue_mtx);
968 	gp->softc = NULL;
969 	g_eli_key_destroy(sc);
970 	bzero(sc, sizeof(*sc));
971 	free(sc, M_ELI);
972 
973 	if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
974 		G_ELI_DEBUG(0, "Device %s destroyed.", gp->name);
975 	g_wither_geom_close(gp, ENXIO);
976 
977 	return (0);
978 }
979 
980 static int
981 g_eli_destroy_geom(struct gctl_req *req __unused,
982     struct g_class *mp __unused, struct g_geom *gp)
983 {
984 	struct g_eli_softc *sc;
985 
986 	sc = gp->softc;
987 	return (g_eli_destroy(sc, FALSE));
988 }
989 
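/*
 * Gather key files preloaded by the loader.  For a provider named "da0"
 * (example name) they are expected to be registered with types
 * "da0:geli_keyfile0", "da0:geli_keyfile1", and so on; see geli(8) and
 * loader.conf(5) for the exact loader setup.
 */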
990 static int
991 g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider)
992 {
993 	u_char *keyfile, *data;
994 	char *file, name[64];
995 	size_t size;
996 	int i;
997 
998 	for (i = 0; ; i++) {
999 		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
1000 		keyfile = preload_search_by_type(name);
1001 		if (keyfile == NULL)
1002 			return (i);	/* Return number of loaded keyfiles. */
1003 		data = preload_fetch_addr(keyfile);
1004 		if (data == NULL) {
1005 			G_ELI_DEBUG(0, "Cannot find key file data for %s.",
1006 			    name);
1007 			return (0);
1008 		}
1009 		size = preload_fetch_size(keyfile);
1010 		if (size == 0) {
1011 			G_ELI_DEBUG(0, "Cannot find key file size for %s.",
1012 			    name);
1013 			return (0);
1014 		}
1015 		file = preload_search_info(keyfile, MODINFO_NAME);
1016 		if (file == NULL) {
1017 			G_ELI_DEBUG(0, "Cannot find key file name for %s.",
1018 			    name);
1019 			return (0);
1020 		}
1021 		G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file,
1022 		    provider, name);
1023 		g_eli_crypto_hmac_update(ctx, data, size);
1024 	}
1025 }
1026 
1027 static void
1028 g_eli_keyfiles_clear(const char *provider)
1029 {
1030 	u_char *keyfile, *data;
1031 	char name[64];
1032 	size_t size;
1033 	int i;
1034 
1035 	for (i = 0; ; i++) {
1036 		snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i);
1037 		keyfile = preload_search_by_type(name);
1038 		if (keyfile == NULL)
1039 			return;
1040 		data = preload_fetch_addr(keyfile);
1041 		size = preload_fetch_size(keyfile);
1042 		if (data != NULL && size != 0)
1043 			bzero(data, size);
1044 	}
1045 }
1046 
1047 /*
1048  * Tasting is only performed at boot.
1049  * We detect providers which should be attached before root is mounted.
1050  */
1051 static struct g_geom *
1052 g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
1053 {
1054 	struct g_eli_metadata md;
1055 	struct g_geom *gp;
1056 	struct hmac_ctx ctx;
1057 	char passphrase[256];
1058 	u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN];
1059 	u_int i, nkey, nkeyfiles, tries;
1060 	int error;
1061 
1062 	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
1063 	g_topology_assert();
1064 
1065 	if (root_mounted() || g_eli_tries == 0)
1066 		return (NULL);
1067 
1068 	G_ELI_DEBUG(3, "Tasting %s.", pp->name);
1069 
1070 	error = g_eli_read_metadata(mp, pp, &md);
1071 	if (error != 0)
1072 		return (NULL);
1073 	gp = NULL;
1074 
1075 	if (strcmp(md.md_magic, G_ELI_MAGIC) != 0)
1076 		return (NULL);
1077 	if (md.md_version > G_ELI_VERSION) {
1078 		printf("geom_eli.ko module is too old to handle %s.\n",
1079 		    pp->name);
1080 		return (NULL);
1081 	}
1082 	if (md.md_provsize != pp->mediasize)
1083 		return (NULL);
1084 	/* Should we attach it on boot? */
1085 	if (!(md.md_flags & G_ELI_FLAG_BOOT))
1086 		return (NULL);
1087 	if (md.md_keys == 0x00) {
1088 		G_ELI_DEBUG(0, "No valid keys on %s.", pp->name);
1089 		return (NULL);
1090 	}
1091 	if (md.md_iterations == -1) {
1092 		/* If there is no passphrase, we try only once. */
1093 		tries = 1;
1094 	} else {
1095 		/* Ask for the passphrase no more than g_eli_tries times. */
1096 		tries = g_eli_tries;
1097 	}
1098 
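	/*
	 * One extra iteration (i == 0) tries the passphrase cached during
	 * boot before prompting the user.
	 */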
1099 	for (i = 0; i <= tries; i++) {
1100 		g_eli_crypto_hmac_init(&ctx, NULL, 0);
1101 
1102 		/*
1103 		 * Load all key files.
1104 		 */
1105 		nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name);
1106 
1107 		if (nkeyfiles == 0 && md.md_iterations == -1) {
1108 			/*
1109 			 * No key files and no passphrase; something is
1110 			 * definitely wrong here.
1111 			 * geli(8) doesn't allow for such a situation, so assume
1112 			 * that there really was no passphrase and that the key
1113 			 * files are not properly defined in loader.conf.
1114 			 */
1115 			G_ELI_DEBUG(0,
1116 			    "Found no key files in loader.conf for %s.",
1117 			    pp->name);
1118 			return (NULL);
1119 		}
1120 
1121 		/* Ask for the passphrase if defined. */
1122 		if (md.md_iterations >= 0) {
1123 			/* Try first with cached passphrase. */
1124 			if (i == 0) {
1125 				if (!g_eli_boot_passcache)
1126 					continue;
1127 				memcpy(passphrase, cached_passphrase,
1128 				    sizeof(passphrase));
1129 			} else {
1130 				printf("Enter passphrase for %s: ", pp->name);
1131 				cngets(passphrase, sizeof(passphrase),
1132 				    g_eli_visible_passphrase);
1133 				memcpy(cached_passphrase, passphrase,
1134 				    sizeof(passphrase));
1135 			}
1136 		}
1137 
1138 		/*
1139 		 * Prepare Derived-Key from the user passphrase.
1140 		 */
1141 		if (md.md_iterations == 0) {
1142 			g_eli_crypto_hmac_update(&ctx, md.md_salt,
1143 			    sizeof(md.md_salt));
1144 			g_eli_crypto_hmac_update(&ctx, passphrase,
1145 			    strlen(passphrase));
1146 			bzero(passphrase, sizeof(passphrase));
1147 		} else if (md.md_iterations > 0) {
1148 			u_char dkey[G_ELI_USERKEYLEN];
1149 
1150 			pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt,
1151 			    sizeof(md.md_salt), passphrase, md.md_iterations);
1152 			bzero(passphrase, sizeof(passphrase));
1153 			g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey));
1154 			bzero(dkey, sizeof(dkey));
1155 		}
1156 
1157 		g_eli_crypto_hmac_final(&ctx, key, 0);
1158 
1159 		/*
1160 		 * Decrypt Master-Key.
1161 		 */
1162 		error = g_eli_mkey_decrypt(&md, key, mkey, &nkey);
1163 		bzero(key, sizeof(key));
1164 		if (error == -1) {
1165 			if (i == tries) {
1166 				G_ELI_DEBUG(0,
1167 				    "Wrong key for %s. No tries left.",
1168 				    pp->name);
1169 				g_eli_keyfiles_clear(pp->name);
1170 				return (NULL);
1171 			}
1172 			if (i > 0) {
1173 				G_ELI_DEBUG(0,
1174 				    "Wrong key for %s. Tries left: %u.",
1175 				    pp->name, tries - i);
1176 			}
1177 			/* Try again. */
1178 			continue;
1179 		} else if (error > 0) {
1180 			G_ELI_DEBUG(0,
1181 			    "Cannot decrypt Master Key for %s (error=%d).",
1182 			    pp->name, error);
1183 			g_eli_keyfiles_clear(pp->name);
1184 			return (NULL);
1185 		}
1186 		g_eli_keyfiles_clear(pp->name);
1187 		G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name);
1188 		break;
1189 	}
1190 
1191 	/*
1192 	 * We have the correct key, let's attach the provider.
1193 	 */
1194 	gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey);
1195 	bzero(mkey, sizeof(mkey));
1196 	bzero(&md, sizeof(md));
1197 	if (gp == NULL) {
1198 		G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name,
1199 		    G_ELI_SUFFIX);
1200 		return (NULL);
1201 	}
1202 	return (gp);
1203 }
1204 
1205 static void
1206 g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
1207     struct g_consumer *cp, struct g_provider *pp)
1208 {
1209 	struct g_eli_softc *sc;
1210 
1211 	g_topology_assert();
1212 	sc = gp->softc;
1213 	if (sc == NULL)
1214 		return;
1215 	if (pp != NULL || cp != NULL)
1216 		return;	/* Nothing here. */
1217 
1218 	sbuf_printf(sb, "%s<KeysTotal>%ju</KeysTotal>\n", indent,
1219 	    (uintmax_t)sc->sc_ekeys_total);
1220 	sbuf_printf(sb, "%s<KeysAllocated>%ju</KeysAllocated>\n", indent,
1221 	    (uintmax_t)sc->sc_ekeys_allocated);
1222 	sbuf_printf(sb, "%s<Flags>", indent);
1223 	if (sc->sc_flags == 0)
1224 		sbuf_printf(sb, "NONE");
1225 	else {
1226 		int first = 1;
1227 
1228 #define ADD_FLAG(flag, name)	do {					\
1229 	if (sc->sc_flags & (flag)) {					\
1230 		if (!first)						\
1231 			sbuf_printf(sb, ", ");				\
1232 		else							\
1233 			first = 0;					\
1234 		sbuf_printf(sb, name);					\
1235 	}								\
1236 } while (0)
1237 		ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND");
1238 		ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY");
1239 		ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER");
1240 		ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME");
1241 		ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT");
1242 		ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH");
1243 		ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH");
1244 		ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH");
1245 		ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN");
1246 		ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY");
1247 		ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY");
1248 #undef  ADD_FLAG
1249 	}
1250 	sbuf_printf(sb, "</Flags>\n");
1251 
1252 	if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) {
1253 		sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent,
1254 		    sc->sc_nkey);
1255 	}
1256 	sbuf_printf(sb, "%s<Version>%u</Version>\n", indent, sc->sc_version);
1257 	sbuf_printf(sb, "%s<Crypto>", indent);
1258 	switch (sc->sc_crypto) {
1259 	case G_ELI_CRYPTO_HW:
1260 		sbuf_printf(sb, "hardware");
1261 		break;
1262 	case G_ELI_CRYPTO_SW:
1263 		sbuf_printf(sb, "software");
1264 		break;
1265 	default:
1266 		sbuf_printf(sb, "UNKNOWN");
1267 		break;
1268 	}
1269 	sbuf_printf(sb, "</Crypto>\n");
1270 	if (sc->sc_flags & G_ELI_FLAG_AUTH) {
1271 		sbuf_printf(sb,
1272 		    "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n",
1273 		    indent, g_eli_algo2str(sc->sc_aalgo));
1274 	}
1275 	sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent,
1276 	    sc->sc_ekeylen);
1277 	sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n",
1278 	    indent, g_eli_algo2str(sc->sc_ealgo));
1279 	sbuf_printf(sb, "%s<State>%s</State>\n", indent,
1280 	    (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE");
1281 }
1282 
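/*
 * On shutdown, before file systems are synced, destroy every provider that is
 * already closed and mark the still-open ones for detach on last close.
 */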
1283 static void
1284 g_eli_shutdown_pre_sync(void *arg, int howto)
1285 {
1286 	struct g_class *mp;
1287 	struct g_geom *gp, *gp2;
1288 	struct g_provider *pp;
1289 	struct g_eli_softc *sc;
1290 	int error;
1291 
1292 	mp = arg;
1293 	DROP_GIANT();
1294 	g_topology_lock();
1295 	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
1296 		sc = gp->softc;
1297 		if (sc == NULL)
1298 			continue;
1299 		pp = LIST_FIRST(&gp->provider);
1300 		KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name));
1301 		if (pp->acr + pp->acw + pp->ace == 0)
1302 			error = g_eli_destroy(sc, TRUE);
1303 		else {
1304 			sc->sc_flags |= G_ELI_FLAG_RW_DETACH;
1305 			gp->access = g_eli_access;
1306 		}
1307 	}
1308 	g_topology_unlock();
1309 	PICKUP_GIANT();
1310 }
1311 
1312 static void
1313 g_eli_init(struct g_class *mp)
1314 {
1315 
1316 	g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
1317 	    g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
1318 	if (g_eli_pre_sync == NULL)
1319 		G_ELI_DEBUG(0, "Warning! Cannot register shutdown event.");
1320 }
1321 
1322 static void
1323 g_eli_fini(struct g_class *mp)
1324 {
1325 
1326 	if (g_eli_pre_sync != NULL)
1327 		EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync);
1328 }
1329 
1330 DECLARE_GEOM_CLASS(g_eli_class, g_eli);
1331 MODULE_DEPEND(g_eli, crypto, 1, 1, 1);
1332