xref: /freebsd/sys/geom/shsec/g_shsec.c (revision 5e3190f700637fcfc1a52daeaa4a031fdd2557c7)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2005 Pawel Jakub Dawidek <pjd@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/module.h>
34 #include <sys/lock.h>
35 #include <sys/mutex.h>
36 #include <sys/bio.h>
37 #include <sys/sbuf.h>
38 #include <sys/sysctl.h>
39 #include <sys/malloc.h>
40 #include <vm/uma.h>
41 #include <geom/geom.h>
42 #include <geom/geom_dbg.h>
43 #include <geom/shsec/g_shsec.h>
44 
FEATURE(geom_shsec, "GEOM shared secret device support");

/* Malloc tag for the softc and the per-device consumer array. */
static MALLOC_DEFINE(M_SHSEC, "shsec_data", "GEOM_SHSEC Data");

/* Zone of maxphys-sized buffers used for per-component I/O data. */
static uma_zone_t g_shsec_zone;

static int g_shsec_destroy(struct g_shsec_softc *sc, boolean_t force);
static int g_shsec_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);

/* GEOM class method implementations. */
static g_taste_t g_shsec_taste;
static g_ctl_req_t g_shsec_config;
static g_dumpconf_t g_shsec_dumpconf;
static g_init_t g_shsec_init;
static g_fini_t g_shsec_fini;
60 
/*
 * Class descriptor; registered with the GEOM framework by
 * DECLARE_GEOM_CLASS() at the bottom of this file.
 */
struct g_class g_shsec_class = {
	.name = G_SHSEC_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_shsec_config,
	.taste = g_shsec_taste,
	.destroy_geom = g_shsec_destroy_geom,
	.init = g_shsec_init,
	.fini = g_shsec_fini
};
70 
SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, shsec, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "GEOM_SHSEC stuff");
/* Verbosity level consumed by G_SHSEC_DEBUG()/G_SHSEC_LOGREQ(). */
static u_int g_shsec_debug;
SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, debug, CTLFLAG_RWTUN, &g_shsec_debug, 0,
    "Debug level");
/*
 * Cap on memory used for I/O buffers; CTLFLAG_NOFETCH because the loader
 * tunable is fetched manually in g_shsec_init().
 */
static u_long g_shsec_maxmem;
SYSCTL_ULONG(_kern_geom_shsec, OID_AUTO, maxmem,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &g_shsec_maxmem,
    0, "Maximum memory that can be allocated for I/O (in bytes)");
/* Bumped whenever uma_zalloc(M_NOWAIT) fails in g_shsec_start(). */
static u_int g_shsec_alloc_failed = 0;
SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, alloc_failed, CTLFLAG_RD,
    &g_shsec_alloc_failed, 0, "How many times I/O allocation failed");
84 
85 /*
86  * Greatest Common Divisor.
87  */
88 static u_int
89 gcd(u_int a, u_int b)
90 {
91 	u_int c;
92 
93 	while (b != 0) {
94 		c = a;
95 		a = b;
96 		b = (c % b);
97 	}
98 	return (a);
99 }
100 
101 /*
102  * Least Common Multiple.
103  */
104 static u_int
105 lcm(u_int a, u_int b)
106 {
107 
108 	return ((a * b) / gcd(a, b));
109 }
110 
111 static void
112 g_shsec_init(struct g_class *mp __unused)
113 {
114 
115 	g_shsec_maxmem = maxphys * 100;
116 	TUNABLE_ULONG_FETCH("kern.geom.shsec.maxmem,", &g_shsec_maxmem);
117 	g_shsec_zone = uma_zcreate("g_shsec_zone", maxphys, NULL, NULL, NULL,
118 	    NULL, 0, 0);
119 	g_shsec_maxmem -= g_shsec_maxmem % maxphys;
120 	uma_zone_set_max(g_shsec_zone, g_shsec_maxmem / maxphys);
121 }
122 
/*
 * Class fini method: tear down the I/O buffer zone created in
 * g_shsec_init().
 */
static void
g_shsec_fini(struct g_class *mp __unused)
{

	uma_zdestroy(g_shsec_zone);
}
129 
130 /*
131  * Return the number of valid disks.
132  */
133 static u_int
134 g_shsec_nvalid(struct g_shsec_softc *sc)
135 {
136 	u_int i, no;
137 
138 	no = 0;
139 	for (i = 0; i < sc->sc_ndisks; i++) {
140 		if (sc->sc_disks[i] != NULL)
141 			no++;
142 	}
143 
144 	return (no);
145 }
146 
/*
 * Detach one component disk from its shsec device.
 *
 * Clears the disk's slot in sc_disks[] and withers the composite provider
 * (if still announced) with ENXIO: the secret cannot be reconstructed once
 * any component is missing.  The consumer itself is destroyed only when it
 * holds no access counts; otherwise it is left for the final access drop
 * to clean up (see the orphan handling in g_shsec_access()).
 */
static void
g_shsec_remove_disk(struct g_consumer *cp)
{
	struct g_shsec_softc *sc;
	u_int no;

	KASSERT(cp != NULL, ("Non-valid disk in %s.", __func__));
	sc = (struct g_shsec_softc *)cp->private;
	KASSERT(sc != NULL, ("NULL sc in %s.", __func__));
	/* cp->index was set to the component number by g_shsec_add_disk(). */
	no = cp->index;

	G_SHSEC_DEBUG(0, "Disk %s removed from %s.", cp->provider->name,
	    sc->sc_name);

	sc->sc_disks[no] = NULL;
	if (sc->sc_provider != NULL) {
		/* Losing any component makes the device unusable. */
		g_wither_provider(sc->sc_provider, ENXIO);
		sc->sc_provider = NULL;
		G_SHSEC_DEBUG(0, "Device %s removed.", sc->sc_name);
	}

	/* Keep the consumer around while it still holds access counts. */
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		return;
	g_detach(cp);
	g_destroy_consumer(cp);
}
173 
174 static void
175 g_shsec_orphan(struct g_consumer *cp)
176 {
177 	struct g_shsec_softc *sc;
178 	struct g_geom *gp;
179 
180 	g_topology_assert();
181 	gp = cp->geom;
182 	sc = gp->softc;
183 	if (sc == NULL)
184 		return;
185 
186 	g_shsec_remove_disk(cp);
187 	/* If there are no valid disks anymore, remove device. */
188 	if (LIST_EMPTY(&gp->consumer))
189 		g_shsec_destroy(sc, 1);
190 }
191 
/*
 * Access method for the composite provider: mirror the requested access
 * delta (dr, dw, de) onto every component consumer.
 *
 * While the device is open, one extra exclusive count is held on the
 * components (added on the transition from fully closed, dropped on the
 * transition back), so the component providers cannot be opened
 * exclusively elsewhere in the meantime.  Returns 0 on success; on
 * failure all already-applied deltas are backed out.  With an empty
 * consumer list the initial ENXIO is returned.
 */
static int
g_shsec_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp1, *cp2, *tmp;
	struct g_shsec_softc *sc;
	struct g_geom *gp;
	int error;

	gp = pp->geom;
	sc = gp->softc;

	/* On first open, grab an extra "exclusive" bit */
	if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
		de++;
	/* ... and let go of it on last close */
	if ((pp->acr + dr) == 0 && (pp->acw + dw) == 0 && (pp->ace + de) == 0)
		de--;

	error = ENXIO;
	LIST_FOREACH_SAFE(cp1, &gp->consumer, consumer, tmp) {
		error = g_access(cp1, dr, dw, de);
		if (error != 0)
			goto fail;
		/*
		 * An orphaned consumer that just dropped to zero access
		 * counts was kept alive only for this moment; finish its
		 * teardown now (hence the _SAFE list walk).
		 */
		if (cp1->acr == 0 && cp1->acw == 0 && cp1->ace == 0 &&
		    cp1->flags & G_CF_ORPHAN) {
			g_detach(cp1);
			g_destroy_consumer(cp1);
		}
	}

	/* If there are no valid disks anymore, remove device. */
	if (LIST_EMPTY(&gp->consumer))
		g_shsec_destroy(sc, 1);

	return (error);

fail:
	/* If we fail here, backout all previous changes. */
	LIST_FOREACH(cp2, &gp->consumer, consumer) {
		if (cp1 == cp2)
			break;
		g_access(cp2, -dr, -dw, -de);
	}
	return (error);
}
237 
238 static void
239 g_shsec_xor1(uint32_t *src, uint32_t *dst, ssize_t len)
240 {
241 
242 	for (; len > 0; len -= sizeof(uint32_t), dst++)
243 		*dst = *dst ^ *src++;
244 	KASSERT(len == 0, ("len != 0 (len=%zd)", len));
245 }
246 
/*
 * Completion handler for component bios.
 *
 * For reads, the shares are folded back together here: the first child to
 * complete copies its data into the parent buffer (G_SHSEC_BFLAG_FIRST),
 * every later child XORs its share on top, reconstructing the secret.
 * The child's temporary buffer is zeroized before being returned to the
 * zone so share material does not linger in freed memory.  The parent is
 * delivered once all children have come back; the first child error (if
 * any) becomes the parent's error.
 */
static void
g_shsec_done(struct bio *bp)
{
	struct bio *pbp;

	pbp = bp->bio_parent;
	if (bp->bio_error == 0)
		G_SHSEC_LOGREQ(2, bp, "Request done.");
	else {
		G_SHSEC_LOGREQ(0, bp, "Request failed (error=%d).",
		    bp->bio_error);
		/* Keep the first error seen; later ones are dropped. */
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
	}
	if (pbp->bio_cmd == BIO_READ) {
		if ((pbp->bio_pflags & G_SHSEC_BFLAG_FIRST) != 0) {
			/* First share: plain copy, then clear the flag. */
			bcopy(bp->bio_data, pbp->bio_data, pbp->bio_length);
			pbp->bio_pflags = 0;
		} else {
			/* Subsequent shares: XOR into the parent buffer. */
			g_shsec_xor1((uint32_t *)bp->bio_data,
			    (uint32_t *)pbp->bio_data,
			    (ssize_t)pbp->bio_length);
		}
	}
	if (bp->bio_data != NULL) {
		/* Scrub share material before recycling the buffer. */
		explicit_bzero(bp->bio_data, bp->bio_length);
		uma_zfree(g_shsec_zone, bp->bio_data);
	}
	g_destroy_bio(bp);
	pbp->bio_inbed++;
	if (pbp->bio_children == pbp->bio_inbed) {
		pbp->bio_completed = pbp->bio_length;
		g_io_deliver(pbp, pbp->bio_error);
	}
}
282 
283 static void
284 g_shsec_xor2(uint32_t *rand, uint32_t *dst, ssize_t len)
285 {
286 
287 	for (; len > 0; len -= sizeof(uint32_t), dst++) {
288 		*rand = arc4random();
289 		*dst = *dst ^ *rand++;
290 	}
291 	KASSERT(len == 0, ("len != 0 (len=%zd)", len));
292 }
293 
/*
 * Start method: split one request into a child bio per component.
 *
 * Writes implement the secret sharing: component 0's buffer starts as a
 * copy of the payload, and every other component gets a buffer of fresh
 * random words which g_shsec_xor2() also XORs into component 0's buffer.
 * Thus components 1..n-1 store pure randomness and component 0 stores the
 * payload XOR all of them; the XOR of all shares recovers the data (done
 * in g_shsec_done() for reads).  All children are allocated up front so
 * that a mid-loop failure can be unwound without any I/O having been
 * issued.
 */
static void
g_shsec_start(struct bio *bp)
{
	TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue);
	struct g_shsec_softc *sc;
	struct bio *cbp;
	uint32_t *dst;
	ssize_t len;
	u_int no;
	int error;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL, provider's error should be set and g_shsec_start()
	 * should not be called at all.
	 */
	KASSERT(sc != NULL,
	    ("Provider's error should be set (error=%d)(device=%s).",
	    bp->bio_to->error, bp->bio_to->name));

	G_SHSEC_LOGREQ(2, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_FLUSH:
	case BIO_SPEEDUP:
		/*
		 * Only those requests are supported.
		 */
		break;
	case BIO_DELETE:
	case BIO_GETATTR:
		/* To which provider it should be delivered? */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	/*
	 * Allocate all bios first and calculate XOR.
	 */
	dst = NULL;
	len = bp->bio_length;
	/* Mark the parent so the first read completion copies, not XORs. */
	if (bp->bio_cmd == BIO_READ)
		bp->bio_pflags = G_SHSEC_BFLAG_FIRST;
	for (no = 0; no < sc->sc_ndisks; no++) {
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			error = ENOMEM;
			goto failure;
		}
		TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);

		/*
		 * Fill in the component buf structure.
		 */
		cbp->bio_done = g_shsec_done;
		/* Stash the target consumer until the dispatch loop below. */
		cbp->bio_caller2 = sc->sc_disks[no];
		if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
			cbp->bio_data = uma_zalloc(g_shsec_zone, M_NOWAIT);
			if (cbp->bio_data == NULL) {
				g_shsec_alloc_failed++;
				error = ENOMEM;
				goto failure;
			}
			if (bp->bio_cmd == BIO_WRITE) {
				if (no == 0) {
					/* Share 0 starts as the payload. */
					dst = (uint32_t *)cbp->bio_data;
					bcopy(bp->bio_data, dst, len);
				} else {
					/* Random share, folded into share 0. */
					g_shsec_xor2((uint32_t *)cbp->bio_data,
					    dst, len);
				}
			}
		}
	}
	/*
	 * Fire off all allocated requests!
	 */
	while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
		struct g_consumer *cp;

		TAILQ_REMOVE(&queue, cbp, bio_queue);
		cp = cbp->bio_caller2;
		cbp->bio_caller2 = NULL;
		cbp->bio_to = cp->provider;
		G_SHSEC_LOGREQ(2, cbp, "Sending request.");
		g_io_request(cbp, cp);
	}
	return;
failure:
	/* Nothing was issued yet: scrub and free the partial set of bios. */
	while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, cbp, bio_queue);
		bp->bio_children--;
		if (cbp->bio_data != NULL) {
			explicit_bzero(cbp->bio_data, cbp->bio_length);
			uma_zfree(g_shsec_zone, cbp->bio_data);
		}
		g_destroy_bio(cbp);
	}
	if (bp->bio_error == 0)
		bp->bio_error = error;
	g_io_deliver(bp, bp->bio_error);
}
399 
/*
 * Announce the composite provider once every component disk is attached.
 *
 * The provider's media size is the smallest component size minus one
 * sector (the last sector of each component holds the metadata), and the
 * sector size is the least common multiple of the components' sector
 * sizes.  Does nothing while any component is still missing.
 */
static void
g_shsec_check_and_run(struct g_shsec_softc *sc)
{
	off_t mediasize, ms;
	u_int no, sectorsize = 0;

	if (g_shsec_nvalid(sc) != sc->sc_ndisks)
		return;

	sc->sc_provider = g_new_providerf(sc->sc_geom, "shsec/%s", sc->sc_name);
	/*
	 * Find the smallest disk.
	 */
	mediasize = sc->sc_disks[0]->provider->mediasize;
	mediasize -= sc->sc_disks[0]->provider->sectorsize;
	sectorsize = sc->sc_disks[0]->provider->sectorsize;
	for (no = 1; no < sc->sc_ndisks; no++) {
		ms = sc->sc_disks[no]->provider->mediasize;
		ms -= sc->sc_disks[no]->provider->sectorsize;
		if (ms < mediasize)
			mediasize = ms;
		sectorsize = lcm(sectorsize,
		    sc->sc_disks[no]->provider->sectorsize);
	}
	sc->sc_provider->sectorsize = sectorsize;
	sc->sc_provider->mediasize = mediasize;
	g_error_provider(sc->sc_provider, 0);

	G_SHSEC_DEBUG(0, "Device %s activated.", sc->sc_name);
}
430 
/*
 * Read and decode the shsec metadata stored in the last sector of the
 * consumer's provider.
 *
 * Takes a temporary read access count on 'cp' and drops the topology lock
 * around the actual I/O.  Returns 0 on success with *md filled in, or an
 * errno (from g_access() or g_read_data()) on failure.
 */
static int
g_shsec_read_metadata(struct g_consumer *cp, struct g_shsec_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	/* I/O must not be done under the topology lock. */
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL)
		return (error);

	/* Decode metadata. */
	shsec_metadata_decode(buf, md);
	g_free(buf);

	return (0);
}
458 
459 /*
460  * Add disk to given device.
461  */
462 static int
463 g_shsec_add_disk(struct g_shsec_softc *sc, struct g_provider *pp, u_int no)
464 {
465 	struct g_consumer *cp, *fcp;
466 	struct g_geom *gp;
467 	struct g_shsec_metadata md;
468 	int error;
469 
470 	/* Metadata corrupted? */
471 	if (no >= sc->sc_ndisks)
472 		return (EINVAL);
473 
474 	/* Check if disk is not already attached. */
475 	if (sc->sc_disks[no] != NULL)
476 		return (EEXIST);
477 
478 	gp = sc->sc_geom;
479 	fcp = LIST_FIRST(&gp->consumer);
480 
481 	cp = g_new_consumer(gp);
482 	error = g_attach(cp, pp);
483 	if (error != 0) {
484 		g_destroy_consumer(cp);
485 		return (error);
486 	}
487 
488 	if (fcp != NULL && (fcp->acr > 0 || fcp->acw > 0 || fcp->ace > 0)) {
489 		error = g_access(cp, fcp->acr, fcp->acw, fcp->ace);
490 		if (error != 0) {
491 			g_detach(cp);
492 			g_destroy_consumer(cp);
493 			return (error);
494 		}
495 	}
496 
497 	/* Reread metadata. */
498 	error = g_shsec_read_metadata(cp, &md);
499 	if (error != 0)
500 		goto fail;
501 
502 	if (strcmp(md.md_magic, G_SHSEC_MAGIC) != 0 ||
503 	    strcmp(md.md_name, sc->sc_name) != 0 || md.md_id != sc->sc_id) {
504 		G_SHSEC_DEBUG(0, "Metadata on %s changed.", pp->name);
505 		goto fail;
506 	}
507 
508 	cp->private = sc;
509 	cp->index = no;
510 	sc->sc_disks[no] = cp;
511 
512 	G_SHSEC_DEBUG(0, "Disk %s attached to %s.", pp->name, sc->sc_name);
513 
514 	g_shsec_check_and_run(sc);
515 
516 	return (0);
517 fail:
518 	if (fcp != NULL && (fcp->acr > 0 || fcp->acw > 0 || fcp->ace > 0))
519 		g_access(cp, -fcp->acr, -fcp->acw, -fcp->ace);
520 	g_detach(cp);
521 	g_destroy_consumer(cp);
522 	return (error);
523 }
524 
525 static struct g_geom *
526 g_shsec_create(struct g_class *mp, const struct g_shsec_metadata *md)
527 {
528 	struct g_shsec_softc *sc;
529 	struct g_geom *gp;
530 	u_int no;
531 
532 	G_SHSEC_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id);
533 
534 	/* Two disks is minimum. */
535 	if (md->md_all < 2) {
536 		G_SHSEC_DEBUG(0, "Too few disks defined for %s.", md->md_name);
537 		return (NULL);
538 	}
539 
540 	/* Check for duplicate unit */
541 	LIST_FOREACH(gp, &mp->geom, geom) {
542 		sc = gp->softc;
543 		if (sc != NULL && strcmp(sc->sc_name, md->md_name) == 0) {
544 			G_SHSEC_DEBUG(0, "Device %s already configured.",
545 			    sc->sc_name);
546 			return (NULL);
547 		}
548 	}
549 	gp = g_new_geomf(mp, "%s", md->md_name);
550 	sc = malloc(sizeof(*sc), M_SHSEC, M_WAITOK | M_ZERO);
551 	gp->start = g_shsec_start;
552 	gp->spoiled = g_shsec_orphan;
553 	gp->orphan = g_shsec_orphan;
554 	gp->access = g_shsec_access;
555 	gp->dumpconf = g_shsec_dumpconf;
556 
557 	sc->sc_id = md->md_id;
558 	sc->sc_ndisks = md->md_all;
559 	sc->sc_disks = malloc(sizeof(struct g_consumer *) * sc->sc_ndisks,
560 	    M_SHSEC, M_WAITOK | M_ZERO);
561 	for (no = 0; no < sc->sc_ndisks; no++)
562 		sc->sc_disks[no] = NULL;
563 
564 	gp->softc = sc;
565 	sc->sc_geom = gp;
566 	sc->sc_provider = NULL;
567 
568 	G_SHSEC_DEBUG(0, "Device %s created (id=%u).", sc->sc_name, sc->sc_id);
569 
570 	return (gp);
571 }
572 
/*
 * Tear down a whole shsec device: detach every component, free the softc
 * and wither the geom.
 *
 * If the composite provider is still open, the request is refused with
 * EBUSY unless 'force' is set, in which case teardown proceeds anyway
 * (with a warning).  Returns 0 on success, ENXIO for a NULL softc, or
 * EBUSY as above.  Must be called with the topology lock held.
 */
static int
g_shsec_destroy(struct g_shsec_softc *sc, boolean_t force)
{
	struct g_provider *pp;
	struct g_geom *gp;
	u_int no;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	pp = sc->sc_provider;
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_SHSEC_DEBUG(0, "Device %s is still open, so it "
			    "can't be definitely removed.", pp->name);
		} else {
			G_SHSEC_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	}

	/* Detach all components; this also withers sc_provider. */
	for (no = 0; no < sc->sc_ndisks; no++) {
		if (sc->sc_disks[no] != NULL)
			g_shsec_remove_disk(sc->sc_disks[no]);
	}

	gp = sc->sc_geom;
	gp->softc = NULL;
	KASSERT(sc->sc_provider == NULL, ("Provider still exists? (device=%s)",
	    gp->name));
	free(sc->sc_disks, M_SHSEC);
	free(sc, M_SHSEC);

	pp = LIST_FIRST(&gp->provider);
	if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
		G_SHSEC_DEBUG(0, "Device %s destroyed.", gp->name);

	g_wither_geom(gp, ENXIO);

	return (0);
}
618 
619 static int
620 g_shsec_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused,
621     struct g_geom *gp)
622 {
623 	struct g_shsec_softc *sc;
624 
625 	sc = gp->softc;
626 	return (g_shsec_destroy(sc, 0));
627 }
628 
/*
 * Taste method: probe 'pp' for shsec metadata and, if valid, attach the
 * provider as a component of its device — creating the device first if
 * this is the first component seen.  Returns the device's geom, or NULL
 * when the provider does not belong to us or attaching fails.
 */
static struct g_geom *
g_shsec_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_shsec_metadata md;
	struct g_shsec_softc *sc;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	G_SHSEC_DEBUG(3, "Tasting %s.", pp->name);

	/* Throw-away geom/consumer, used only to read the metadata sector. */
	gp = g_new_geomf(mp, "shsec:taste");
	gp->start = g_shsec_start;
	gp->access = g_shsec_access;
	gp->orphan = g_shsec_orphan;
	cp = g_new_consumer(gp);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error == 0) {
		error = g_shsec_read_metadata(cp, &md);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (strcmp(md.md_magic, G_SHSEC_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_SHSEC_VERSION) {
		G_SHSEC_DEBUG(0, "Kernel module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	/*
	 * Backward compatibility:
	 */
	/* There was no md_provsize field in earlier versions of metadata. */
	if (md.md_version < 1)
		md.md_provsize = pp->mediasize;

	/* Reject stale metadata (copied image, renamed or resized disk). */
	if (md.md_provider[0] != '\0' &&
	    !g_compare_names(md.md_provider, pp->name))
		return (NULL);
	if (md.md_provsize != pp->mediasize)
		return (NULL);

	/*
	 * Let's check if device already exists.
	 */
	sc = NULL;
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (strcmp(md.md_name, sc->sc_name) != 0)
			continue;
		if (md.md_id != sc->sc_id)
			continue;
		break;
	}
	if (gp != NULL) {
		/* Existing device: just attach this component. */
		G_SHSEC_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
		error = g_shsec_add_disk(sc, pp, md.md_no);
		if (error != 0) {
			G_SHSEC_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
			    pp->name, gp->name, error);
			return (NULL);
		}
	} else {
		/* First component seen: create the device, then attach. */
		gp = g_shsec_create(mp, &md);
		if (gp == NULL) {
			G_SHSEC_DEBUG(0, "Cannot create device %s.", md.md_name);
			return (NULL);
		}
		sc = gp->softc;
		G_SHSEC_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
		error = g_shsec_add_disk(sc, pp, md.md_no);
		if (error != 0) {
			G_SHSEC_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
			    pp->name, gp->name, error);
			g_shsec_destroy(sc, 1);
			return (NULL);
		}
	}
	return (gp);
}
724 
725 static struct g_shsec_softc *
726 g_shsec_find_device(struct g_class *mp, const char *name)
727 {
728 	struct g_shsec_softc *sc;
729 	struct g_geom *gp;
730 
731 	LIST_FOREACH(gp, &mp->geom, geom) {
732 		sc = gp->softc;
733 		if (sc == NULL)
734 			continue;
735 		if (strcmp(sc->sc_name, name) == 0)
736 			return (sc);
737 	}
738 	return (NULL);
739 }
740 
/*
 * Handle the "stop" gctl verb: destroy every device named in the request
 * arguments.  'force' controls whether still-open devices are torn down
 * (see g_shsec_destroy()).  Errors are reported back through the request.
 */
static void
g_shsec_ctl_destroy(struct gctl_req *req, struct g_class *mp)
{
	struct g_shsec_softc *sc;
	int *force, *nargs, error;
	const char *name;
	char param[16];
	u_int i;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument.", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}
	force = gctl_get_paraml(req, "force", sizeof(*force));
	if (force == NULL) {
		gctl_error(req, "No '%s' argument.", "force");
		return;
	}

	/* Arguments arrive as "arg0", "arg1", ...; stop on the first error. */
	for (i = 0; i < (u_int)*nargs; i++) {
		snprintf(param, sizeof(param), "arg%u", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%u' argument.", i);
			return;
		}
		sc = g_shsec_find_device(mp, name);
		if (sc == NULL) {
			gctl_error(req, "No such device: %s.", name);
			return;
		}
		error = g_shsec_destroy(sc, *force);
		if (error != 0) {
			gctl_error(req, "Cannot destroy device %s (error=%d).",
			    sc->sc_name, error);
			return;
		}
	}
}
787 
788 static void
789 g_shsec_config(struct gctl_req *req, struct g_class *mp, const char *verb)
790 {
791 	uint32_t *version;
792 
793 	g_topology_assert();
794 
795 	version = gctl_get_paraml(req, "version", sizeof(*version));
796 	if (version == NULL) {
797 		gctl_error(req, "No '%s' argument.", "version");
798 		return;
799 	}
800 	if (*version != G_SHSEC_VERSION) {
801 		gctl_error(req, "Userland and kernel parts are out of sync.");
802 		return;
803 	}
804 
805 	if (strcmp(verb, "stop") == 0) {
806 		g_shsec_ctl_destroy(req, mp);
807 		return;
808 	}
809 
810 	gctl_error(req, "Unknown verb.");
811 }
812 
813 static void
814 g_shsec_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
815     struct g_consumer *cp, struct g_provider *pp)
816 {
817 	struct g_shsec_softc *sc;
818 
819 	sc = gp->softc;
820 	if (sc == NULL)
821 		return;
822 	if (pp != NULL) {
823 		/* Nothing here. */
824 	} else if (cp != NULL) {
825 		sbuf_printf(sb, "%s<Number>%u</Number>\n", indent,
826 		    (u_int)cp->index);
827 	} else {
828 		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
829 		sbuf_printf(sb, "%s<Status>Total=%u, Online=%u</Status>\n",
830 		    indent, sc->sc_ndisks, g_shsec_nvalid(sc));
831 		sbuf_printf(sb, "%s<State>", indent);
832 		if (sc->sc_provider != NULL && sc->sc_provider->error == 0)
833 			sbuf_printf(sb, "UP");
834 		else
835 			sbuf_printf(sb, "DOWN");
836 		sbuf_printf(sb, "</State>\n");
837 	}
838 }
839 
840 DECLARE_GEOM_CLASS(g_shsec_class, g_shsec);
841 MODULE_VERSION(geom_shsec, 0);
842