/*-
 * Copyright (c) 2005 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <geom/shsec/g_shsec.h>

FEATURE(geom_shsec, "GEOM shared secret device support");

static MALLOC_DEFINE(M_SHSEC, "shsec_data", "GEOM_SHSEC Data");

static uma_zone_t g_shsec_zone;

static int g_shsec_destroy(struct g_shsec_softc *sc, boolean_t force);
static int g_shsec_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);

static g_taste_t g_shsec_taste;
static g_ctl_req_t g_shsec_config;
static g_dumpconf_t g_shsec_dumpconf;
static g_init_t g_shsec_init;
static g_fini_t g_shsec_fini;

struct g_class g_shsec_class = {
	.name = G_SHSEC_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_shsec_config,
	.taste = g_shsec_taste,
	.destroy_geom = g_shsec_destroy_geom,
	.init = g_shsec_init,
	.fini = g_shsec_fini
};

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, shsec, CTLFLAG_RW, 0,
    "GEOM_SHSEC stuff");
static u_int g_shsec_debug = 0;
SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, debug, CTLFLAG_RWTUN, &g_shsec_debug, 0,
    "Debug level");
static u_int g_shsec_maxmem = MAXPHYS * 100;
SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, maxmem, CTLFLAG_RDTUN, &g_shsec_maxmem,
    0, "Maximum memory that can be allocated for I/O (in bytes)");
static u_int g_shsec_alloc_failed = 0;
SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, alloc_failed, CTLFLAG_RD,
    &g_shsec_alloc_failed, 0, "How many times I/O allocation failed");

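/*
 * gcd()/lcm() below are used by g_shsec_check_and_run() to pick the
 * provider's sector size: the least common multiple of the component
 * sector sizes, so that every provider sector maps to whole sectors on
 * each component.  For example, lcm(512, 4096) == 4096.
 */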
/*
 * Greatest Common Divisor.
 */
static u_int
gcd(u_int a, u_int b)
{
	u_int c;

	while (b != 0) {
		c = a;
		a = b;
		b = (c % b);
	}
	return (a);
}

/*
 * Least Common Multiple.
 */
static u_int
lcm(u_int a, u_int b)
{

	return ((a * b) / gcd(a, b));
}

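/*
 * Every cloned bio gets its own MAXPHYS-sized buffer from g_shsec_zone;
 * the kern.geom.shsec.maxmem tunable bounds the memory used for such
 * buffers (the limit is handed to UMA as maxmem / MAXPHYS items).
 */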
static void
g_shsec_init(struct g_class *mp __unused)
{

	g_shsec_zone = uma_zcreate("g_shsec_zone", MAXPHYS, NULL, NULL, NULL,
	    NULL, 0, 0);
	g_shsec_maxmem -= g_shsec_maxmem % MAXPHYS;
	uma_zone_set_max(g_shsec_zone, g_shsec_maxmem / MAXPHYS);
}

static void
g_shsec_fini(struct g_class *mp __unused)
{

	uma_zdestroy(g_shsec_zone);
}

/*
 * Return the number of valid disks.
 */
static u_int
g_shsec_nvalid(struct g_shsec_softc *sc)
{
	u_int i, no;

	no = 0;
	for (i = 0; i < sc->sc_ndisks; i++) {
		if (sc->sc_disks[i] != NULL)
			no++;
	}

	return (no);
}

static void
g_shsec_remove_disk(struct g_consumer *cp)
{
	struct g_shsec_softc *sc;
	u_int no;

	KASSERT(cp != NULL, ("Non-valid disk in %s.", __func__));
	sc = (struct g_shsec_softc *)cp->private;
	KASSERT(sc != NULL, ("NULL sc in %s.", __func__));
	no = cp->index;

	G_SHSEC_DEBUG(0, "Disk %s removed from %s.", cp->provider->name,
	    sc->sc_name);

	sc->sc_disks[no] = NULL;
	if (sc->sc_provider != NULL) {
		g_orphan_provider(sc->sc_provider, ENXIO);
		sc->sc_provider = NULL;
		G_SHSEC_DEBUG(0, "Device %s removed.", sc->sc_name);
	}

	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_shsec_orphan(struct g_consumer *cp)
{
	struct g_shsec_softc *sc;
	struct g_geom *gp;

	g_topology_assert();
	gp = cp->geom;
	sc = gp->softc;
	if (sc == NULL)
		return;

	g_shsec_remove_disk(cp);
	/* If there are no valid disks anymore, remove the device. */
	if (g_shsec_nvalid(sc) == 0)
		g_shsec_destroy(sc, 1);
}

static int
g_shsec_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp1, *cp2;
	struct g_shsec_softc *sc;
	struct g_geom *gp;
	int error;

	gp = pp->geom;
	sc = gp->softc;

	if (sc == NULL) {
		/*
		 * It looks like the geom is being withered.
		 * In that case we allow only negative requests.
		 */
		KASSERT(dr <= 0 && dw <= 0 && de <= 0,
		    ("Positive access request (device=%s).", pp->name));
		if ((pp->acr + dr) == 0 && (pp->acw + dw) == 0 &&
		    (pp->ace + de) == 0) {
			G_SHSEC_DEBUG(0, "Device %s definitely destroyed.",
			    gp->name);
		}
		return (0);
	}

	/* On first open, grab an extra "exclusive" bit */
	if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
		de++;
	/* ... and let go of it on last close */
	if ((pp->acr + dr) == 0 && (pp->acw + dw) == 0 && (pp->ace + de) == 0)
		de--;

	error = ENXIO;
	LIST_FOREACH(cp1, &gp->consumer, consumer) {
		error = g_access(cp1, dr, dw, de);
		if (error == 0)
			continue;
		/*
		 * If we fail here, back out all previous changes.
		 */
		LIST_FOREACH(cp2, &gp->consumer, consumer) {
			if (cp1 == cp2)
				return (error);
			g_access(cp2, -dr, -dw, -de);
		}
		/* NOTREACHED */
	}

	return (error);
}

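/*
 * Read-side reconstruction: XOR a freshly read share (src) into the
 * parent's buffer (dst).  The first share to complete is copied instead
 * (see G_SHSEC_BFLAG_FIRST in g_shsec_done()), so after all components
 * have answered the parent holds share[0] ^ ... ^ share[n-1], which is
 * the original data.  The length is expected to be a multiple of 4
 * bytes; the KASSERT below checks that.
 */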
static void
g_shsec_xor1(uint32_t *src, uint32_t *dst, ssize_t len)
{

	for (; len > 0; len -= sizeof(uint32_t), dst++)
		*dst = *dst ^ *src++;
	KASSERT(len == 0, ("len != 0 (len=%zd)", len));
}

static void
g_shsec_done(struct bio *bp)
{
	struct g_shsec_softc *sc;
	struct bio *pbp;

	pbp = bp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	if (bp->bio_error == 0)
		G_SHSEC_LOGREQ(2, bp, "Request done.");
	else {
		G_SHSEC_LOGREQ(0, bp, "Request failed (error=%d).",
		    bp->bio_error);
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
	}
	if (pbp->bio_cmd == BIO_READ) {
		if ((pbp->bio_pflags & G_SHSEC_BFLAG_FIRST) != 0) {
			bcopy(bp->bio_data, pbp->bio_data, pbp->bio_length);
			pbp->bio_pflags = 0;
		} else {
			g_shsec_xor1((uint32_t *)bp->bio_data,
			    (uint32_t *)pbp->bio_data,
			    (ssize_t)pbp->bio_length);
		}
	}
	bzero(bp->bio_data, bp->bio_length);
	uma_zfree(g_shsec_zone, bp->bio_data);
	g_destroy_bio(bp);
	pbp->bio_inbed++;
	if (pbp->bio_children == pbp->bio_inbed) {
		pbp->bio_completed = pbp->bio_length;
		g_io_deliver(pbp, pbp->bio_error);
	}
}

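/*
 * Write-side share generation: fill the component's buffer (rand) with
 * fresh random words while XOR-ing them into the first component's
 * buffer (dst).  Once this has been done for components 1..n-1,
 * component 0 carries data ^ r1 ^ ... ^ r(n-1) and all the others carry
 * pure random data, so no proper subset of the components reveals the
 * original data.
 */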
static void
g_shsec_xor2(uint32_t *rand, uint32_t *dst, ssize_t len)
{

	for (; len > 0; len -= sizeof(uint32_t), dst++) {
		*rand = arc4random();
		*dst = *dst ^ *rand++;
	}
	KASSERT(len == 0, ("len != 0 (len=%zd)", len));
}

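/*
 * Every request is cloned once per component.  For writes the clones
 * carry the shares built with g_shsec_xor2() above; for reads the shares
 * are recombined in g_shsec_done().  A rough sketch of the write path
 * for an n-way device:
 *
 *	share[0] = data;
 *	for (i = 1; i < n; i++) {
 *		share[i] = random();
 *		share[0] ^= share[i];
 *	}
 */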
static void
g_shsec_start(struct bio *bp)
{
	TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue);
	struct g_shsec_softc *sc;
	struct bio *cbp;
	uint32_t *dst;
	ssize_t len;
	u_int no;
	int error;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL, provider's error should be set and g_shsec_start()
	 * should not be called at all.
	 */
	KASSERT(sc != NULL,
	    ("Provider's error should be set (error=%d)(device=%s).",
	    bp->bio_to->error, bp->bio_to->name));

	G_SHSEC_LOGREQ(2, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_FLUSH:
		/*
		 * Only these requests are supported.
		 */
		break;
	case BIO_DELETE:
	case BIO_GETATTR:
		/* To which provider should it be delivered? */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	/*
	 * Allocate all bios first and calculate XOR.
	 */
	dst = NULL;
	len = bp->bio_length;
	if (bp->bio_cmd == BIO_READ)
		bp->bio_pflags = G_SHSEC_BFLAG_FIRST;
	for (no = 0; no < sc->sc_ndisks; no++) {
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			error = ENOMEM;
			goto failure;
		}
		TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);

		/*
		 * Fill in the component buf structure.
		 */
		cbp->bio_done = g_shsec_done;
		cbp->bio_data = uma_zalloc(g_shsec_zone, M_NOWAIT);
		if (cbp->bio_data == NULL) {
			g_shsec_alloc_failed++;
			error = ENOMEM;
			goto failure;
		}
		cbp->bio_caller2 = sc->sc_disks[no];
		if (bp->bio_cmd == BIO_WRITE) {
			if (no == 0) {
				dst = (uint32_t *)cbp->bio_data;
				bcopy(bp->bio_data, dst, len);
			} else {
				g_shsec_xor2((uint32_t *)cbp->bio_data, dst,
				    len);
			}
		}
	}
	/*
	 * Fire off all allocated requests!
	 */
	while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
		struct g_consumer *cp;

		TAILQ_REMOVE(&queue, cbp, bio_queue);
		cp = cbp->bio_caller2;
		cbp->bio_caller2 = NULL;
		cbp->bio_to = cp->provider;
		G_SHSEC_LOGREQ(2, cbp, "Sending request.");
		g_io_request(cbp, cp);
	}
	return;
failure:
	while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, cbp, bio_queue);
		bp->bio_children--;
		if (cbp->bio_data != NULL) {
			bzero(cbp->bio_data, cbp->bio_length);
			uma_zfree(g_shsec_zone, cbp->bio_data);
		}
		g_destroy_bio(cbp);
	}
	if (bp->bio_error == 0)
		bp->bio_error = error;
	g_io_deliver(bp, bp->bio_error);
}

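/*
 * Once all sc_ndisks components are present, create the provider.  Its
 * media size is the smallest component media size minus one sector (the
 * last sector of each component is reserved for the metadata) and its
 * sector size is the least common multiple of the component sector
 * sizes.
 */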
static void
g_shsec_check_and_run(struct g_shsec_softc *sc)
{
	off_t mediasize, ms;
	u_int no, sectorsize = 0;

	if (g_shsec_nvalid(sc) != sc->sc_ndisks)
		return;

	sc->sc_provider = g_new_providerf(sc->sc_geom, "shsec/%s", sc->sc_name);
	/*
	 * Find the smallest disk.
	 */
	mediasize = sc->sc_disks[0]->provider->mediasize;
	mediasize -= sc->sc_disks[0]->provider->sectorsize;
	sectorsize = sc->sc_disks[0]->provider->sectorsize;
	for (no = 1; no < sc->sc_ndisks; no++) {
		ms = sc->sc_disks[no]->provider->mediasize;
		ms -= sc->sc_disks[no]->provider->sectorsize;
		if (ms < mediasize)
			mediasize = ms;
		sectorsize = lcm(sectorsize,
		    sc->sc_disks[no]->provider->sectorsize);
	}
	sc->sc_provider->sectorsize = sectorsize;
	sc->sc_provider->mediasize = mediasize;
	g_error_provider(sc->sc_provider, 0);

	G_SHSEC_DEBUG(0, "Device %s activated.", sc->sc_name);
}

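/*
 * The on-disk metadata lives in the component's last sector.  Read it
 * while holding a temporary read access reference and decode it with
 * shsec_metadata_decode().
 */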
static int
g_shsec_read_metadata(struct g_consumer *cp, struct g_shsec_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL)
		return (error);

	/* Decode metadata. */
	shsec_metadata_decode(buf, md);
	g_free(buf);

	return (0);
}

/*
 * Add a disk to the given device.
 */
static int
g_shsec_add_disk(struct g_shsec_softc *sc, struct g_provider *pp, u_int no)
{
	struct g_consumer *cp, *fcp;
	struct g_geom *gp;
	struct g_shsec_metadata md;
	int error;

	/* Metadata corrupted? */
	if (no >= sc->sc_ndisks)
		return (EINVAL);

	/* Check that the disk is not already attached. */
	if (sc->sc_disks[no] != NULL)
		return (EEXIST);

	gp = sc->sc_geom;
	fcp = LIST_FIRST(&gp->consumer);

	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		return (error);
	}

	if (fcp != NULL && (fcp->acr > 0 || fcp->acw > 0 || fcp->ace > 0)) {
		error = g_access(cp, fcp->acr, fcp->acw, fcp->ace);
		if (error != 0) {
			g_detach(cp);
			g_destroy_consumer(cp);
			return (error);
		}
	}

	/* Reread metadata. */
	error = g_shsec_read_metadata(cp, &md);
	if (error != 0)
		goto fail;

	if (strcmp(md.md_magic, G_SHSEC_MAGIC) != 0 ||
	    strcmp(md.md_name, sc->sc_name) != 0 || md.md_id != sc->sc_id) {
		G_SHSEC_DEBUG(0, "Metadata on %s changed.", pp->name);
		goto fail;
	}

	cp->private = sc;
	cp->index = no;
	sc->sc_disks[no] = cp;

	G_SHSEC_DEBUG(0, "Disk %s attached to %s.", pp->name, sc->sc_name);

	g_shsec_check_and_run(sc);

	return (0);
fail:
	if (fcp != NULL && (fcp->acr > 0 || fcp->acw > 0 || fcp->ace > 0))
		g_access(cp, -fcp->acr, -fcp->acw, -fcp->ace);
	g_detach(cp);
	g_destroy_consumer(cp);
	return (error);
}

static struct g_geom *
g_shsec_create(struct g_class *mp, const struct g_shsec_metadata *md)
{
	struct g_shsec_softc *sc;
	struct g_geom *gp;
	u_int no;

	G_SHSEC_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id);

	/* Two disks is the minimum. */
	if (md->md_all < 2) {
		G_SHSEC_DEBUG(0, "Too few disks defined for %s.", md->md_name);
		return (NULL);
	}

	/* Check for duplicate unit */
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc != NULL && strcmp(sc->sc_name, md->md_name) == 0) {
			G_SHSEC_DEBUG(0, "Device %s already configured.",
			    sc->sc_name);
			return (NULL);
		}
	}
	gp = g_new_geomf(mp, "%s", md->md_name);
	sc = malloc(sizeof(*sc), M_SHSEC, M_WAITOK | M_ZERO);
	gp->start = g_shsec_start;
	gp->spoiled = g_shsec_orphan;
	gp->orphan = g_shsec_orphan;
	gp->access = g_shsec_access;
	gp->dumpconf = g_shsec_dumpconf;

	sc->sc_id = md->md_id;
	sc->sc_ndisks = md->md_all;
	sc->sc_disks = malloc(sizeof(struct g_consumer *) * sc->sc_ndisks,
	    M_SHSEC, M_WAITOK | M_ZERO);
	for (no = 0; no < sc->sc_ndisks; no++)
		sc->sc_disks[no] = NULL;

	gp->softc = sc;
	sc->sc_geom = gp;
	sc->sc_provider = NULL;

	G_SHSEC_DEBUG(0, "Device %s created (id=%u).", sc->sc_name, sc->sc_id);

	return (gp);
}

static int
g_shsec_destroy(struct g_shsec_softc *sc, boolean_t force)
{
	struct g_provider *pp;
	struct g_geom *gp;
	u_int no;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	pp = sc->sc_provider;
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_SHSEC_DEBUG(0, "Device %s is still open, so it "
			    "can't be definitely removed.", pp->name);
		} else {
			G_SHSEC_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	}

	for (no = 0; no < sc->sc_ndisks; no++) {
		if (sc->sc_disks[no] != NULL)
			g_shsec_remove_disk(sc->sc_disks[no]);
	}

	gp = sc->sc_geom;
	gp->softc = NULL;
	KASSERT(sc->sc_provider == NULL, ("Provider still exists? (device=%s)",
	    gp->name));
	free(sc->sc_disks, M_SHSEC);
	free(sc, M_SHSEC);

	pp = LIST_FIRST(&gp->provider);
	if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
		G_SHSEC_DEBUG(0, "Device %s destroyed.", gp->name);

	g_wither_geom(gp, ENXIO);

	return (0);
}

static int
g_shsec_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused,
    struct g_geom *gp)
{
	struct g_shsec_softc *sc;

	sc = gp->softc;
	return (g_shsec_destroy(sc, 0));
}

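/*
 * Taste a provider: read and validate the metadata stored in its last
 * sector, then either attach the provider to an already known device
 * with the same name and id, or create a new one.  The device is only
 * activated (g_shsec_check_and_run()) once all md_all components have
 * been tasted.
 */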
static struct g_geom *
g_shsec_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_shsec_metadata md;
	struct g_shsec_softc *sc;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	G_SHSEC_DEBUG(3, "Tasting %s.", pp->name);

	gp = g_new_geomf(mp, "shsec:taste");
	gp->start = g_shsec_start;
	gp->access = g_shsec_access;
	gp->orphan = g_shsec_orphan;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_shsec_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (strcmp(md.md_magic, G_SHSEC_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_SHSEC_VERSION) {
		G_SHSEC_DEBUG(0, "Kernel module is too old to handle %s.",
		    pp->name);
		return (NULL);
	}
	/*
	 * Backward compatibility:
	 */
	/* There was no md_provsize field in earlier versions of metadata. */
	if (md.md_version < 1)
		md.md_provsize = pp->mediasize;

	if (md.md_provider[0] != '\0' &&
	    !g_compare_names(md.md_provider, pp->name))
		return (NULL);
	if (md.md_provsize != pp->mediasize)
		return (NULL);

	/*
	 * Let's check if the device already exists.
	 */
	sc = NULL;
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (strcmp(md.md_name, sc->sc_name) != 0)
			continue;
		if (md.md_id != sc->sc_id)
			continue;
		break;
	}
	if (gp != NULL) {
		G_SHSEC_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
		error = g_shsec_add_disk(sc, pp, md.md_no);
		if (error != 0) {
			G_SHSEC_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
			    pp->name, gp->name, error);
			return (NULL);
		}
	} else {
		gp = g_shsec_create(mp, &md);
		if (gp == NULL) {
			G_SHSEC_DEBUG(0, "Cannot create device %s.", md.md_name);
			return (NULL);
		}
		sc = gp->softc;
		G_SHSEC_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
		error = g_shsec_add_disk(sc, pp, md.md_no);
		if (error != 0) {
			G_SHSEC_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
			    pp->name, gp->name, error);
			g_shsec_destroy(sc, 1);
			return (NULL);
		}
	}
	return (gp);
}

static struct g_shsec_softc *
g_shsec_find_device(struct g_class *mp, const char *name)
{
	struct g_shsec_softc *sc;
	struct g_geom *gp;

	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (strcmp(sc->sc_name, name) == 0)
			return (sc);
	}
	return (NULL);
}

static void
g_shsec_ctl_destroy(struct gctl_req *req, struct g_class *mp)
{
	struct g_shsec_softc *sc;
	int *force, *nargs, error;
	const char *name;
	char param[16];
	u_int i;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument.", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}
	force = gctl_get_paraml(req, "force", sizeof(*force));
	if (force == NULL) {
		gctl_error(req, "No '%s' argument.", "force");
		return;
	}

	for (i = 0; i < (u_int)*nargs; i++) {
		snprintf(param, sizeof(param), "arg%u", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%u' argument.", i);
			return;
		}
		sc = g_shsec_find_device(mp, name);
		if (sc == NULL) {
			gctl_error(req, "No such device: %s.", name);
			return;
		}
		error = g_shsec_destroy(sc, *force);
		if (error != 0) {
			gctl_error(req, "Cannot destroy device %s (error=%d).",
			    sc->sc_name, error);
			return;
		}
	}
}

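/*
 * Entry point for gctl requests coming from userland.  Only the "stop"
 * verb is handled in the kernel; devices are created by writing metadata
 * onto the components from userland.  As an illustration (see gshsec(8)
 * for the authoritative syntax), `gshsec stop -f <name>` arrives here
 * with verb "stop", an nargs/arg0 pair and a "force" flag.
 */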
static void
g_shsec_config(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	uint32_t *version;

	g_topology_assert();

	version = gctl_get_paraml(req, "version", sizeof(*version));
	if (version == NULL) {
		gctl_error(req, "No '%s' argument.", "version");
		return;
	}
	if (*version != G_SHSEC_VERSION) {
		gctl_error(req, "Userland and kernel parts are out of sync.");
		return;
	}

	if (strcmp(verb, "stop") == 0) {
		g_shsec_ctl_destroy(req, mp);
		return;
	}

	gctl_error(req, "Unknown verb.");
}

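/*
 * Emit XML fragments for the GEOM configuration tree (exported via the
 * kern.geom.confxml sysctl).  For the geom level this produces, roughly:
 *
 *	<ID>123456</ID>
 *	<Status>Total=2, Online=2</Status>
 *	<State>UP</State>
 */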
static void
g_shsec_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_shsec_softc *sc;

	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL) {
		/* Nothing here. */
	} else if (cp != NULL) {
		sbuf_printf(sb, "%s<Number>%u</Number>\n", indent,
		    (u_int)cp->index);
	} else {
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
		sbuf_printf(sb, "%s<Status>Total=%u, Online=%u</Status>\n",
		    indent, sc->sc_ndisks, g_shsec_nvalid(sc));
		sbuf_printf(sb, "%s<State>", indent);
		if (sc->sc_provider != NULL && sc->sc_provider->error == 0)
			sbuf_printf(sb, "UP");
		else
			sbuf_printf(sb, "DOWN");
		sbuf_printf(sb, "</State>\n");
	}
}

DECLARE_GEOM_CLASS(g_shsec_class, g_shsec);