/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/shsec/g_shsec.h>

FEATURE(geom_shsec, "GEOM shared secret device support");

static MALLOC_DEFINE(M_SHSEC, "shsec_data", "GEOM_SHSEC Data");

static uma_zone_t g_shsec_zone;

static int g_shsec_destroy(struct g_shsec_softc *sc, boolean_t force);
static int g_shsec_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);

static g_taste_t g_shsec_taste;
static g_ctl_req_t g_shsec_config;
static g_dumpconf_t g_shsec_dumpconf;
static g_init_t g_shsec_init;
static g_fini_t g_shsec_fini;

struct g_class g_shsec_class = {
	.name = G_SHSEC_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_shsec_config,
	.taste = g_shsec_taste,
	.destroy_geom = g_shsec_destroy_geom,
	.init = g_shsec_init,
	.fini = g_shsec_fini
};

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, shsec, CTLFLAG_RW, 0,
    "GEOM_SHSEC stuff");
static u_int g_shsec_debug = 0;
SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, debug, CTLFLAG_RWTUN, &g_shsec_debug, 0,
    "Debug level");
static u_int g_shsec_maxmem = MAXPHYS * 100;
SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, maxmem, CTLFLAG_RDTUN, &g_shsec_maxmem,
    0, "Maximum memory that can be allocated for I/O (in bytes)");
static u_int g_shsec_alloc_failed = 0;
SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, alloc_failed, CTLFLAG_RD,
    &g_shsec_alloc_failed, 0, "How many times I/O allocation failed");

/*
 * Greatest Common Divisor.
 */
static u_int
gcd(u_int a, u_int b)
{
	u_int c;

	while (b != 0) {
		c = a;
		a = b;
		b = (c % b);
	}
	return (a);
}

/*
 * Least Common Multiple.
 */
static u_int
lcm(u_int a, u_int b)
{

	return ((a * b) / gcd(a, b));
}

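/*
 * Class initialization: create the UMA zone used for per-component I/O
 * buffers and cap its size according to the g_shsec_maxmem limit.
 */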
static void
g_shsec_init(struct g_class *mp __unused)
{

	g_shsec_zone = uma_zcreate("g_shsec_zone", MAXPHYS, NULL, NULL, NULL,
	    NULL, 0, 0);
	g_shsec_maxmem -= g_shsec_maxmem % MAXPHYS;
	uma_zone_set_max(g_shsec_zone, g_shsec_maxmem / MAXPHYS);
}

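/*
 * Class cleanup: destroy the I/O buffer zone.
 */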
static void
g_shsec_fini(struct g_class *mp __unused)
{

	uma_zdestroy(g_shsec_zone);
}

/*
 * Return the number of valid disks.
 */
static u_int
g_shsec_nvalid(struct g_shsec_softc *sc)
{
	u_int i, no;

	no = 0;
	for (i = 0; i < sc->sc_ndisks; i++) {
		if (sc->sc_disks[i] != NULL)
			no++;
	}

	return (no);
}

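/*
 * Drop a component: detach its consumer and wither the device's provider,
 * since the device cannot operate with a missing component.
 */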
static void
g_shsec_remove_disk(struct g_consumer *cp)
{
	struct g_shsec_softc *sc;
	u_int no;

	KASSERT(cp != NULL, ("Non-valid disk in %s.", __func__));
	sc = (struct g_shsec_softc *)cp->private;
	KASSERT(sc != NULL, ("NULL sc in %s.", __func__));
	no = cp->index;

	G_SHSEC_DEBUG(0, "Disk %s removed from %s.", cp->provider->name,
	    sc->sc_name);

	sc->sc_disks[no] = NULL;
	if (sc->sc_provider != NULL) {
		g_wither_provider(sc->sc_provider, ENXIO);
		sc->sc_provider = NULL;
		G_SHSEC_DEBUG(0, "Device %s removed.", sc->sc_name);
	}

	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		return;
	g_detach(cp);
	g_destroy_consumer(cp);
}

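/*
 * Orphan/spoil handler for component consumers: drop the component and
 * destroy the whole device once no valid components remain.
 */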
static void
g_shsec_orphan(struct g_consumer *cp)
{
	struct g_shsec_softc *sc;
	struct g_geom *gp;

	g_topology_assert();
	gp = cp->geom;
	sc = gp->softc;
	if (sc == NULL)
		return;

	g_shsec_remove_disk(cp);
	/* If there are no valid disks anymore, remove device. */
	if (g_shsec_nvalid(sc) == 0)
		g_shsec_destroy(sc, 1);
}

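/*
 * Propagate an access request to all component consumers, backing the
 * changes out if any of them fails.
 */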
static int
g_shsec_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp1, *cp2, *tmp;
	struct g_shsec_softc *sc;
	struct g_geom *gp;
	int error;

	gp = pp->geom;
	sc = gp->softc;

	if (sc == NULL) {
		/*
		 * It looks like geom is being withered.
		 * In that case we allow only negative requests.
		 */
		KASSERT(dr <= 0 && dw <= 0 && de <= 0,
		    ("Positive access request (device=%s).", pp->name));
		if ((pp->acr + dr) == 0 && (pp->acw + dw) == 0 &&
		    (pp->ace + de) == 0) {
			G_SHSEC_DEBUG(0, "Device %s definitely destroyed.",
			    gp->name);
		}
		return (0);
	}

	/* On first open, grab an extra "exclusive" bit */
	if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
		de++;
	/* ... and let go of it on last close */
	if ((pp->acr + dr) == 0 && (pp->acw + dw) == 0 && (pp->ace + de) == 0)
		de--;

	error = ENXIO;
	LIST_FOREACH_SAFE(cp1, &gp->consumer, consumer, tmp) {
		error = g_access(cp1, dr, dw, de);
		if (error != 0)
			goto fail;
		if (cp1->acr == 0 && cp1->acw == 0 && cp1->ace == 0 &&
		    cp1->flags & G_CF_ORPHAN) {
			g_detach(cp1);
			g_destroy_consumer(cp1);
		}
	}
	return (error);

fail:
	/* If we fail here, back out all previous changes. */
	LIST_FOREACH(cp2, &gp->consumer, consumer) {
		if (cp1 == cp2)
			break;
		g_access(cp2, -dr, -dw, -de);
	}
	return (error);
}

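/*
 * XOR 'len' bytes from 'src' into 'dst'; 'len' must be a multiple of
 * sizeof(uint32_t).
 */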
static void
g_shsec_xor1(uint32_t *src, uint32_t *dst, ssize_t len)
{

	for (; len > 0; len -= sizeof(uint32_t), dst++)
		*dst = *dst ^ *src++;
	KASSERT(len == 0, ("len != 0 (len=%zd)", len));
}

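/*
 * Completion handler for component I/O: for reads, combine (XOR) the
 * component data into the parent buffer; deliver the parent bio once all
 * children have completed.
 */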
static void
g_shsec_done(struct bio *bp)
{
	struct g_shsec_softc *sc;
	struct bio *pbp;

	pbp = bp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	if (bp->bio_error == 0)
		G_SHSEC_LOGREQ(2, bp, "Request done.");
	else {
		G_SHSEC_LOGREQ(0, bp, "Request failed (error=%d).",
		    bp->bio_error);
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
	}
	if (pbp->bio_cmd == BIO_READ) {
		if ((pbp->bio_pflags & G_SHSEC_BFLAG_FIRST) != 0) {
			bcopy(bp->bio_data, pbp->bio_data, pbp->bio_length);
			pbp->bio_pflags = 0;
		} else {
			g_shsec_xor1((uint32_t *)bp->bio_data,
			    (uint32_t *)pbp->bio_data,
			    (ssize_t)pbp->bio_length);
		}
	}
	bzero(bp->bio_data, bp->bio_length);
	uma_zfree(g_shsec_zone, bp->bio_data);
	g_destroy_bio(bp);
	pbp->bio_inbed++;
	if (pbp->bio_children == pbp->bio_inbed) {
		pbp->bio_completed = pbp->bio_length;
		g_io_deliver(pbp, pbp->bio_error);
	}
}

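/*
 * Fill 'rand' with random data and XOR it into 'dst'; 'len' must be a
 * multiple of sizeof(uint32_t).
 */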
static void
g_shsec_xor2(uint32_t *rand, uint32_t *dst, ssize_t len)
{

	for (; len > 0; len -= sizeof(uint32_t), dst++) {
		*rand = arc4random();
		*dst = *dst ^ *rand++;
	}
	KASSERT(len == 0, ("len != 0 (len=%zd)", len));
}

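/*
 * Handle an I/O request on the provider: clone the bio for every
 * component, generate the random shares for writes, and dispatch the
 * clones to the component consumers.
 */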
static void
g_shsec_start(struct bio *bp)
{
	TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue);
	struct g_shsec_softc *sc;
	struct bio *cbp;
	uint32_t *dst;
	ssize_t len;
	u_int no;
	int error;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL, provider's error should be set and g_shsec_start()
	 * should not be called at all.
	 */
	KASSERT(sc != NULL,
	    ("Provider's error should be set (error=%d)(device=%s).",
	    bp->bio_to->error, bp->bio_to->name));

	G_SHSEC_LOGREQ(2, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_FLUSH:
		/*
		 * Only these requests are supported.
		 */
		break;
	case BIO_DELETE:
	case BIO_GETATTR:
		/* To which provider should it be delivered? */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	/*
	 * Allocate all bios first and calculate XOR.
	 */
	dst = NULL;
	len = bp->bio_length;
	if (bp->bio_cmd == BIO_READ)
		bp->bio_pflags = G_SHSEC_BFLAG_FIRST;
	for (no = 0; no < sc->sc_ndisks; no++) {
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			error = ENOMEM;
			goto failure;
		}
		TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);

		/*
		 * Fill in the component buf structure.
		 */
		cbp->bio_done = g_shsec_done;
		cbp->bio_data = uma_zalloc(g_shsec_zone, M_NOWAIT);
		if (cbp->bio_data == NULL) {
			g_shsec_alloc_failed++;
			error = ENOMEM;
			goto failure;
		}
		cbp->bio_caller2 = sc->sc_disks[no];
		if (bp->bio_cmd == BIO_WRITE) {
			if (no == 0) {
				dst = (uint32_t *)cbp->bio_data;
				bcopy(bp->bio_data, dst, len);
			} else {
				g_shsec_xor2((uint32_t *)cbp->bio_data, dst,
				    len);
			}
		}
	}
	/*
	 * Fire off all allocated requests!
	 */
	while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
		struct g_consumer *cp;

		TAILQ_REMOVE(&queue, cbp, bio_queue);
		cp = cbp->bio_caller2;
		cbp->bio_caller2 = NULL;
		cbp->bio_to = cp->provider;
		G_SHSEC_LOGREQ(2, cbp, "Sending request.");
		g_io_request(cbp, cp);
	}
	return;
failure:
	while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, cbp, bio_queue);
		bp->bio_children--;
		if (cbp->bio_data != NULL) {
			bzero(cbp->bio_data, cbp->bio_length);
			uma_zfree(g_shsec_zone, cbp->bio_data);
		}
		g_destroy_bio(cbp);
	}
	if (bp->bio_error == 0)
		bp->bio_error = error;
	g_io_deliver(bp, bp->bio_error);
}

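/*
 * If all components are present, create and announce the provider.  Its
 * media size is that of the smallest component (minus one sector for
 * metadata) and its sector size is the LCM of the component sector sizes.
 */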
static void
g_shsec_check_and_run(struct g_shsec_softc *sc)
{
	off_t mediasize, ms;
	u_int no, sectorsize = 0;

	if (g_shsec_nvalid(sc) != sc->sc_ndisks)
		return;

	sc->sc_provider = g_new_providerf(sc->sc_geom, "shsec/%s", sc->sc_name);
	/*
	 * Find the smallest disk.
	 */
	mediasize = sc->sc_disks[0]->provider->mediasize;
	mediasize -= sc->sc_disks[0]->provider->sectorsize;
	sectorsize = sc->sc_disks[0]->provider->sectorsize;
	for (no = 1; no < sc->sc_ndisks; no++) {
		ms = sc->sc_disks[no]->provider->mediasize;
		ms -= sc->sc_disks[no]->provider->sectorsize;
		if (ms < mediasize)
			mediasize = ms;
		sectorsize = lcm(sectorsize,
		    sc->sc_disks[no]->provider->sectorsize);
	}
	sc->sc_provider->sectorsize = sectorsize;
	sc->sc_provider->mediasize = mediasize;
	g_error_provider(sc->sc_provider, 0);

	G_SHSEC_DEBUG(0, "Device %s activated.", sc->sc_name);
}

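/*
 * Read and decode the shsec metadata stored in the last sector of the
 * given consumer's provider.
 */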
static int
g_shsec_read_metadata(struct g_consumer *cp, struct g_shsec_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL)
		return (error);

	/* Decode metadata. */
	shsec_metadata_decode(buf, md);
	g_free(buf);

	return (0);
}

/*
 * Add disk to given device.
 */
static int
g_shsec_add_disk(struct g_shsec_softc *sc, struct g_provider *pp, u_int no)
{
	struct g_consumer *cp, *fcp;
	struct g_geom *gp;
	struct g_shsec_metadata md;
	int error;

	/* Metadata corrupted? */
	if (no >= sc->sc_ndisks)
		return (EINVAL);

	/* Check whether the disk is already attached. */
	if (sc->sc_disks[no] != NULL)
		return (EEXIST);

	gp = sc->sc_geom;
	fcp = LIST_FIRST(&gp->consumer);

	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		return (error);
	}

	if (fcp != NULL && (fcp->acr > 0 || fcp->acw > 0 || fcp->ace > 0)) {
		error = g_access(cp, fcp->acr, fcp->acw, fcp->ace);
		if (error != 0) {
			g_detach(cp);
			g_destroy_consumer(cp);
			return (error);
		}
	}

	/* Reread metadata. */
	error = g_shsec_read_metadata(cp, &md);
	if (error != 0)
		goto fail;

	if (strcmp(md.md_magic, G_SHSEC_MAGIC) != 0 ||
	    strcmp(md.md_name, sc->sc_name) != 0 || md.md_id != sc->sc_id) {
		G_SHSEC_DEBUG(0, "Metadata on %s changed.", pp->name);
		goto fail;
	}

	cp->private = sc;
	cp->index = no;
	sc->sc_disks[no] = cp;

	G_SHSEC_DEBUG(0, "Disk %s attached to %s.", pp->name, sc->sc_name);

	g_shsec_check_and_run(sc);

	return (0);
fail:
	if (fcp != NULL && (fcp->acr > 0 || fcp->acw > 0 || fcp->ace > 0))
		g_access(cp, -fcp->acr, -fcp->acw, -fcp->ace);
	g_detach(cp);
	g_destroy_consumer(cp);
	return (error);
}

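/*
 * Create a new geom and softc for the device described by the metadata.
 * The components are attached later, as they are tasted.
 */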
static struct g_geom *
g_shsec_create(struct g_class *mp, const struct g_shsec_metadata *md)
{
	struct g_shsec_softc *sc;
	struct g_geom *gp;
	u_int no;

	G_SHSEC_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id);

	/* At least two disks are required. */
	if (md->md_all < 2) {
		G_SHSEC_DEBUG(0, "Too few disks defined for %s.", md->md_name);
		return (NULL);
	}

	/* Check for duplicate unit */
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc != NULL && strcmp(sc->sc_name, md->md_name) == 0) {
			G_SHSEC_DEBUG(0, "Device %s already configured.",
			    sc->sc_name);
			return (NULL);
		}
	}
	gp = g_new_geomf(mp, "%s", md->md_name);
	sc = malloc(sizeof(*sc), M_SHSEC, M_WAITOK | M_ZERO);
	gp->start = g_shsec_start;
	gp->spoiled = g_shsec_orphan;
	gp->orphan = g_shsec_orphan;
	gp->access = g_shsec_access;
	gp->dumpconf = g_shsec_dumpconf;

	sc->sc_id = md->md_id;
	sc->sc_ndisks = md->md_all;
	sc->sc_disks = malloc(sizeof(struct g_consumer *) * sc->sc_ndisks,
	    M_SHSEC, M_WAITOK | M_ZERO);
	for (no = 0; no < sc->sc_ndisks; no++)
		sc->sc_disks[no] = NULL;

	gp->softc = sc;
	sc->sc_geom = gp;
	sc->sc_provider = NULL;

	G_SHSEC_DEBUG(0, "Device %s created (id=%u).", sc->sc_name, sc->sc_id);

	return (gp);
}

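/*
 * Destroy the device: detach all components and wither the geom.  Fails
 * with EBUSY if the provider is still open and 'force' is not set.
 */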
static int
g_shsec_destroy(struct g_shsec_softc *sc, boolean_t force)
{
	struct g_provider *pp;
	struct g_geom *gp;
	u_int no;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	pp = sc->sc_provider;
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_SHSEC_DEBUG(0, "Device %s is still open, so it "
			    "can't be definitely removed.", pp->name);
		} else {
			G_SHSEC_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	}

	for (no = 0; no < sc->sc_ndisks; no++) {
		if (sc->sc_disks[no] != NULL)
			g_shsec_remove_disk(sc->sc_disks[no]);
	}

	gp = sc->sc_geom;
	gp->softc = NULL;
	KASSERT(sc->sc_provider == NULL, ("Provider still exists? (device=%s)",
	    gp->name));
	free(sc->sc_disks, M_SHSEC);
	free(sc, M_SHSEC);

	pp = LIST_FIRST(&gp->provider);
	if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
		G_SHSEC_DEBUG(0, "Device %s destroyed.", gp->name);

	g_wither_geom(gp, ENXIO);

	return (0);
}

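/*
 * Class destroy_geom method: tear down the given geom if it is not busy.
 */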
static int
g_shsec_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused,
    struct g_geom *gp)
{
	struct g_shsec_softc *sc;

	sc = gp->softc;
	return (g_shsec_destroy(sc, 0));
}

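/*
 * Taste a provider: read its metadata and, if it belongs to a shsec
 * device, attach it to an existing geom or create a new one.
 */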
static struct g_geom *
g_shsec_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_shsec_metadata md;
	struct g_shsec_softc *sc;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	G_SHSEC_DEBUG(3, "Tasting %s.", pp->name);

	gp = g_new_geomf(mp, "shsec:taste");
	gp->start = g_shsec_start;
	gp->access = g_shsec_access;
	gp->orphan = g_shsec_orphan;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_shsec_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (strcmp(md.md_magic, G_SHSEC_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_SHSEC_VERSION) {
		G_SHSEC_DEBUG(0, "Kernel module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	/*
	 * Backward compatibility:
	 */
	/* There was no md_provsize field in earlier versions of metadata. */
	if (md.md_version < 1)
		md.md_provsize = pp->mediasize;

	if (md.md_provider[0] != '\0' &&
	    !g_compare_names(md.md_provider, pp->name))
		return (NULL);
	if (md.md_provsize != pp->mediasize)
		return (NULL);

	/*
	 * Check whether the device already exists.
	 */
	sc = NULL;
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (strcmp(md.md_name, sc->sc_name) != 0)
			continue;
		if (md.md_id != sc->sc_id)
			continue;
		break;
	}
	if (gp != NULL) {
		G_SHSEC_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
		error = g_shsec_add_disk(sc, pp, md.md_no);
		if (error != 0) {
			G_SHSEC_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
			    pp->name, gp->name, error);
			return (NULL);
		}
	} else {
		gp = g_shsec_create(mp, &md);
		if (gp == NULL) {
			G_SHSEC_DEBUG(0, "Cannot create device %s.", md.md_name);
			return (NULL);
		}
		sc = gp->softc;
		G_SHSEC_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
		error = g_shsec_add_disk(sc, pp, md.md_no);
		if (error != 0) {
			G_SHSEC_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
			    pp->name, gp->name, error);
			g_shsec_destroy(sc, 1);
			return (NULL);
		}
	}
	return (gp);
}

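/*
 * Look up a configured device by name.
 */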
static struct g_shsec_softc *
g_shsec_find_device(struct g_class *mp, const char *name)
{
	struct g_shsec_softc *sc;
	struct g_geom *gp;

	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (strcmp(sc->sc_name, name) == 0)
			return (sc);
	}
	return (NULL);
}

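/*
 * Handle the "stop" control request: destroy every device named in the
 * request arguments.
 */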
static void
g_shsec_ctl_destroy(struct gctl_req *req, struct g_class *mp)
{
	struct g_shsec_softc *sc;
	int *force, *nargs, error;
	const char *name;
	char param[16];
	u_int i;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument.", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}
	force = gctl_get_paraml(req, "force", sizeof(*force));
	if (force == NULL) {
		gctl_error(req, "No '%s' argument.", "force");
		return;
	}

	for (i = 0; i < (u_int)*nargs; i++) {
		snprintf(param, sizeof(param), "arg%u", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%u' argument.", i);
			return;
		}
		sc = g_shsec_find_device(mp, name);
		if (sc == NULL) {
			gctl_error(req, "No such device: %s.", name);
			return;
		}
		error = g_shsec_destroy(sc, *force);
		if (error != 0) {
			gctl_error(req, "Cannot destroy device %s (error=%d).",
			    sc->sc_name, error);
			return;
		}
	}
}

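/*
 * Class ctlreq method: verify the userland/kernel metadata version and
 * dispatch the requested verb.
 */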
static void
g_shsec_config(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	uint32_t *version;

	g_topology_assert();

	version = gctl_get_paraml(req, "version", sizeof(*version));
	if (version == NULL) {
		gctl_error(req, "No '%s' argument.", "version");
		return;
	}
	if (*version != G_SHSEC_VERSION) {
		gctl_error(req, "Userland and kernel parts are out of sync.");
		return;
	}

	if (strcmp(verb, "stop") == 0) {
		g_shsec_ctl_destroy(req, mp);
		return;
	}

	gctl_error(req, "Unknown verb.");
}

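/*
 * Dump device state into the confxml output.
 */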
static void
g_shsec_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_shsec_softc *sc;

	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL) {
		/* Nothing here. */
	} else if (cp != NULL) {
		sbuf_printf(sb, "%s<Number>%u</Number>\n", indent,
		    (u_int)cp->index);
	} else {
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
		sbuf_printf(sb, "%s<Status>Total=%u, Online=%u</Status>\n",
		    indent, sc->sc_ndisks, g_shsec_nvalid(sc));
		sbuf_printf(sb, "%s<State>", indent);
		if (sc->sc_provider != NULL && sc->sc_provider->error == 0)
			sbuf_printf(sb, "UP");
		else
			sbuf_printf(sb, "DOWN");
		sbuf_printf(sb, "</State>\n");
	}
}

DECLARE_GEOM_CLASS(g_shsec_class, g_shsec);
MODULE_VERSION(geom_shsec, 0);