xref: /freebsd/sys/geom/gate/g_gate.c (revision 77a1348b3c1cfe8547be49a121b56299a1e18b69)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2009-2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Pawel Jakub Dawidek
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/gate/g_gate.h>

FEATURE(geom_gate, "GEOM Gate module");

static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "GEOM_GATE configuration");
static int g_gate_debug = 0;
SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RWTUN, &g_gate_debug, 0,
    "Debug level");
static u_int g_gate_maxunits = 256;
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN,
    &g_gate_maxunits, 0, "Maximum number of ggate devices");

struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.version = G_VERSION,
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	g_gate_ioctl,
	.d_name =	G_GATE_CTL_NAME
};

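/*
 * All existing ggate devices, indexed by unit number.  The array, the
 * unit count, and each softc's sc_ref and sc_name are protected by
 * g_gate_units_lock.
 */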
static struct g_gate_softc **g_gate_units;
static u_int g_gate_nunits;
static struct mtx g_gate_units_lock;

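/*
 * Tear down a read consumer: drop our read access to its provider,
 * detach it and destroy it.  Called with the topology lock held, either
 * directly or as a GEOM event posted from g_gate_done() when the last
 * outstanding read on an abandoned consumer completes.
 */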
static void
g_gate_detach(void *arg, int flags __unused)
{
	struct g_consumer *cp = arg;

	g_topology_assert();
	G_GATE_DEBUG(1, "Destroying read consumer on provider %s.",
	    cp->provider->name);
	(void)g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
}

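/*
 * Destroy a ggate device.  Unless destruction is forced, fail with
 * EBUSY while the provider is still open.  Otherwise mark the softc as
 * dying, wither the provider, cancel all queued bios with ENXIO, wait
 * for all other references to drain and free the softc.  Called with
 * the topology and unit-table locks held; the unit-table lock is
 * dropped on all paths.
 */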
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct bio_queue_head queue;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_units_lock, MA_OWNED);
	pp = sc->sc_provider;
	if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_units_lock);
		return (EBUSY);
	}
	mtx_unlock(&g_gate_units_lock);
	mtx_lock(&sc->sc_queue_mtx);
	sc->sc_flags |= G_GATE_FLAG_DESTROY;
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	gp = pp->geom;
	g_wither_provider(pp, ENXIO);
	callout_drain(&sc->sc_callout);
	bioq_init(&queue);
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_takefirst(&sc->sc_inqueue)) != NULL) {
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	while ((bp = bioq_takefirst(&sc->sc_outqueue)) != NULL) {
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	g_topology_unlock();
	while ((bp = bioq_takefirst(&queue)) != NULL) {
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	mtx_lock(&g_gate_units_lock);
	/* One reference is ours. */
	sc->sc_ref--;
	while (sc->sc_ref > 0)
		msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_read_mtx);
	g_topology_lock();
	if ((cp = sc->sc_readcons) != NULL) {
		sc->sc_readcons = NULL;
		(void)g_access(cp, -1, 0, 0);
		g_detach(cp);
		g_destroy_consumer(cp);
	}
	G_GATE_DEBUG(1, "Device %s destroyed.", gp->name);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}

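/*
 * GEOM access method.  Dropping access (all deltas non-positive) is
 * always allowed.  Gaining access is refused once the device is being
 * destroyed, and read access is refused on write-only devices.
 */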
static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_gate_softc *sc;

	if (dr <= 0 && dw <= 0 && de <= 0)
		return (0);
	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		return (ENXIO);
	/* XXX: Hack to allow read-only mounts. */
#if 0
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
		return (EPERM);
#endif
	if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
		return (EPERM);
	return (0);
}

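/*
 * Hand a bio over to the userland daemon: tag it with a sequence number
 * and append it to the incoming queue, waking up any thread sleeping in
 * G_GATE_CMD_START.  The request is failed with ENOMEM if the queue is
 * already full and with ENXIO if the device is going away.
 */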
static void
g_gate_queue_io(struct bio *bp)
{
	struct g_gate_softc *sc;

	sc = bp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	mtx_lock(&sc->sc_queue_mtx);

	if (sc->sc_queue_size > 0 && sc->sc_queue_count > sc->sc_queue_size) {
		mtx_unlock(&sc->sc_queue_mtx);
		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
		g_io_deliver(bp, ENOMEM);
		return;
	}

	bp->bio_driver1 = (void *)sc->sc_seq;
	sc->sc_seq++;
	sc->sc_queue_count++;

	bioq_insert_tail(&sc->sc_inqueue, bp);
	wakeup(sc);

	mtx_unlock(&sc->sc_queue_mtx);
}

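/*
 * Completion handler for direct reads issued to the read provider.  On
 * success the parent bio is completed immediately; on error the read
 * falls back to the userland daemon.  The consumer's in-flight request
 * count is maintained so that a consumer that is no longer the current
 * read consumer can be torn down once its last request drains.
 */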
static void
g_gate_done(struct bio *cbp)
{
	struct g_gate_softc *sc;
	struct bio *pbp;
	struct g_consumer *cp;

	cp = cbp->bio_from;
	pbp = cbp->bio_parent;
	if (cbp->bio_error == 0) {
		pbp->bio_completed = cbp->bio_completed;
		g_destroy_bio(cbp);
		pbp->bio_inbed++;
		g_io_deliver(pbp, 0);
	} else {
		/*
		 * The direct read failed; pass the request through the
		 * userland daemon instead.
		 */
		g_destroy_bio(cbp);
		pbp->bio_children--;
		g_gate_queue_io(pbp);
	}

	sc = cp->geom->softc;
	mtx_lock(&sc->sc_read_mtx);
	if (--cp->index == 0 && sc->sc_readcons != cp)
		g_post_event(g_gate_detach, cp, M_NOWAIT, NULL);
	mtx_unlock(&sc->sc_read_mtx);
}

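/*
 * GEOM start method.  Reads are cloned and sent directly to the read
 * provider when one is configured; writes and other modifying requests
 * are rejected on read-only devices.  Everything else that is supported
 * is queued for the userland daemon.
 */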
static void
g_gate_start(struct bio *pbp)
{
	struct g_gate_softc *sc;
	struct g_consumer *cp;
	struct bio *cbp;

	sc = pbp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(pbp, ENXIO);
		return;
	}
	G_GATE_LOGREQ(2, pbp, "Request received.");
	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (sc->sc_readcons == NULL)
			break;
		cbp = g_clone_bio(pbp);
		if (cbp == NULL) {
			g_io_deliver(pbp, ENOMEM);
			return;
		}
		mtx_lock(&sc->sc_read_mtx);
		if ((cp = sc->sc_readcons) == NULL) {
			mtx_unlock(&sc->sc_read_mtx);
			g_destroy_bio(cbp);
			pbp->bio_children--;
			break;
		}
		cp->index++;
		cbp->bio_offset = pbp->bio_offset + sc->sc_readoffset;
		mtx_unlock(&sc->sc_read_mtx);
		cbp->bio_done = g_gate_done;
		g_io_request(cbp, cp);
		return;
	case BIO_DELETE:
	case BIO_WRITE:
	case BIO_FLUSH:
	case BIO_SPEEDUP:
		/* XXX: Hack to allow read-only mounts. */
		if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
			g_io_deliver(pbp, EPERM);
			return;
		}
		break;
	case BIO_GETATTR:
	default:
		G_GATE_LOGREQ(2, pbp, "Ignoring request.");
		g_io_deliver(pbp, EOPNOTSUPP);
		return;
	}

	g_gate_queue_io(pbp);
}

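/*
 * Look up a softc by unit number, or by provider name when unit is
 * G_GATE_NAME_GIVEN, and take a reference on it.  Returns NULL if no
 * matching device exists.
 */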
static struct g_gate_softc *
g_gate_hold(int unit, const char *name)
{
	struct g_gate_softc *sc = NULL;

	mtx_lock(&g_gate_units_lock);
	if (unit >= 0 && unit < g_gate_maxunits)
		sc = g_gate_units[unit];
	else if (unit == G_GATE_NAME_GIVEN) {
		KASSERT(name != NULL, ("name is NULL"));
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				continue;
			if (strcmp(name,
			    g_gate_units[unit]->sc_provider->name) != 0) {
				continue;
			}
			sc = g_gate_units[unit];
			break;
		}
	}
	if (sc != NULL)
		sc->sc_ref++;
	mtx_unlock(&g_gate_units_lock);
	return (sc);
}

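/*
 * Drop a reference obtained with g_gate_hold() and wake up a thread
 * waiting in g_gate_destroy() when the last reference goes away.
 */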
static void
g_gate_release(struct g_gate_softc *sc)
{

	g_topology_assert_not();
	mtx_lock(&g_gate_units_lock);
	sc->sc_ref--;
	KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
	if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		wakeup(&sc->sc_ref);
	mtx_unlock(&g_gate_units_lock);
}

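/*
 * Allocate a unit number.  A non-negative request returns that unit if
 * it is free (EINVAL if out of range, EEXIST if taken); a negative
 * request returns the first free unit, or ENFILE when none is left.
 * Must be called with the unit-table lock held; returns -1 on failure
 * with *errorp set.
 */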
static int
g_gate_getunit(int unit, int *errorp)
{

	mtx_assert(&g_gate_units_lock, MA_OWNED);
	if (unit >= 0) {
		if (unit >= g_gate_maxunits)
			*errorp = EINVAL;
		else if (g_gate_units[unit] == NULL)
			return (unit);
		else
			*errorp = EEXIST;
	} else {
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				return (unit);
		}
		*errorp = ENFILE;
	}
	return (-1);
}

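/*
 * Watchdog callout: fail every queued request that has been sitting in
 * the incoming or outgoing queue for five seconds or more with EIO,
 * then reschedule itself unless the device is being destroyed.
 */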
static void
g_gate_guard(void *arg)
{
	struct bio_queue_head queue;
	struct g_gate_softc *sc;
	struct bintime curtime;
	struct bio *bp, *bp2;

	sc = arg;
	binuptime(&curtime);
	g_gate_hold(sc->sc_unit, NULL);
	bioq_init(&queue);
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	while ((bp = bioq_takefirst(&queue)) != NULL) {
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	g_gate_release(sc);
}

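/*
 * GEOM orphan method for the read consumer.  The consumer is forgotten
 * immediately and destroyed right away if it has no requests in flight;
 * otherwise g_gate_done() will destroy it once the last request drains.
 */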
static void
g_gate_orphan(struct g_consumer *cp)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	int done;

	g_topology_assert();
	gp = cp->geom;
	sc = gp->softc;
	mtx_lock(&sc->sc_read_mtx);
	if (sc->sc_readcons == cp)
		sc->sc_readcons = NULL;
	done = (cp->index == 0);
	mtx_unlock(&sc->sc_read_mtx);
	if (done)
		g_gate_detach(cp, 0);
}

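/*
 * GEOM dumpconf method: export the device's access mode, read provider,
 * timeout, queue state and reference count into the confxml tree.
 */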
static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_gate_softc *sc;

	sc = gp->softc;
	if (sc == NULL || pp != NULL || cp != NULL)
		return;
	sc = g_gate_hold(sc->sc_unit, NULL);
	if (sc == NULL)
		return;
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "write-only");
	} else {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-write");
	}
	if (sc->sc_readcons != NULL) {
		sbuf_printf(sb, "%s<read_offset>%jd</read_offset>\n",
		    indent, (intmax_t)sc->sc_readoffset);
		sbuf_printf(sb, "%s<read_provider>%s</read_provider>\n",
		    indent, sc->sc_readcons->provider->name);
	}
	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
	    sc->sc_queue_count);
	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
	    sc->sc_queue_size);
	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
	sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit);
	g_topology_unlock();
	g_gate_release(sc);
	g_topology_lock();
}

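/*
 * Handle G_GATE_CMD_CREATE: validate the parameters from userland,
 * allocate a softc and unit number, optionally attach to a read
 * provider for direct reads, and create the geom and provider.  The
 * timeout watchdog is armed last, once the device is fully set up.
 */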
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp, *ropp;
	struct g_consumer *cp;
	char name[NAME_MAX];
	int error = 0, unit;

	if (ggio->gctl_mediasize <= 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if (ggio->gctl_sectorsize <= 0) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if (!powerof2(ggio->gctl_sectorsize)) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
	    (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		G_GATE_DEBUG(1, "Invalid flags.");
		return (EINVAL);
	}
	if (ggio->gctl_unit != G_GATE_UNIT_AUTO &&
	    ggio->gctl_unit != G_GATE_NAME_GIVEN &&
	    ggio->gctl_unit < 0) {
		G_GATE_DEBUG(1, "Invalid unit number.");
		return (EINVAL);
	}
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN &&
	    ggio->gctl_name[0] == '\0') {
		G_GATE_DEBUG(1, "No device name.");
		return (EINVAL);
	}

	sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
	sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
	strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
	sc->sc_seq = 1;
	bioq_init(&sc->sc_inqueue);
	bioq_init(&sc->sc_outqueue);
	mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
	mtx_init(&sc->sc_read_mtx, "gg:read", NULL, MTX_DEF);
	sc->sc_queue_count = 0;
	sc->sc_queue_size = ggio->gctl_maxcount;
	if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
		sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
	sc->sc_timeout = ggio->gctl_timeout;
	callout_init(&sc->sc_callout, 1);

	mtx_lock(&g_gate_units_lock);
	sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error);
	if (sc->sc_unit < 0)
		goto fail1;
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
		snprintf(name, sizeof(name), "%s", ggio->gctl_name);
	else {
		snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME,
		    sc->sc_unit);
	}
	/* Check for name collision. */
	for (unit = 0; unit < g_gate_maxunits; unit++) {
		if (g_gate_units[unit] == NULL)
			continue;
		if (strcmp(name, g_gate_units[unit]->sc_name) != 0)
			continue;
		error = EEXIST;
		goto fail1;
	}
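	/*
	 * sc_name points at the on-stack name buffer only until the
	 * provider is created below; it is then switched to the
	 * provider's own copy of the name under the unit-table lock.
	 */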
	sc->sc_name = name;
	g_gate_units[sc->sc_unit] = sc;
	g_gate_nunits++;
	mtx_unlock(&g_gate_units_lock);

	g_topology_lock();

	if (ggio->gctl_readprov[0] == '\0') {
		ropp = NULL;
	} else {
		ropp = g_provider_by_name(ggio->gctl_readprov);
		if (ropp == NULL) {
			G_GATE_DEBUG(1, "Provider %s doesn't exist.",
			    ggio->gctl_readprov);
			error = EINVAL;
			goto fail2;
		}
		if ((ggio->gctl_readoffset % ggio->gctl_sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			error = EINVAL;
			goto fail2;
		}
		if (ggio->gctl_mediasize + ggio->gctl_readoffset >
		    ropp->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			error = EINVAL;
			goto fail2;
		}
	}

	gp = g_new_geomf(&g_gate_class, "%s", name);
	gp->start = g_gate_start;
	gp->access = g_gate_access;
	gp->orphan = g_gate_orphan;
	gp->dumpconf = g_gate_dumpconf;
	gp->softc = sc;

	if (ropp != NULL) {
		cp = g_new_consumer(gp);
		cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
		error = g_attach(cp, ropp);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to attach to %s.", ropp->name);
			goto fail3;
		}
		error = g_access(cp, 1, 0, 0);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to access %s.", ropp->name);
			g_detach(cp);
			goto fail3;
		}
		sc->sc_readcons = cp;
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	ggio->gctl_unit = sc->sc_unit;

	pp = g_new_providerf(gp, "%s", name);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = ggio->gctl_mediasize;
	pp->sectorsize = ggio->gctl_sectorsize;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);

	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	sc->sc_name = sc->sc_provider->name;
	mtx_unlock(&g_gate_units_lock);
	G_GATE_DEBUG(1, "Device %s created.", gp->name);

	if (sc->sc_timeout > 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	return (0);
fail3:
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
fail2:
	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
fail1:
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_read_mtx);
	free(sc, M_GATE);
	return (error);
}

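/*
 * Handle G_GATE_CMD_MODIFY: change the media size, info string, read
 * provider or read offset of an existing device.  Resizing is handled
 * first and exclusively; replacing the read provider swaps the old
 * consumer for a freshly attached one under the topology lock.
 */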
static int
g_gate_modify(struct g_gate_softc *sc, struct g_gate_ctl_modify *ggio)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	int done, error;

	if ((ggio->gctl_modify & GG_MODIFY_MEDIASIZE) != 0) {
		if (ggio->gctl_mediasize <= 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_mediasize % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		g_resize_provider(pp, ggio->gctl_mediasize);
		return (0);
	}

	if ((ggio->gctl_modify & GG_MODIFY_INFO) != 0)
		(void)strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));

	cp = NULL;

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		g_topology_lock();
		mtx_lock(&sc->sc_read_mtx);
		if ((cp = sc->sc_readcons) != NULL) {
			sc->sc_readcons = NULL;
			done = (cp->index == 0);
			mtx_unlock(&sc->sc_read_mtx);
			if (done)
				g_gate_detach(cp, 0);
		} else
			mtx_unlock(&sc->sc_read_mtx);
		if (ggio->gctl_readprov[0] != '\0') {
			pp = g_provider_by_name(ggio->gctl_readprov);
			if (pp == NULL) {
				g_topology_unlock();
				G_GATE_DEBUG(1, "Provider %s doesn't exist.",
				    ggio->gctl_readprov);
				return (EINVAL);
			}
			cp = g_new_consumer(sc->sc_provider->geom);
			cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
			error = g_attach(cp, pp);
			if (error != 0) {
				G_GATE_DEBUG(1, "Unable to attach to %s.",
				    pp->name);
			} else {
				error = g_access(cp, 1, 0, 0);
				if (error != 0) {
					G_GATE_DEBUG(1, "Unable to access %s.",
					    pp->name);
					g_detach(cp);
				}
			}
			if (error != 0) {
				g_destroy_consumer(cp);
				g_topology_unlock();
				return (error);
			}
		} else {
			/*
			 * The read provider is being removed; do not let
			 * the stale consumer pointer be installed again
			 * below.
			 */
			cp = NULL;
		}
	} else {
		cp = sc->sc_readcons;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READOFFSET) != 0) {
		if (cp == NULL) {
			G_GATE_DEBUG(1, "No read provider.");
			error = EINVAL;
			goto fail;
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_readoffset % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			error = EINVAL;
			goto fail;
		}
		if (pp->mediasize + ggio->gctl_readoffset >
		    cp->provider->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			error = EINVAL;
			goto fail;
		}
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		sc->sc_readcons = cp;
		g_topology_unlock();
	}

	return (0);
fail:
	/* Do not leak the topology lock or a fresh consumer on error. */
	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		if (cp != NULL) {
			(void)g_access(cp, -1, 0, 0);
			g_detach(cp);
			g_destroy_consumer(cp);
		}
		g_topology_unlock();
	}
	return (error);
}

#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION) {			\
		printf("Version mismatch %d != %d.\n",			\
		    (ggio)->gctl_version, G_GATE_VERSION);		\
		return (EINVAL);					\
	}								\
} while (0)
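/*
 * Control device ioctl handler.  This is the interface the userland
 * ggate utilities use: CREATE, MODIFY, DESTROY and CANCEL manage
 * devices, while START and DONE form the request loop of the daemon
 * that services I/O (START blocks until a request is available and
 * copies it out; DONE copies the result back in and completes the bio).
 */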
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
	    flags, td);

	switch (cmd) {
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		error = g_gate_create(ggio);
		/*
		 * Reset the TDP_GEOM flag.  There are certainly pending
		 * events, because we have just created a new provider and
		 * other classes want to taste it.  However, this thread is
		 * the one that has to answer the resulting I/O requests, so
		 * it must not wait for those events to settle on its way
		 * back to userland.
		 */
		td->td_pflags &= ~TDP_GEOM;
		return (error);
	    }
	case G_GATE_CMD_MODIFY:
	    {
		struct g_gate_ctl_modify *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = g_gate_modify(sc, ggio);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		g_topology_lock();
		mtx_lock(&g_gate_units_lock);
		error = g_gate_destroy(sc, ggio->gctl_force);
		g_topology_unlock();
		if (error != 0)
			g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_CANCEL:
	    {
		struct g_gate_ctl_cancel *ggio = (void *)addr;
		struct bio *tbp, *lbp;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		lbp = NULL;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
			if (ggio->gctl_seq == 0 ||
			    ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
				G_GATE_LOGREQ(1, bp, "Request canceled.");
				bioq_remove(&sc->sc_outqueue, bp);
				/*
				 * Be sure to put requests back onto the
				 * incoming queue in the proper order.
				 */
				if (lbp == NULL)
					bioq_insert_head(&sc->sc_inqueue, bp);
				else {
					TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
					    lbp, bp, bio_queue);
				}
				lbp = bp;
				/*
				 * If only one request was canceled, leave now.
				 */
				if (ggio->gctl_seq != 0)
					break;
			}
		}
		if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
			ggio->gctl_unit = sc->sc_unit;
		mtx_unlock(&sc->sc_queue_mtx);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = 0;
		for (;;) {
			mtx_lock(&sc->sc_queue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				ggio->gctl_error = ECANCELED;
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			if (msleep(sc, &sc->sc_queue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
				ggio->gctl_error = ECANCELED;
				goto start_end;
			}
		}
		ggio->gctl_cmd = bp->bio_cmd;
		if (bp->bio_cmd == BIO_WRITE &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_queue_mtx);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			goto start_end;
		}
		bioq_remove(&sc->sc_inqueue, bp);
		bioq_insert_tail(&sc->sc_outqueue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;

		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_DELETE:
		case BIO_FLUSH:
		case BIO_SPEEDUP:
			break;
		case BIO_WRITE:
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				mtx_lock(&sc->sc_queue_mtx);
				bioq_remove(&sc->sc_outqueue, bp);
				bioq_insert_head(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			break;
		}
start_end:
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENOENT);
		error = 0;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			sc->sc_queue_count--;
		}
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp == NULL) {
			/*
			 * Request was probably canceled.
			 */
			goto done_end;
		}
		if (ggio->gctl_error == EAGAIN) {
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			mtx_lock(&sc->sc_queue_mtx);
			sc->sc_queue_count++;
			bioq_insert_head(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
				case BIO_FLUSH:
				case BIO_SPEEDUP:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
done_end:
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}

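/*
 * Create the control device node (G_GATE_CTL_NAME) through which the
 * userland utilities issue ioctls to the module.
 */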
static void
g_gate_device(void)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}

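/*
 * Module event handler: allocate the unit table and create the control
 * device on load; refuse to unload while any ggate device exists.
 */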
static int
g_gate_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF);
		g_gate_units = malloc(g_gate_maxunits * sizeof(g_gate_units[0]),
		    M_GATE, M_WAITOK | M_ZERO);
		g_gate_nunits = 0;
		g_gate_device();
		break;
	case MOD_UNLOAD:
		mtx_lock(&g_gate_units_lock);
		if (g_gate_nunits > 0) {
			mtx_unlock(&g_gate_units_lock);
			error = EBUSY;
			break;
		}
		mtx_unlock(&g_gate_units_lock);
		mtx_destroy(&g_gate_units_lock);
		if (status_dev != NULL)
			destroy_dev(status_dev);
		free(g_gate_units, M_GATE);
		break;
	default:
		return (EOPNOTSUPP);
	}

	return (error);
}

static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);
MODULE_VERSION(geom_gate, 0);