xref: /freebsd/sys/geom/gate/g_gate.c (revision 38effe887ee979f91ad5abf42a2291558e7ff8d1)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2009-2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Pawel Jakub Dawidek
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/gate/g_gate.h>

FEATURE(geom_gate, "GEOM Gate module");

static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0,
    "GEOM_GATE configuration");
static int g_gate_debug = 0;
SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RWTUN, &g_gate_debug, 0,
    "Debug level");
static u_int g_gate_maxunits = 256;
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN,
    &g_gate_maxunits, 0, "Maximum number of ggate devices");

struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.version = G_VERSION,
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	g_gate_ioctl,
	.d_name =	G_GATE_CTL_NAME
};

static struct g_gate_softc **g_gate_units;
static u_int g_gate_nunits;
static struct mtx g_gate_units_lock;

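/*
 * Tear down a ggate device: mark it for destruction, wake up the daemon
 * sleeping in G_GATE_CMD_START, fail every queued bio with ENXIO, wait for
 * the last reference to drain, and then wither the geom.  Called with the
 * topology lock and g_gate_units_lock held; the units lock is dropped here.
 */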
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct bio_queue_head queue;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_units_lock, MA_OWNED);
	pp = sc->sc_provider;
	if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_units_lock);
		return (EBUSY);
	}
	mtx_unlock(&g_gate_units_lock);
	mtx_lock(&sc->sc_queue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0)
		sc->sc_flags |= G_GATE_FLAG_DESTROY;
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	gp = pp->geom;
	g_wither_provider(pp, ENXIO);
	callout_drain(&sc->sc_callout);
	bioq_init(&queue);
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_takefirst(&sc->sc_inqueue)) != NULL) {
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	while ((bp = bioq_takefirst(&sc->sc_outqueue)) != NULL) {
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	g_topology_unlock();
	while ((bp = bioq_takefirst(&queue)) != NULL) {
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	mtx_lock(&g_gate_units_lock);
	/* One reference is ours. */
	sc->sc_ref--;
	while (sc->sc_ref > 0)
		msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	g_topology_lock();
	if ((cp = sc->sc_readcons) != NULL) {
		sc->sc_readcons = NULL;
		(void)g_access(cp, -1, 0, 0);
		g_detach(cp);
		g_destroy_consumer(cp);
	}
	G_GATE_DEBUG(1, "Device %s destroyed.", gp->name);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}

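/*
 * Access policy for the gate provider: write-only devices reject readers.
 * The symmetric check for read-only devices is compiled out below so that
 * read-only mounts, which still open the provider for writing, keep
 * working; writes are instead rejected per-request in g_gate_start().
 */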
static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_gate_softc *sc;

	if (dr <= 0 && dw <= 0 && de <= 0)
		return (0);
	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		return (ENXIO);
	/* XXX: Hack to allow read-only mounts. */
#if 0
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
		return (EPERM);
#endif
	if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
		return (EPERM);
	return (0);
}

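/*
 * Hand a bio over to the userland daemon: tag it with a sequence number,
 * append it to the incoming queue and wake up any thread sleeping in
 * G_GATE_CMD_START.  Fails with ENOMEM once the queue limit is exceeded.
 */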
static void
g_gate_queue_io(struct bio *bp)
{
	struct g_gate_softc *sc;

	sc = bp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	mtx_lock(&sc->sc_queue_mtx);

	if (sc->sc_queue_size > 0 && sc->sc_queue_count > sc->sc_queue_size) {
		mtx_unlock(&sc->sc_queue_mtx);
		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
		g_io_deliver(bp, ENOMEM);
		return;
	}

	bp->bio_driver1 = (void *)sc->sc_seq;
	sc->sc_seq++;
	sc->sc_queue_count++;

	bioq_insert_tail(&sc->sc_inqueue, bp);
	wakeup(sc);

	mtx_unlock(&sc->sc_queue_mtx);
}

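/*
 * Completion handler for reads cloned to the read provider: on success
 * complete the parent bio directly, on failure fall back to the userland
 * daemon by requeueing the parent.
 */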
static void
g_gate_done(struct bio *cbp)
{
	struct bio *pbp;

	pbp = cbp->bio_parent;
	if (cbp->bio_error == 0) {
		pbp->bio_completed = cbp->bio_completed;
		g_destroy_bio(cbp);
		pbp->bio_inbed++;
		g_io_deliver(pbp, 0);
	} else {
		/* If direct read failed, pass it through userland daemon. */
		g_destroy_bio(cbp);
		pbp->bio_children--;
		g_gate_queue_io(pbp);
	}
}

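/*
 * I/O entry point.  Reads are served from the optional read provider when
 * one is configured; writes, deletes and flushes are rejected on read-only
 * devices; anything else that is supported ends up on the queue for the
 * userland daemon.
 */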
static void
g_gate_start(struct bio *pbp)
{
	struct g_gate_softc *sc;

	sc = pbp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(pbp, ENXIO);
		return;
	}
	G_GATE_LOGREQ(2, pbp, "Request received.");
	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (sc->sc_readcons != NULL) {
			struct bio *cbp;

			cbp = g_clone_bio(pbp);
			if (cbp == NULL) {
				g_io_deliver(pbp, ENOMEM);
				return;
			}
			cbp->bio_done = g_gate_done;
			cbp->bio_offset = pbp->bio_offset + sc->sc_readoffset;
			cbp->bio_to = sc->sc_readcons->provider;
			g_io_request(cbp, sc->sc_readcons);
			return;
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
	case BIO_FLUSH:
		/* XXX: Hack to allow read-only mounts. */
		if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
			g_io_deliver(pbp, EPERM);
			return;
		}
		break;
	case BIO_GETATTR:
	default:
		G_GATE_LOGREQ(2, pbp, "Ignoring request.");
		g_io_deliver(pbp, EOPNOTSUPP);
		return;
	}

	g_gate_queue_io(pbp);
}

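/*
 * Look up a ggate unit by number, or by provider name when unit is
 * G_GATE_NAME_GIVEN, and take a reference on it.  g_gate_release() drops
 * the reference and wakes up a destroyer waiting for references to drain.
 */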
static struct g_gate_softc *
g_gate_hold(int unit, const char *name)
{
	struct g_gate_softc *sc = NULL;

	mtx_lock(&g_gate_units_lock);
	if (unit >= 0 && unit < g_gate_maxunits)
		sc = g_gate_units[unit];
	else if (unit == G_GATE_NAME_GIVEN) {
		KASSERT(name != NULL, ("name is NULL"));
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				continue;
			if (strcmp(name,
			    g_gate_units[unit]->sc_provider->name) != 0) {
				continue;
			}
			sc = g_gate_units[unit];
			break;
		}
	}
	if (sc != NULL)
		sc->sc_ref++;
	mtx_unlock(&g_gate_units_lock);
	return (sc);
}

static void
g_gate_release(struct g_gate_softc *sc)
{

	g_topology_assert_not();
	mtx_lock(&g_gate_units_lock);
	sc->sc_ref--;
	KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
	if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		wakeup(&sc->sc_ref);
	mtx_unlock(&g_gate_units_lock);
}

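/*
 * Reserve a unit number: validate the requested unit if one was given,
 * otherwise pick the first free slot.  Returns -1 and sets *errorp on
 * failure.  The units lock must be held.
 */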
static int
g_gate_getunit(int unit, int *errorp)
{

	mtx_assert(&g_gate_units_lock, MA_OWNED);
	if (unit >= 0) {
		if (unit >= g_gate_maxunits)
			*errorp = EINVAL;
		else if (g_gate_units[unit] == NULL)
			return (unit);
		else
			*errorp = EEXIST;
	} else {
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				return (unit);
		}
		*errorp = ENFILE;
	}
	return (-1);
}

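/*
 * Watchdog callout: fail queued requests that have been waiting for more
 * than five seconds with EIO, then re-arm itself every sc_timeout seconds
 * until the device is being destroyed.
 */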
static void
g_gate_guard(void *arg)
{
	struct bio_queue_head queue;
	struct g_gate_softc *sc;
	struct bintime curtime;
	struct bio *bp, *bp2;

	sc = arg;
	binuptime(&curtime);
	g_gate_hold(sc->sc_unit, NULL);
	bioq_init(&queue);
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	while ((bp = bioq_takefirst(&queue)) != NULL) {
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	g_gate_release(sc);
}

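/*
 * The read provider went away: drop and destroy our consumer so that
 * future reads fall back to the userland daemon.
 */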
static void
g_gate_orphan(struct g_consumer *cp)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;

	g_topology_assert();
	gp = cp->geom;
	sc = gp->softc;
	if (sc == NULL)
		return;
	KASSERT(cp == sc->sc_readcons, ("cp=%p sc_readcons=%p", cp,
	    sc->sc_readcons));
	sc->sc_readcons = NULL;
	G_GATE_DEBUG(1, "Destroying read consumer on provider %s orphan.",
	    cp->provider->name);
	(void)g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
}

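/*
 * Emit this device's XML fragment for the GEOM configuration tree
 * (as exported via the kern.geom.confxml sysctl).
 */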
static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_gate_softc *sc;

	sc = gp->softc;
	if (sc == NULL || pp != NULL || cp != NULL)
		return;
	sc = g_gate_hold(sc->sc_unit, NULL);
	if (sc == NULL)
		return;
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "write-only");
	} else {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-write");
	}
	if (sc->sc_readcons != NULL) {
		sbuf_printf(sb, "%s<read_offset>%jd</read_offset>\n",
		    indent, (intmax_t)sc->sc_readoffset);
		sbuf_printf(sb, "%s<read_provider>%s</read_provider>\n",
		    indent, sc->sc_readcons->provider->name);
	}
	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
	    sc->sc_queue_count);
	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
	    sc->sc_queue_size);
	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
	sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit);
	g_topology_unlock();
	g_gate_release(sc);
	g_topology_lock();
}

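/*
 * Handle G_GATE_CMD_CREATE: validate the requested geometry and flags,
 * reserve a unit number and provider name, optionally attach a read-only
 * consumer to gctl_readprov, create the geom and provider, and arm the
 * timeout watchdog.
 */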
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp, *ropp;
	struct g_consumer *cp;
	char name[NAME_MAX];
	int error = 0, unit;

	if (ggio->gctl_mediasize <= 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if (ggio->gctl_sectorsize <= 0) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if (!powerof2(ggio->gctl_sectorsize)) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
	    (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		G_GATE_DEBUG(1, "Invalid flags.");
		return (EINVAL);
	}
	if (ggio->gctl_unit != G_GATE_UNIT_AUTO &&
	    ggio->gctl_unit != G_GATE_NAME_GIVEN &&
	    ggio->gctl_unit < 0) {
		G_GATE_DEBUG(1, "Invalid unit number.");
		return (EINVAL);
	}
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN &&
	    ggio->gctl_name[0] == '\0') {
		G_GATE_DEBUG(1, "No device name.");
		return (EINVAL);
	}

	sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
	sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
	strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
	sc->sc_seq = 1;
	bioq_init(&sc->sc_inqueue);
	bioq_init(&sc->sc_outqueue);
	mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
	sc->sc_queue_count = 0;
	sc->sc_queue_size = ggio->gctl_maxcount;
	if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
		sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
	sc->sc_timeout = ggio->gctl_timeout;
	callout_init(&sc->sc_callout, 1);

	mtx_lock(&g_gate_units_lock);
	sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error);
	if (sc->sc_unit < 0)
		goto fail1;
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
		snprintf(name, sizeof(name), "%s", ggio->gctl_name);
	else {
		snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME,
		    sc->sc_unit);
	}
	/* Check for name collision. */
	for (unit = 0; unit < g_gate_maxunits; unit++) {
		if (g_gate_units[unit] == NULL)
			continue;
		if (strcmp(name, g_gate_units[unit]->sc_name) != 0)
			continue;
		error = EEXIST;
		goto fail1;
	}
	sc->sc_name = name;
	g_gate_units[sc->sc_unit] = sc;
	g_gate_nunits++;
	mtx_unlock(&g_gate_units_lock);

	g_topology_lock();

	if (ggio->gctl_readprov[0] == '\0') {
		ropp = NULL;
	} else {
		ropp = g_provider_by_name(ggio->gctl_readprov);
		if (ropp == NULL) {
			G_GATE_DEBUG(1, "Provider %s doesn't exist.",
			    ggio->gctl_readprov);
			error = EINVAL;
			goto fail2;
		}
		if ((ggio->gctl_readoffset % ggio->gctl_sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			error = EINVAL;
			goto fail2;
		}
		if (ggio->gctl_mediasize + ggio->gctl_readoffset >
		    ropp->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			error = EINVAL;
			goto fail2;
		}
	}

	gp = g_new_geomf(&g_gate_class, "%s", name);
	gp->start = g_gate_start;
	gp->access = g_gate_access;
	gp->orphan = g_gate_orphan;
	gp->dumpconf = g_gate_dumpconf;
	gp->softc = sc;

	if (ropp != NULL) {
		cp = g_new_consumer(gp);
		cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
		error = g_attach(cp, ropp);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to attach to %s.", ropp->name);
			goto fail3;
		}
		error = g_access(cp, 1, 0, 0);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to access %s.", ropp->name);
			g_detach(cp);
			goto fail3;
		}
		sc->sc_readcons = cp;
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	ggio->gctl_unit = sc->sc_unit;

	pp = g_new_providerf(gp, "%s", name);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = ggio->gctl_mediasize;
	pp->sectorsize = ggio->gctl_sectorsize;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);

	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	sc->sc_name = sc->sc_provider->name;
	mtx_unlock(&g_gate_units_lock);
	G_GATE_DEBUG(1, "Device %s created.", gp->name);

	if (sc->sc_timeout > 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	return (0);
fail3:
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
fail2:
	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
fail1:
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	free(sc, M_GATE);
	return (error);
}

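/*
 * Handle G_GATE_CMD_MODIFY: resize the provider, update the info string,
 * and/or replace the read provider and read offset of an existing device.
 */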
static int
g_gate_modify(struct g_gate_softc *sc, struct g_gate_ctl_modify *ggio)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	int error;

	if ((ggio->gctl_modify & GG_MODIFY_MEDIASIZE) != 0) {
		if (ggio->gctl_mediasize <= 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_mediasize % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		g_resize_provider(pp, ggio->gctl_mediasize);
		return (0);
	}

	if ((ggio->gctl_modify & GG_MODIFY_INFO) != 0) {
		(void)strlcpy(sc->sc_info, ggio->gctl_info,
		    sizeof(sc->sc_info));
	}

	cp = NULL;

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		g_topology_lock();
		if (sc->sc_readcons != NULL) {
			cp = sc->sc_readcons;
			sc->sc_readcons = NULL;
			(void)g_access(cp, -1, 0, 0);
			g_detach(cp);
			g_destroy_consumer(cp);
			/*
			 * Don't leave cp pointing at the destroyed consumer;
			 * it would otherwise be stored back into sc_readcons
			 * below when the read provider is being cleared.
			 */
			cp = NULL;
		}
		if (ggio->gctl_readprov[0] != '\0') {
			pp = g_provider_by_name(ggio->gctl_readprov);
			if (pp == NULL) {
				g_topology_unlock();
				G_GATE_DEBUG(1, "Provider %s doesn't exist.",
				    ggio->gctl_readprov);
				return (EINVAL);
			}
			cp = g_new_consumer(sc->sc_provider->geom);
			cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
			error = g_attach(cp, pp);
			if (error != 0) {
				G_GATE_DEBUG(1, "Unable to attach to %s.",
				    pp->name);
			} else {
				error = g_access(cp, 1, 0, 0);
				if (error != 0) {
					G_GATE_DEBUG(1, "Unable to access %s.",
					    pp->name);
					g_detach(cp);
				}
			}
			if (error != 0) {
				g_destroy_consumer(cp);
				g_topology_unlock();
				return (error);
			}
		}
	} else {
		cp = sc->sc_readcons;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READOFFSET) != 0) {
		if (cp == NULL) {
			G_GATE_DEBUG(1, "No read provider.");
			error = EINVAL;
			goto fail;
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_readoffset % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			error = EINVAL;
			goto fail;
		}
		if (pp->mediasize + ggio->gctl_readoffset >
		    cp->provider->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			error = EINVAL;
			goto fail;
		}
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		sc->sc_readcons = cp;
		g_topology_unlock();
	}

	return (0);
fail:
	/*
	 * If we replaced the read consumer above, we still hold the topology
	 * lock and own the freshly attached consumer (sc_readcons was already
	 * cleared); release both instead of leaking them on error.
	 */
	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		if (cp != NULL) {
			(void)g_access(cp, -1, 0, 0);
			g_detach(cp);
			g_destroy_consumer(cp);
		}
		g_topology_unlock();
	}
	return (error);
}

#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION) {			\
		printf("Version mismatch %d != %d.\n",			\
		    (ggio)->gctl_version, G_GATE_VERSION);		\
		return (EINVAL);					\
	}								\
} while (0)

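/*
 * Control-device ioctl handler: this is the whole userland protocol.
 * A daemon creates a device with G_GATE_CMD_CREATE and then loops, fetching
 * requests with G_GATE_CMD_START (which sleeps until a bio is queued and
 * copies write payloads out to gctl_data) and completing them with
 * G_GATE_CMD_DONE (which copies read payloads back in from gctl_data).
 * A minimal userland sketch, assuming the structures from
 * <geom/gate/g_gate.h>; error handling, BIO_DELETE and BIO_FLUSH are
 * omitted, and buf/mediasize are supplied by the daemon:
 *
 *	int fd = open("/dev/" G_GATE_CTL_NAME, O_RDWR);
 *	struct g_gate_ctl_create ggc = { .gctl_version = G_GATE_VERSION,
 *	    .gctl_unit = G_GATE_UNIT_AUTO, .gctl_mediasize = mediasize,
 *	    .gctl_sectorsize = 512 };
 *	ioctl(fd, G_GATE_CMD_CREATE, &ggc);	// gctl_unit is filled in
 *
 *	struct g_gate_ctl_io ggio = { .gctl_version = G_GATE_VERSION,
 *	    .gctl_unit = ggc.gctl_unit, .gctl_data = buf };
 *	for (;;) {
 *		ggio.gctl_length = sizeof(buf);
 *		ggio.gctl_error = 0;
 *		ioctl(fd, G_GATE_CMD_START, &ggio);
 *		switch (ggio.gctl_cmd) {
 *		case BIO_READ:	// fill buf from the backing store
 *		case BIO_WRITE:	// buf already holds the data to write
 *			...
 *		}
 *		ioctl(fd, G_GATE_CMD_DONE, &ggio);
 *	}
 */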
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
	    flags, td);

	switch (cmd) {
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		error = g_gate_create(ggio);
		/*
		 * Reset TDP_GEOM flag.
		 * There are pending events for sure, because we just created
		 * new provider and other classes want to taste it, but we
		 * cannot answer on I/O requests until we're here.
		 */
		td->td_pflags &= ~TDP_GEOM;
		return (error);
	    }
	case G_GATE_CMD_MODIFY:
	    {
		struct g_gate_ctl_modify *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = g_gate_modify(sc, ggio);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		g_topology_lock();
		mtx_lock(&g_gate_units_lock);
		error = g_gate_destroy(sc, ggio->gctl_force);
		g_topology_unlock();
		if (error != 0)
			g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_CANCEL:
	    {
		struct g_gate_ctl_cancel *ggio = (void *)addr;
		struct bio *tbp, *lbp;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		lbp = NULL;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
			if (ggio->gctl_seq == 0 ||
			    ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
				G_GATE_LOGREQ(1, bp, "Request canceled.");
				bioq_remove(&sc->sc_outqueue, bp);
				/*
				 * Be sure to put requests back onto incoming
				 * queue in the proper order.
				 */
				if (lbp == NULL)
					bioq_insert_head(&sc->sc_inqueue, bp);
				else {
					TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
					    lbp, bp, bio_queue);
				}
				lbp = bp;
				/*
				 * If only one request was canceled, leave now.
				 */
				if (ggio->gctl_seq != 0)
					break;
			}
		}
		if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
			ggio->gctl_unit = sc->sc_unit;
		mtx_unlock(&sc->sc_queue_mtx);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = 0;
		for (;;) {
			mtx_lock(&sc->sc_queue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				ggio->gctl_error = ECANCELED;
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			if (msleep(sc, &sc->sc_queue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
				ggio->gctl_error = ECANCELED;
				goto start_end;
			}
		}
		ggio->gctl_cmd = bp->bio_cmd;
		if (bp->bio_cmd == BIO_WRITE &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_queue_mtx);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			goto start_end;
		}
		bioq_remove(&sc->sc_inqueue, bp);
		bioq_insert_tail(&sc->sc_outqueue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;

		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_DELETE:
		case BIO_FLUSH:
			break;
		case BIO_WRITE:
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				mtx_lock(&sc->sc_queue_mtx);
				bioq_remove(&sc->sc_outqueue, bp);
				bioq_insert_head(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			break;
		}
start_end:
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENOENT);
		error = 0;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			sc->sc_queue_count--;
		}
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp == NULL) {
			/*
			 * Request was probably canceled.
			 */
			goto done_end;
		}
		if (ggio->gctl_error == EAGAIN) {
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			mtx_lock(&sc->sc_queue_mtx);
			sc->sc_queue_count++;
			bioq_insert_head(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
				case BIO_FLUSH:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
done_end:
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}

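/*
 * Create the control device node (G_GATE_CTL_NAME) that backs the ioctl
 * interface above.
 */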
static void
g_gate_device(void)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}

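/*
 * Module event handler: allocate the unit table and create the control
 * device on load; refuse to unload while any ggate device still exists.
 */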
static int
g_gate_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF);
		g_gate_units = malloc(g_gate_maxunits * sizeof(g_gate_units[0]),
		    M_GATE, M_WAITOK | M_ZERO);
		g_gate_nunits = 0;
		g_gate_device();
		break;
	case MOD_UNLOAD:
		mtx_lock(&g_gate_units_lock);
		if (g_gate_nunits > 0) {
			mtx_unlock(&g_gate_units_lock);
			error = EBUSY;
			break;
		}
		mtx_unlock(&g_gate_units_lock);
		mtx_destroy(&g_gate_units_lock);
		if (status_dev != NULL)
			destroy_dev(status_dev);
		free(g_gate_units, M_GATE);
		break;
	default:
		return (EOPNOTSUPP);
	}

	return (error);
}

static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);
MODULE_VERSION(geom_gate, 0);