/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2009-2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Pawel Jakub Dawidek
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/gate/g_gate.h>

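/*
 * GEOM Gate exports block devices whose I/O is serviced by a userland
 * process.  The kernel side queues bio requests on a per-device incoming
 * queue; a daemon fetches them via ioctl(2) on the control device
 * (G_GATE_CMD_START), performs the I/O, and completes them with
 * G_GATE_CMD_DONE.  Optionally, reads can be short-circuited to another
 * GEOM provider (the "read provider") without a round trip to userland.
 */
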
FEATURE(geom_gate, "GEOM Gate module");

static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0,
    "GEOM_GATE configuration");
static int g_gate_debug = 0;
SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RWTUN, &g_gate_debug, 0,
    "Debug level");
static u_int g_gate_maxunits = 256;
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN,
    &g_gate_maxunits, 0, "Maximum number of ggate devices");

struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.version = G_VERSION,
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	g_gate_ioctl,
	.d_name =	G_GATE_CTL_NAME
};

static struct g_gate_softc **g_gate_units;
static u_int g_gate_nunits;
static struct mtx g_gate_units_lock;

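/*
 * Tear down a gate device: mark it as destroyed, fail all queued requests
 * with ENXIO, wait for every outstanding reference to drain, detach the
 * optional read consumer and wither the geom.  Called with the topology
 * lock and g_gate_units_lock held; g_gate_units_lock is dropped before
 * returning.
 */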
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct bio_queue_head queue;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_units_lock, MA_OWNED);
	pp = sc->sc_provider;
	if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_units_lock);
		return (EBUSY);
	}
	mtx_unlock(&g_gate_units_lock);
	mtx_lock(&sc->sc_queue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0)
		sc->sc_flags |= G_GATE_FLAG_DESTROY;
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	gp = pp->geom;
	g_wither_provider(pp, ENXIO);
	callout_drain(&sc->sc_callout);
	bioq_init(&queue);
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_takefirst(&sc->sc_inqueue)) != NULL) {
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	while ((bp = bioq_takefirst(&sc->sc_outqueue)) != NULL) {
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	g_topology_unlock();
	while ((bp = bioq_takefirst(&queue)) != NULL) {
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	mtx_lock(&g_gate_units_lock);
	/* One reference is ours. */
	sc->sc_ref--;
	while (sc->sc_ref > 0)
		msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	g_topology_lock();
	if ((cp = sc->sc_readcons) != NULL) {
		sc->sc_readcons = NULL;
		(void)g_access(cp, -1, 0, 0);
		g_detach(cp);
		g_destroy_consumer(cp);
	}
	G_GATE_DEBUG(1, "Device %s destroyed.", gp->name);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}

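/*
 * Refuse new openers once the device is marked for destruction and
 * enforce the write-only flag; the read-only flag is enforced for
 * writes in g_gate_start() instead (see the hack below).
 */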
static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_gate_softc *sc;

	if (dr <= 0 && dw <= 0 && de <= 0)
		return (0);
	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		return (ENXIO);
	/* XXX: Hack to allow read-only mounts. */
#if 0
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
		return (EPERM);
#endif
	if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
		return (EPERM);
	return (0);
}

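/*
 * Put a request on the incoming queue for the userland daemon, stamping
 * it with a sequence number, and wake up any thread sleeping in
 * G_GATE_CMD_START.  Requests are failed with ENOMEM once the queue
 * limit is exceeded.
 */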
static void
g_gate_queue_io(struct bio *bp)
{
	struct g_gate_softc *sc;

	sc = bp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	mtx_lock(&sc->sc_queue_mtx);

	if (sc->sc_queue_size > 0 && sc->sc_queue_count > sc->sc_queue_size) {
		mtx_unlock(&sc->sc_queue_mtx);
		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
		g_io_deliver(bp, ENOMEM);
		return;
	}

	bp->bio_driver1 = (void *)sc->sc_seq;
	sc->sc_seq++;
	sc->sc_queue_count++;

	bioq_insert_tail(&sc->sc_inqueue, bp);
	wakeup(sc);

	mtx_unlock(&sc->sc_queue_mtx);
}

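/*
 * Completion callback for reads forwarded directly to the read provider.
 * On success the parent bio is completed immediately; on error the read
 * is retried through the userland daemon.
 */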
static void
g_gate_done(struct bio *cbp)
{
	struct bio *pbp;

	pbp = cbp->bio_parent;
	if (cbp->bio_error == 0) {
		pbp->bio_completed = cbp->bio_completed;
		g_destroy_bio(cbp);
		pbp->bio_inbed++;
		g_io_deliver(pbp, 0);
	} else {
		/* If the direct read failed, pass it to the userland daemon. */
		g_destroy_bio(cbp);
		pbp->bio_children--;
		g_gate_queue_io(pbp);
	}
}

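/*
 * I/O entry point for the provider.  Reads go straight to the read
 * provider when one is configured; everything else, and reads without
 * a read provider, are queued for the userland daemon.
 */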
static void
g_gate_start(struct bio *pbp)
{
	struct g_gate_softc *sc;

	sc = pbp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(pbp, ENXIO);
		return;
	}
	G_GATE_LOGREQ(2, pbp, "Request received.");
	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (sc->sc_readcons != NULL) {
			struct bio *cbp;

			cbp = g_clone_bio(pbp);
			if (cbp == NULL) {
				g_io_deliver(pbp, ENOMEM);
				return;
			}
			cbp->bio_done = g_gate_done;
			cbp->bio_offset = pbp->bio_offset + sc->sc_readoffset;
			cbp->bio_to = sc->sc_readcons->provider;
			g_io_request(cbp, sc->sc_readcons);
			return;
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
	case BIO_FLUSH:
		/* XXX: Hack to allow read-only mounts. */
		if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
			g_io_deliver(pbp, EPERM);
			return;
		}
		break;
	case BIO_GETATTR:
	default:
		G_GATE_LOGREQ(2, pbp, "Ignoring request.");
		g_io_deliver(pbp, EOPNOTSUPP);
		return;
	}

	g_gate_queue_io(pbp);
}

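/*
 * Look up a device by unit number, or by provider name when unit is
 * G_GATE_NAME_GIVEN, and take a reference on it.  Returns NULL if the
 * device does not exist.
 */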
static struct g_gate_softc *
g_gate_hold(int unit, const char *name)
{
	struct g_gate_softc *sc = NULL;

	mtx_lock(&g_gate_units_lock);
	if (unit >= 0 && unit < g_gate_maxunits)
		sc = g_gate_units[unit];
	else if (unit == G_GATE_NAME_GIVEN) {
		KASSERT(name != NULL, ("name is NULL"));
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				continue;
			if (strcmp(name,
			    g_gate_units[unit]->sc_provider->name) != 0) {
				continue;
			}
			sc = g_gate_units[unit];
			break;
		}
	}
	if (sc != NULL)
		sc->sc_ref++;
	mtx_unlock(&g_gate_units_lock);
	return (sc);
}

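/*
 * Drop a reference; the last one wakes up g_gate_destroy() if the
 * device is being torn down.
 */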
static void
g_gate_release(struct g_gate_softc *sc)
{

	g_topology_assert_not();
	mtx_lock(&g_gate_units_lock);
	sc->sc_ref--;
	KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
	if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		wakeup(&sc->sc_ref);
	mtx_unlock(&g_gate_units_lock);
}

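/*
 * Validate a requested unit number, or pick the first free one when the
 * caller passed a negative unit.  Returns -1 and sets *errorp on failure.
 */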
static int
g_gate_getunit(int unit, int *errorp)
{

	mtx_assert(&g_gate_units_lock, MA_OWNED);
	if (unit >= 0) {
		if (unit >= g_gate_maxunits)
			*errorp = EINVAL;
		else if (g_gate_units[unit] == NULL)
			return (unit);
		else
			*errorp = EEXIST;
	} else {
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				return (unit);
		}
		*errorp = ENFILE;
	}
	return (-1);
}

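/*
 * Callout handler armed every sc_timeout seconds: fail requests that
 * have been sitting on the queues for five seconds or more with EIO,
 * then re-arm itself unless the device is being destroyed.
 */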
static void
g_gate_guard(void *arg)
{
	struct bio_queue_head queue;
	struct g_gate_softc *sc;
	struct bintime curtime;
	struct bio *bp, *bp2;

	sc = arg;
	binuptime(&curtime);
	g_gate_hold(sc->sc_unit, NULL);
	bioq_init(&queue);
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	while ((bp = bioq_takefirst(&queue)) != NULL) {
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	g_gate_release(sc);
}

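/*
 * Orphan method: the read provider has gone away, so drop the consumer
 * attached to it.
 */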
static void
g_gate_orphan(struct g_consumer *cp)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;

	g_topology_assert();
	gp = cp->geom;
	sc = gp->softc;
	if (sc == NULL)
		return;
	KASSERT(cp == sc->sc_readcons, ("cp=%p sc_readcons=%p", cp,
	    sc->sc_readcons));
	sc->sc_readcons = NULL;
	G_GATE_DEBUG(1, "Destroying read consumer on orphaned provider %s.",
	    cp->provider->name);
	(void)g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
}

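/*
 * Dump the device state (access mode, read provider, timeout, queue
 * statistics) into the GEOM configuration XML.
 */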
static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_gate_softc *sc;

	sc = gp->softc;
	if (sc == NULL || pp != NULL || cp != NULL)
		return;
	sc = g_gate_hold(sc->sc_unit, NULL);
	if (sc == NULL)
		return;
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "write-only");
	} else {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-write");
	}
	if (sc->sc_readcons != NULL) {
		sbuf_printf(sb, "%s<read_offset>%jd</read_offset>\n",
		    indent, (intmax_t)sc->sc_readoffset);
		sbuf_printf(sb, "%s<read_provider>%s</read_provider>\n",
		    indent, sc->sc_readcons->provider->name);
	}
	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
	    sc->sc_queue_count);
	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
	    sc->sc_queue_size);
	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
	sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit);
	g_topology_unlock();
	g_gate_release(sc);
	g_topology_lock();
}

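/*
 * Handle G_GATE_CMD_CREATE: validate the parameters passed from
 * userland, allocate a free unit, optionally attach to a read provider,
 * and create the geom and provider for the new device.
 */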
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp, *ropp;
	struct g_consumer *cp;
	char name[NAME_MAX];
	int error = 0, unit;

	if (ggio->gctl_mediasize <= 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if (ggio->gctl_sectorsize <= 0) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if (!powerof2(ggio->gctl_sectorsize)) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
	    (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		G_GATE_DEBUG(1, "Invalid flags.");
		return (EINVAL);
	}
	if (ggio->gctl_unit != G_GATE_UNIT_AUTO &&
	    ggio->gctl_unit != G_GATE_NAME_GIVEN &&
	    ggio->gctl_unit < 0) {
		G_GATE_DEBUG(1, "Invalid unit number.");
		return (EINVAL);
	}
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN &&
	    ggio->gctl_name[0] == '\0') {
		G_GATE_DEBUG(1, "No device name.");
		return (EINVAL);
	}

	sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
	sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
	strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
	sc->sc_seq = 1;
	bioq_init(&sc->sc_inqueue);
	bioq_init(&sc->sc_outqueue);
	mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
	sc->sc_queue_count = 0;
	sc->sc_queue_size = ggio->gctl_maxcount;
	if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
		sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
	sc->sc_timeout = ggio->gctl_timeout;
	callout_init(&sc->sc_callout, 1);

	mtx_lock(&g_gate_units_lock);
	sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error);
	if (sc->sc_unit < 0)
		goto fail1;
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
		snprintf(name, sizeof(name), "%s", ggio->gctl_name);
	else {
		snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME,
		    sc->sc_unit);
	}
	/* Check for name collision. */
	for (unit = 0; unit < g_gate_maxunits; unit++) {
		if (g_gate_units[unit] == NULL)
			continue;
		if (strcmp(name, g_gate_units[unit]->sc_name) != 0)
			continue;
		error = EEXIST;
		goto fail1;
	}
	sc->sc_name = name;
	g_gate_units[sc->sc_unit] = sc;
	g_gate_nunits++;
	mtx_unlock(&g_gate_units_lock);

	g_topology_lock();

	if (ggio->gctl_readprov[0] == '\0') {
		ropp = NULL;
	} else {
		ropp = g_provider_by_name(ggio->gctl_readprov);
		if (ropp == NULL) {
			G_GATE_DEBUG(1, "Provider %s doesn't exist.",
			    ggio->gctl_readprov);
			error = EINVAL;
			goto fail2;
		}
		if ((ggio->gctl_readoffset % ggio->gctl_sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			error = EINVAL;
			goto fail2;
		}
		if (ggio->gctl_mediasize + ggio->gctl_readoffset >
		    ropp->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			error = EINVAL;
			goto fail2;
		}
	}

	gp = g_new_geomf(&g_gate_class, "%s", name);
	gp->start = g_gate_start;
	gp->access = g_gate_access;
	gp->orphan = g_gate_orphan;
	gp->dumpconf = g_gate_dumpconf;
	gp->softc = sc;

	if (ropp != NULL) {
		cp = g_new_consumer(gp);
		cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
		error = g_attach(cp, ropp);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to attach to %s.", ropp->name);
			goto fail3;
		}
		error = g_access(cp, 1, 0, 0);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to access %s.", ropp->name);
			g_detach(cp);
			goto fail3;
		}
		sc->sc_readcons = cp;
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	ggio->gctl_unit = sc->sc_unit;

	pp = g_new_providerf(gp, "%s", name);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = ggio->gctl_mediasize;
	pp->sectorsize = ggio->gctl_sectorsize;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);

	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	sc->sc_name = sc->sc_provider->name;
	mtx_unlock(&g_gate_units_lock);
	G_GATE_DEBUG(1, "Device %s created.", gp->name);

	if (sc->sc_timeout > 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	return (0);
fail3:
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
fail2:
	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
fail1:
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	free(sc, M_GATE);
	return (error);
}

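/*
 * Handle G_GATE_CMD_MODIFY: update the info string, the read provider
 * and/or the read offset of an existing device.  Changing the media
 * size is not implemented yet.
 */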
static int
g_gate_modify(struct g_gate_softc *sc, struct g_gate_ctl_modify *ggio)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	int error;

	if ((ggio->gctl_modify & GG_MODIFY_MEDIASIZE) != 0) {
		if (ggio->gctl_mediasize <= 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_mediasize % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		/* TODO */
		return (EOPNOTSUPP);
	}

	if ((ggio->gctl_modify & GG_MODIFY_INFO) != 0)
		(void)strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));

	cp = NULL;

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		g_topology_lock();
		if (sc->sc_readcons != NULL) {
			cp = sc->sc_readcons;
			sc->sc_readcons = NULL;
			(void)g_access(cp, -1, 0, 0);
			g_detach(cp);
			g_destroy_consumer(cp);
			/* Don't let cp dangle if no new provider is given. */
			cp = NULL;
		}
		if (ggio->gctl_readprov[0] != '\0') {
			pp = g_provider_by_name(ggio->gctl_readprov);
			if (pp == NULL) {
				g_topology_unlock();
				G_GATE_DEBUG(1, "Provider %s doesn't exist.",
				    ggio->gctl_readprov);
				return (EINVAL);
			}
			cp = g_new_consumer(sc->sc_provider->geom);
			cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
			error = g_attach(cp, pp);
			if (error != 0) {
				G_GATE_DEBUG(1, "Unable to attach to %s.",
				    pp->name);
			} else {
				error = g_access(cp, 1, 0, 0);
				if (error != 0) {
					G_GATE_DEBUG(1, "Unable to access %s.",
					    pp->name);
					g_detach(cp);
				}
			}
			if (error != 0) {
				g_destroy_consumer(cp);
				g_topology_unlock();
				return (error);
			}
		}
	} else {
		cp = sc->sc_readcons;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READOFFSET) != 0) {
		if (cp == NULL) {
			G_GATE_DEBUG(1, "No read provider.");
			error = EINVAL;
			goto fail;
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_readoffset % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			error = EINVAL;
			goto fail;
		}
		if (pp->mediasize + ggio->gctl_readoffset >
		    cp->provider->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			error = EINVAL;
			goto fail;
		}
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		sc->sc_readcons = cp;
		g_topology_unlock();
	}

	return (0);
fail:
	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		/*
		 * Undo the consumer set up above and drop the topology lock,
		 * which is still held on this path; the device is left
		 * without a read provider.
		 */
		if (cp != NULL) {
			(void)g_access(cp, -1, 0, 0);
			g_detach(cp);
			g_destroy_consumer(cp);
		}
		g_topology_unlock();
	}
	return (error);
}

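/*
 * Reject control requests coming from userland tools built against a
 * different G_GATE_VERSION.
 */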
#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION) {			\
		printf("Version mismatch %d != %d.\n",			\
		    (ggio)->gctl_version, G_GATE_VERSION);		\
		return (EINVAL);					\
	}								\
} while (0)
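
/*
 * Control device ioctl handler.  The userland daemon drives a device
 * through this interface: G_GATE_CMD_START blocks until a request is
 * available and copies it out (including the data of a write), and
 * G_GATE_CMD_DONE copies the result back in (including the data of a
 * read) and completes the bio.
 */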
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
	    flags, td);

	switch (cmd) {
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		error = g_gate_create(ggio);
		/*
		 * Reset the TDP_GEOM flag.  There are certainly pending
		 * events, because we have just created a new provider and
		 * other classes want to taste it, but we cannot service
		 * any I/O requests until we return to userland.
		 */
		td->td_pflags &= ~TDP_GEOM;
		return (error);
	    }
	case G_GATE_CMD_MODIFY:
	    {
		struct g_gate_ctl_modify *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = g_gate_modify(sc, ggio);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		g_topology_lock();
		mtx_lock(&g_gate_units_lock);
		error = g_gate_destroy(sc, ggio->gctl_force);
		g_topology_unlock();
		if (error != 0)
			g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_CANCEL:
	    {
		struct g_gate_ctl_cancel *ggio = (void *)addr;
		struct bio *tbp, *lbp;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		lbp = NULL;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
			if (ggio->gctl_seq == 0 ||
			    ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
				G_GATE_LOGREQ(1, bp, "Request canceled.");
				bioq_remove(&sc->sc_outqueue, bp);
				/*
				 * Be sure to put requests back onto the
				 * incoming queue in the proper order.
				 */
				if (lbp == NULL)
					bioq_insert_head(&sc->sc_inqueue, bp);
				else {
					TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
					    lbp, bp, bio_queue);
				}
				lbp = bp;
				/*
				 * If only one request was canceled, leave now.
				 */
				if (ggio->gctl_seq != 0)
					break;
			}
		}
		if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
			ggio->gctl_unit = sc->sc_unit;
		mtx_unlock(&sc->sc_queue_mtx);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = 0;
		for (;;) {
			mtx_lock(&sc->sc_queue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				ggio->gctl_error = ECANCELED;
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			if (msleep(sc, &sc->sc_queue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
				ggio->gctl_error = ECANCELED;
				goto start_end;
			}
		}
		ggio->gctl_cmd = bp->bio_cmd;
		if (bp->bio_cmd == BIO_WRITE &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_queue_mtx);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			goto start_end;
		}
		bioq_remove(&sc->sc_inqueue, bp);
		bioq_insert_tail(&sc->sc_outqueue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;

		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_DELETE:
		case BIO_FLUSH:
			break;
		case BIO_WRITE:
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				mtx_lock(&sc->sc_queue_mtx);
				bioq_remove(&sc->sc_outqueue, bp);
				bioq_insert_head(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			break;
		}
start_end:
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENOENT);
		error = 0;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			sc->sc_queue_count--;
		}
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp == NULL) {
			/*
			 * Request was probably canceled.
			 */
			goto done_end;
		}
		if (ggio->gctl_error == EAGAIN) {
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			mtx_lock(&sc->sc_queue_mtx);
			sc->sc_queue_count++;
			bioq_insert_head(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
				case BIO_FLUSH:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
done_end:
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}

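/*
 * Create the control device node for the class.
 */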
static void
g_gate_device(void)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}

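/*
 * Module event handler: allocate the unit table and create the control
 * device on load; refuse to unload while any gate device still exists.
 */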
static int
g_gate_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF);
		g_gate_units = malloc(g_gate_maxunits * sizeof(g_gate_units[0]),
		    M_GATE, M_WAITOK | M_ZERO);
		g_gate_nunits = 0;
		g_gate_device();
		break;
	case MOD_UNLOAD:
		mtx_lock(&g_gate_units_lock);
		if (g_gate_nunits > 0) {
			mtx_unlock(&g_gate_units_lock);
			error = EBUSY;
			break;
		}
		mtx_unlock(&g_gate_units_lock);
		mtx_destroy(&g_gate_units_lock);
		if (status_dev != NULL)
			destroy_dev(status_dev);
		free(g_gate_units, M_GATE);
		break;
	default:
		return (EOPNOTSUPP);
	}

	return (error);
}

static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);