/*-
 * Copyright (c) 2004 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/gate/g_gate.h>

static MALLOC_DEFINE(M_GATE, "gg data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0, "GEOM_GATE stuff");
static u_int g_gate_debug = 0;
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0,
    "Debug level");

static int g_gate_destroy_geom(struct gctl_req *, struct g_class *,
    struct g_geom *);
struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.version = G_VERSION,
	.destroy_geom = g_gate_destroy_geom
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	g_gate_ioctl,
	.d_name =	G_GATE_CTL_NAME
};

static LIST_HEAD(, g_gate_softc) g_gate_list =
    LIST_HEAD_INITIALIZER(&g_gate_list);
static struct mtx g_gate_list_mtx;

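/*
 * Mark the device as going away, so that no new requests are accepted;
 * the actual teardown is performed later by g_gate_destroy().
 */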
static void
g_gate_wither(struct g_gate_softc *sc)
{

	atomic_set_32(&sc->sc_flags, G_GATE_FLAG_DESTROY);
}

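/*
 * Destroy a gate device: refuse if it is still open (unless forced),
 * complete every queued request with ENXIO, and release the provider
 * and the softc.  Called with the topology lock and g_gate_list_mtx
 * held; g_gate_list_mtx is dropped before returning.
 */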
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct g_provider *pp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_list_mtx, MA_OWNED);
	pp = sc->sc_provider;
	if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_list_mtx);
		return (EBUSY);
	}
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		g_gate_wither(sc);
		LIST_REMOVE(sc, sc_next);
	}
	mtx_unlock(&g_gate_list_mtx);
	mtx_lock(&sc->sc_inqueue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_inqueue_mtx);
	if (sc->sc_ref > 0) {
		G_GATE_DEBUG(1, "Cannot destroy %s yet.", sc->sc_name);
		return (0);
	}
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_inqueue_mtx);
	for (;;) {
		bp = bioq_first(&sc->sc_inqueue);
		if (bp != NULL) {
			bioq_remove(&sc->sc_inqueue, bp);
			atomic_subtract_rel_32(&sc->sc_queue_count, 1);
			G_GATE_LOGREQ(1, bp, "Request canceled.");
			g_io_deliver(bp, ENXIO);
		} else {
			break;
		}
	}
	mtx_destroy(&sc->sc_inqueue_mtx);
	mtx_lock(&sc->sc_outqueue_mtx);
	for (;;) {
		bp = bioq_first(&sc->sc_outqueue);
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			atomic_subtract_rel_32(&sc->sc_queue_count, 1);
			G_GATE_LOGREQ(1, bp, "Request canceled.");
			g_io_deliver(bp, ENXIO);
		} else {
			break;
		}
	}
	mtx_destroy(&sc->sc_outqueue_mtx);
	G_GATE_DEBUG(0, "Device %s destroyed.", sc->sc_name);
	pp->geom->softc = NULL;
	g_wither_geom(pp->geom, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}

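/*
 * GEOM event handler used to finish a deferred destruction once the
 * last reference has been dropped (see g_gate_release()).
 */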
static void
g_gate_destroy_it(void *arg, int flag __unused)
{
	struct g_gate_softc *sc;

	g_topology_assert();
	sc = arg;
	mtx_lock(&g_gate_list_mtx);
	g_gate_destroy(sc, 1);
}

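/*
 * Class method called by GEOM when the geom should go away, e.g. when
 * the class is being unloaded.
 */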
static int
g_gate_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{

	g_topology_assert();
	mtx_lock(&g_gate_list_mtx);
	return (g_gate_destroy(gp->softc, 0));
}

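/*
 * Access method: reject any access to a device that is being destroyed
 * and read access to write-only devices.
 */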
static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_gate_softc *sc;

	if (dr <= 0 && dw <= 0 && de <= 0)
		return (0);
	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		return (ENXIO);
	/* XXX: Hack to allow read-only mounts. */
#if 0
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
		return (EPERM);
#endif
	if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
		return (EPERM);
	return (0);
}

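/*
 * Start method: enqueue an incoming bio on the input queue, where the
 * userland process servicing the device will pick it up, or fail it
 * right away if the queue is full or the command is unsupported.
 */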
static void
g_gate_start(struct bio *bp)
{
	struct g_gate_softc *sc;
	uint32_t qcount;

	sc = bp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(bp, ENXIO);
		return;
	}
	G_GATE_LOGREQ(2, bp, "Request received.");
	switch (bp->bio_cmd) {
	case BIO_READ:
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		/* XXX: Hack to allow read-only mounts. */
		if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
			g_io_deliver(bp, EPERM);
			return;
		}
		break;
	case BIO_GETATTR:
	default:
		G_GATE_LOGREQ(2, bp, "Ignoring request.");
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	atomic_store_rel_32(&qcount, sc->sc_queue_count);
	if (qcount > sc->sc_queue_size) {
		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
		g_io_deliver(bp, EIO);
		return;
	}
	atomic_add_acq_32(&sc->sc_queue_count, 1);
	bp->bio_driver1 = (void *)sc->sc_seq;
	sc->sc_seq++;

	mtx_lock(&sc->sc_inqueue_mtx);
	bioq_disksort(&sc->sc_inqueue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_inqueue_mtx);
}

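/*
 * Look up a softc by unit number.  The g_gate_list_mtx mutex must be
 * held; returns NULL if the unit does not exist.
 */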
static struct g_gate_softc *
g_gate_find(u_int unit)
{
	struct g_gate_softc *sc;

	mtx_assert(&g_gate_list_mtx, MA_OWNED);
	LIST_FOREACH(sc, &g_gate_list, sc_next) {
		if (sc->sc_unit == unit)
			break;
	}
	return (sc);
}

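/*
 * Look up a softc and grab a reference on it.  Returns NULL if the
 * unit does not exist or the device is being destroyed.
 */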
static struct g_gate_softc *
g_gate_hold(u_int unit)
{
	struct g_gate_softc *sc;

	mtx_lock(&g_gate_list_mtx);
	sc = g_gate_find(unit);
	if (sc != NULL) {
		if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
			sc = NULL;
		else
			sc->sc_ref++;
	}
	mtx_unlock(&g_gate_list_mtx);
	return (sc);
}

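/*
 * Drop a reference obtained via g_gate_hold().  When the last
 * reference on a dying device goes away, finish the destruction from
 * the GEOM event queue.
 */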
static void
g_gate_release(struct g_gate_softc *sc)
{

	g_topology_assert_not();
	mtx_lock(&g_gate_list_mtx);
	sc->sc_ref--;
	KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
	if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		mtx_unlock(&g_gate_list_mtx);
		g_waitfor_event(g_gate_destroy_it, sc, M_WAITOK, NULL);
	} else {
		mtx_unlock(&g_gate_list_mtx);
	}
}

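/*
 * Allocate a unit number: verify that an explicitly requested unit is
 * free, or find the lowest unused one when -1 is passed in.  Returns
 * -1 on failure.  The g_gate_list_mtx mutex must be held.
 */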
static int
g_gate_getunit(int unit)
{
	struct g_gate_softc *sc;

	mtx_assert(&g_gate_list_mtx, MA_OWNED);
	if (unit >= 0) {
		LIST_FOREACH(sc, &g_gate_list, sc_next) {
			if (sc->sc_unit == unit)
				return (-1);
		}
	} else {
		unit = 0;
once_again:
		LIST_FOREACH(sc, &g_gate_list, sc_next) {
			if (sc->sc_unit == unit) {
				if (++unit > 666)
					return (-1);
				goto once_again;
			}
		}
	}
	return (unit);
}

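/*
 * Watchdog fired every sc_timeout seconds: complete with EIO every
 * request that has been queued for five seconds or more, then re-arm
 * itself as long as the device is not being destroyed.
 */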
static void
g_gate_guard(void *arg)
{
	struct g_gate_softc *sc;
	struct bintime curtime;
	struct bio *bp, *bp2;

	sc = arg;
	binuptime(&curtime);
	g_gate_hold(sc->sc_unit);
	mtx_lock(&sc->sc_inqueue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_inqueue, bp);
		atomic_subtract_rel_32(&sc->sc_queue_count, 1);
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	mtx_unlock(&sc->sc_inqueue_mtx);
	mtx_lock(&sc->sc_outqueue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_outqueue, bp);
		atomic_subtract_rel_32(&sc->sc_queue_count, 1);
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	mtx_unlock(&sc->sc_outqueue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	g_gate_release(sc);
}

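/*
 * Dump the device state (access mode, timeout, info string, queue
 * statistics and reference count) into the XML configuration output.
 */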
static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_gate_softc *sc;

	sc = gp->softc;
	if (sc == NULL || pp != NULL || cp != NULL)
		return;
	g_gate_hold(sc->sc_unit);
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "write-only");
	} else {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-write");
	}
	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
	    sc->sc_queue_count);
	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
	    sc->sc_queue_size);
	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
	g_topology_unlock();
	g_gate_release(sc);
	g_topology_lock();
}

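/*
 * Handle G_GATE_CMD_CREATE: validate the parameters supplied by
 * userland, set up the softc, allocate a unit number and create the
 * geom and provider for the new device.
 */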
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp;

	if (ggio->gctl_mediasize == 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if (ggio->gctl_sectorsize > 0 && !powerof2(ggio->gctl_sectorsize)) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
	    (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		G_GATE_DEBUG(1, "Invalid flags.");
		return (EINVAL);
	}
	if (ggio->gctl_unit < -1) {
		G_GATE_DEBUG(1, "Invalid unit number.");
		return (EINVAL);
	}

	sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
	sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
	strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
	sc->sc_seq = 0;
	bioq_init(&sc->sc_inqueue);
	mtx_init(&sc->sc_inqueue_mtx, "gg:inqueue", NULL, MTX_DEF);
	bioq_init(&sc->sc_outqueue);
	mtx_init(&sc->sc_outqueue_mtx, "gg:outqueue", NULL, MTX_DEF);
	sc->sc_queue_count = 0;
	sc->sc_queue_size = ggio->gctl_maxcount;
	if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
		sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
	sc->sc_timeout = ggio->gctl_timeout;
	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
	mtx_lock(&g_gate_list_mtx);
	ggio->gctl_unit = g_gate_getunit(ggio->gctl_unit);
	if (ggio->gctl_unit == -1) {
		mtx_unlock(&g_gate_list_mtx);
		mtx_destroy(&sc->sc_inqueue_mtx);
		mtx_destroy(&sc->sc_outqueue_mtx);
		free(sc, M_GATE);
		return (EBUSY);
	}
	sc->sc_unit = ggio->gctl_unit;
	LIST_INSERT_HEAD(&g_gate_list, sc, sc_next);
	mtx_unlock(&g_gate_list_mtx);

	DROP_GIANT();
	g_topology_lock();
	gp = g_new_geomf(&g_gate_class, "%s%d", G_GATE_PROVIDER_NAME,
	    sc->sc_unit);
	gp->start = g_gate_start;
	gp->access = g_gate_access;
	gp->dumpconf = g_gate_dumpconf;
	gp->softc = sc;
	pp = g_new_providerf(gp, "%s%d", G_GATE_PROVIDER_NAME, sc->sc_unit);
	pp->mediasize = ggio->gctl_mediasize;
	pp->sectorsize = ggio->gctl_sectorsize;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	PICKUP_GIANT();

	if (sc->sc_timeout > 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	return (0);
}

#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION)			\
		return (EINVAL);					\
} while (0)
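
/*
 * Control device ioctl handler implementing the userland interface:
 * create and destroy devices, hand out queued requests
 * (G_GATE_CMD_START) and collect their results (G_GATE_CMD_DONE).
 */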
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
	    flags, td);

	switch (cmd) {
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		return (g_gate_create(ggio));
	    }
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit);
		if (sc == NULL)
			return (ENXIO);
		g_topology_lock();
		mtx_lock(&g_gate_list_mtx);
		error = g_gate_destroy(sc, ggio->gctl_force);
		if (error == 0)
			g_gate_wither(sc);
		g_topology_unlock();
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit);
		if (sc == NULL)
			return (ENXIO);
		for (;;) {
			mtx_lock(&sc->sc_inqueue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if (msleep(sc, &sc->sc_inqueue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
				g_gate_release(sc);
				ggio->gctl_error = ECANCELED;
				return (0);
			}
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				g_gate_release(sc);
				ggio->gctl_error = ECANCELED;
				return (0);
			}
		}
		ggio->gctl_cmd = bp->bio_cmd;
		if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_inqueue_mtx);
			g_gate_release(sc);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			return (0);
		}
		bioq_remove(&sc->sc_inqueue, bp);
		atomic_subtract_rel_32(&sc->sc_queue_count, 1);
		mtx_unlock(&sc->sc_inqueue_mtx);
		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;
		switch (bp->bio_cmd) {
		case BIO_READ:
			break;
		case BIO_DELETE:
		case BIO_WRITE:
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				mtx_lock(&sc->sc_inqueue_mtx);
				bioq_disksort(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_inqueue_mtx);
				g_gate_release(sc);
				return (error);
			}
			break;
		}
		mtx_lock(&sc->sc_outqueue_mtx);
		bioq_insert_tail(&sc->sc_outqueue, bp);
		atomic_add_acq_32(&sc->sc_queue_count, 1);
		mtx_unlock(&sc->sc_outqueue_mtx);
		g_gate_release(sc);
		return (0);
	    }
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit);
		if (sc == NULL)
			return (ENOENT);
		mtx_lock(&sc->sc_outqueue_mtx);
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			atomic_subtract_rel_32(&sc->sc_queue_count, 1);
		}
		mtx_unlock(&sc->sc_outqueue_mtx);
		if (bp == NULL) {
			/*
			 * Request was probably canceled.
			 */
			g_gate_release(sc);
			return (0);
		}
		if (ggio->gctl_error == EAGAIN) {
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			atomic_add_acq_32(&sc->sc_queue_count, 1);
			mtx_lock(&sc->sc_inqueue_mtx);
			bioq_disksort(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_inqueue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}

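/*
 * Create the control device used by userland to talk to the driver.
 */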
static void
g_gate_device(void)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}

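/*
 * Module event handler: initialize the device list lock and the
 * control device on load; refuse to unload while gate devices exist.
 */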
static int
g_gate_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&g_gate_list_mtx, "gg_list_lock", NULL, MTX_DEF);
		g_gate_device();
		break;
	case MOD_UNLOAD:
		mtx_lock(&g_gate_list_mtx);
		if (!LIST_EMPTY(&g_gate_list)) {
			mtx_unlock(&g_gate_list_mtx);
			error = EBUSY;
			break;
		}
		mtx_unlock(&g_gate_list_mtx);
		mtx_destroy(&g_gate_list_mtx);
		if (status_dev != NULL)
			destroy_dev(status_dev);
		break;
	default:
		return (EOPNOTSUPP);
	}

	return (error);
}

static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);