xref: /freebsd/sys/geom/gate/g_gate.c (revision 2357939bc239bd5334a169b62313806178dd8f30)
1 /*-
2  * Copyright (c) 2004 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/bio.h>
32 #include <sys/conf.h>
33 #include <sys/kernel.h>
34 #include <sys/kthread.h>
35 #include <sys/fcntl.h>
36 #include <sys/linker.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
40 #include <sys/proc.h>
41 #include <sys/limits.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/signalvar.h>
45 #include <sys/time.h>
46 #include <machine/atomic.h>
47 
48 #include <geom/geom.h>
49 #include <geom/gate/g_gate.h>
50 
static MALLOC_DEFINE(M_GATE, "gg data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0, "GEOM_GATE stuff");
/* Debug verbosity; runtime-tunable via the kern.geom.gate.debug sysctl. */
static u_int g_gate_debug = 0;
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0,
    "Debug level");

static int g_gate_destroy_geom(struct gctl_req *, struct g_class *,
    struct g_geom *);
/* GEOM class registration for gate providers. */
struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.destroy_geom = g_gate_destroy_geom
};

/*
 * Control device node; userland ggate tools issue ioctls against it to
 * create/destroy devices and to shuttle I/O requests back and forth.
 */
static dev_t status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	g_gate_ioctl,
	.d_name =	G_GATE_CTL_NAME
};


/* List of all existing gate devices; protected by g_gate_list_mtx. */
static LIST_HEAD(, g_gate_softc) g_gate_list =
    LIST_HEAD_INITIALIZER(&g_gate_list);
static struct mtx g_gate_list_mtx;
78 
79 
/*
 * Mark the device as going away.  This only raises the flag; the actual
 * teardown happens in g_gate_destroy() once all references are dropped.
 */
static void
g_gate_wither(struct g_gate_softc *sc)
{

	atomic_set_32(&sc->sc_flags, G_GATE_FLAG_DESTROY);
}
86 
/*
 * Tear down a gate device.
 *
 * Called with the topology lock AND g_gate_list_mtx held; g_gate_list_mtx
 * is always dropped before returning.  Unless 'force' is set, fails with
 * EBUSY while the provider is still open.  If other threads still hold
 * references (sc_ref > 0), the device is only marked for destruction and
 * 0 is returned — the final g_gate_release() re-enters via
 * g_gate_destroy_it() to finish the job.
 */
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct g_provider *pp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_list_mtx, MA_OWNED);
	pp = sc->sc_provider;
	if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_list_mtx);
		return (EBUSY);
	}
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		g_gate_wither(sc);
		LIST_REMOVE(sc, sc_next);
	}
	mtx_unlock(&g_gate_list_mtx);
	/* Wake any thread sleeping in G_GATE_CMD_START so it can bail out. */
	mtx_lock(&sc->sc_inqueue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_inqueue_mtx);
	if (sc->sc_ref > 0) {
		/* Defer final teardown to the last g_gate_release(). */
		G_GATE_DEBUG(1, "Cannot destroy %s yet.", sc->sc_name);
		return (0);
	}
	callout_drain(&sc->sc_callout);
	/* Cancel everything still pending on the input queue. */
	mtx_lock(&sc->sc_inqueue_mtx);
	for (;;) {
		bp = bioq_first(&sc->sc_inqueue);
		if (bp != NULL) {
			bioq_remove(&sc->sc_inqueue, bp);
			atomic_subtract_rel_32(&sc->sc_queue_count, 1);
			G_GATE_LOGREQ(1, bp, "Request canceled.");
			g_io_deliver(bp, ENXIO);
		} else {
			break;
		}
	}
	mtx_destroy(&sc->sc_inqueue_mtx);
	/* ...and everything handed to userland but never completed. */
	mtx_lock(&sc->sc_outqueue_mtx);
	for (;;) {
		bp = bioq_first(&sc->sc_outqueue);
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			atomic_subtract_rel_32(&sc->sc_queue_count, 1);
			G_GATE_LOGREQ(1, bp, "Request canceled.");
			g_io_deliver(bp, ENXIO);
		} else {
			break;
		}
	}
	mtx_destroy(&sc->sc_outqueue_mtx);
	G_GATE_DEBUG(0, "Device %s destroyed.", sc->sc_name);
	pp->geom->softc = NULL;
	g_wither_geom(pp->geom, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}
146 
147 static void
148 g_gate_destroy_it(void *arg, int flag __unused)
149 {
150 	struct g_gate_softc *sc;
151 
152 	g_topology_assert();
153 	sc = arg;
154 	mtx_lock(&g_gate_list_mtx);
155 	g_gate_destroy(sc, 1);
156 }
157 
158 static int
159 g_gate_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
160 {
161 
162 	g_topology_assert();
163 	mtx_lock(&g_gate_list_mtx);
164 	return (g_gate_destroy(gp->softc, 0));
165 }
166 
167 static int
168 g_gate_access(struct g_provider *pp, int dr, int dw, int de)
169 {
170 	struct g_gate_softc *sc;
171 
172 	if (dr <= 0 && dw <= 0 && de <= 0)
173 		return (0);
174 	sc = pp->geom->softc;
175 	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
176 		return (ENXIO);
177 	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
178 		return (EPERM);
179 	if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
180 		return (EPERM);
181 	return (0);
182 }
183 
184 static void
185 g_gate_start(struct bio *bp)
186 {
187 	struct g_gate_softc *sc;
188 	uint32_t qcount;
189 
190 	sc = bp->bio_to->geom->softc;
191 	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
192 		g_io_deliver(bp, ENXIO);
193 		return;
194 	}
195 	G_GATE_LOGREQ(2, bp, "Request received.");
196 	switch (bp->bio_cmd) {
197 	case BIO_READ:
198 	case BIO_DELETE:
199 	case BIO_WRITE:
200 		break;
201 	case BIO_GETATTR:
202 	default:
203 		G_GATE_LOGREQ(2, bp, "Ignoring request.");
204 		g_io_deliver(bp, EOPNOTSUPP);
205 		return;
206 	}
207 
208 	atomic_store_rel_32(&qcount, sc->sc_queue_count);
209 	if (qcount > sc->sc_queue_size) {
210 		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
211 		g_io_deliver(bp, EIO);
212 		return;
213 	}
214 	atomic_add_acq_32(&sc->sc_queue_count, 1);
215 	bp->bio_driver1 = (void *)sc->sc_seq;
216 	sc->sc_seq++;
217 
218 	mtx_lock(&sc->sc_inqueue_mtx);
219 	bioq_disksort(&sc->sc_inqueue, bp);
220 	wakeup(sc);
221 	mtx_unlock(&sc->sc_inqueue_mtx);
222 }
223 
224 static struct g_gate_softc *
225 g_gate_find(u_int unit)
226 {
227 	struct g_gate_softc *sc;
228 
229 	mtx_assert(&g_gate_list_mtx, MA_OWNED);
230 	LIST_FOREACH(sc, &g_gate_list, sc_next) {
231 		if (sc->sc_unit == unit)
232 			break;
233 	}
234 	return (sc);
235 }
236 
237 static struct g_gate_softc *
238 g_gate_hold(u_int unit)
239 {
240 	struct g_gate_softc *sc;
241 
242 	mtx_lock(&g_gate_list_mtx);
243 	sc = g_gate_find(unit);
244 	if (sc != NULL) {
245 		if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
246 			sc = NULL;
247 		else
248 			sc->sc_ref++;
249 	}
250 	mtx_unlock(&g_gate_list_mtx);
251 	return (sc);
252 }
253 
254 static void
255 g_gate_release(struct g_gate_softc *sc)
256 {
257 
258 	g_topology_assert_not();
259 	mtx_lock(&g_gate_list_mtx);
260 	sc->sc_ref--;
261 	KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
262 	if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
263 		mtx_unlock(&g_gate_list_mtx);
264 		g_waitfor_event(g_gate_destroy_it, sc, M_WAITOK, NULL);
265 	} else {
266 		mtx_unlock(&g_gate_list_mtx);
267 	}
268 }
269 
270 static int
271 g_gate_getunit(int unit)
272 {
273 	struct g_gate_softc *sc;
274 
275 	mtx_assert(&g_gate_list_mtx, MA_OWNED);
276 	if (unit >= 0) {
277 		LIST_FOREACH(sc, &g_gate_list, sc_next) {
278 			if (sc->sc_unit == unit)
279 				return (-1);
280 		}
281 	} else {
282 		unit = 0;
283 once_again:
284 		LIST_FOREACH(sc, &g_gate_list, sc_next) {
285 			if (sc->sc_unit == unit) {
286 				if (++unit > 666)
287 					return (-1);
288 				goto once_again;
289 			}
290 		}
291 	}
292 	return (unit);
293 }
294 
295 static void
296 g_gate_guard(void *arg)
297 {
298 	struct g_gate_softc *sc;
299 	struct bintime curtime;
300 	struct bio *bp, *bp2;
301 
302 	sc = arg;
303 	binuptime(&curtime);
304 	g_gate_hold(sc->sc_unit);
305 	mtx_lock(&sc->sc_inqueue_mtx);
306 	TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
307 		if (curtime.sec - bp->bio_t0.sec < 5)
308 			continue;
309 		bioq_remove(&sc->sc_inqueue, bp);
310 		atomic_subtract_rel_32(&sc->sc_queue_count, 1);
311 		G_GATE_LOGREQ(1, bp, "Request timeout.");
312 		g_io_deliver(bp, EIO);
313 	}
314 	mtx_unlock(&sc->sc_inqueue_mtx);
315 	mtx_lock(&sc->sc_outqueue_mtx);
316 	TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
317 		if (curtime.sec - bp->bio_t0.sec < 5)
318 			continue;
319 		bioq_remove(&sc->sc_outqueue, bp);
320 		atomic_subtract_rel_32(&sc->sc_queue_count, 1);
321 		G_GATE_LOGREQ(1, bp, "Request timeout.");
322 		g_io_deliver(bp, EIO);
323 	}
324 	mtx_unlock(&sc->sc_outqueue_mtx);
325 	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
326 		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
327 		    g_gate_guard, sc);
328 	}
329 	g_gate_release(sc);
330 }
331 
332 static void
333 g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
334     struct g_consumer *cp, struct g_provider *pp)
335 {
336 	struct g_gate_softc *sc;
337 
338 	sc = gp->softc;
339 	if (sc == NULL || pp != NULL || cp != NULL)
340 		return;
341 	g_gate_hold(sc->sc_unit);
342 	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
343 		sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
344 	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
345 		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
346 		    "write-only");
347 	} else {
348 		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
349 		    "read-write");
350 	}
351 	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
352 	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
353 	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
354 	    sc->sc_queue_count);
355 	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
356 	    sc->sc_queue_size);
357 	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
358 	g_gate_release(sc);
359 }
360 
361 static int
362 g_gate_create(struct g_gate_ctl_create *ggio)
363 {
364 	struct g_gate_softc *sc;
365 	struct g_geom *gp;
366 	struct g_provider *pp;
367 
368 	if (ggio->gctl_mediasize == 0) {
369 		G_GATE_DEBUG(1, "Invalid media size.");
370 		return (EINVAL);
371 	}
372 	if (ggio->gctl_sectorsize > 0 && !powerof2(ggio->gctl_sectorsize)) {
373 		G_GATE_DEBUG(1, "Invalid sector size.");
374 		return (EINVAL);
375 	}
376 	if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
377 	    (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
378 		G_GATE_DEBUG(1, "Invalid flags.");
379 		return (EINVAL);
380 	}
381 	if (ggio->gctl_unit < -1) {
382 		G_GATE_DEBUG(1, "Invalid unit number.");
383 		return (EINVAL);
384 	}
385 
386 	sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
387 	sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
388 	strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
389 	sc->sc_seq = 0;
390 	bioq_init(&sc->sc_inqueue);
391 	mtx_init(&sc->sc_inqueue_mtx, "gg:inqueue", NULL, MTX_DEF);
392 	bioq_init(&sc->sc_outqueue);
393 	mtx_init(&sc->sc_outqueue_mtx, "gg:outqueue", NULL, MTX_DEF);
394 	sc->sc_queue_count = 0;
395 	sc->sc_queue_size = ggio->gctl_maxcount;
396 	if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
397 		sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
398 	sc->sc_timeout = ggio->gctl_timeout;
399 	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
400 	mtx_lock(&g_gate_list_mtx);
401 	ggio->gctl_unit = g_gate_getunit(ggio->gctl_unit);
402 	if (ggio->gctl_unit == -1) {
403 		mtx_destroy(&sc->sc_inqueue_mtx);
404 		mtx_destroy(&sc->sc_outqueue_mtx);
405 		free(sc, M_GATE);
406 		return (EBUSY);
407 	}
408 	sc->sc_unit = ggio->gctl_unit;
409 	LIST_INSERT_HEAD(&g_gate_list, sc, sc_next);
410 	mtx_unlock(&g_gate_list_mtx);
411 
412 	DROP_GIANT();
413 	g_topology_lock();
414 	gp = g_new_geomf(&g_gate_class, "%s%d", G_GATE_PROVIDER_NAME,
415 	    sc->sc_unit);
416 	gp->start = g_gate_start;
417 	gp->access = g_gate_access;
418 	gp->dumpconf = g_gate_dumpconf;
419 	gp->softc = sc;
420 	pp = g_new_providerf(gp, "%s%d", G_GATE_PROVIDER_NAME, sc->sc_unit);
421 	pp->mediasize = ggio->gctl_mediasize;
422 	pp->sectorsize = ggio->gctl_sectorsize;
423 	sc->sc_provider = pp;
424 	g_error_provider(pp, 0);
425 	g_topology_unlock();
426 	PICKUP_GIANT();
427 
428 	if (sc->sc_timeout > 0) {
429 		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
430 		    g_gate_guard, sc);
431 	}
432 	return (0);
433 }
434 
/* Reject ioctl requests built against a different control-ABI version. */
#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION)			\
		return (EINVAL);					\
} while (0)
/*
 * Control-device ioctl handler — the interface the userland ggate
 * daemons use.  CREATE/DESTROY manage devices; START hands a pending
 * bio's parameters (and write data) to userland; DONE takes the result
 * (and read data) back and completes the bio.
 */
static int
g_gate_ioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
	    flags, td);

	switch (cmd) {
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		return (g_gate_create(ggio));
	    }
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit);
		if (sc == NULL)
			return (ENXIO);
		g_topology_lock();
		mtx_lock(&g_gate_list_mtx);
		/* g_gate_destroy() drops g_gate_list_mtx before returning. */
		error = g_gate_destroy(sc, ggio->gctl_force);
		if (error == 0)
			g_gate_wither(sc);
		g_topology_unlock();
		/* Our hold may be the last reference; this can free sc. */
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit);
		if (sc == NULL)
			return (ENXIO);
		/*
		 * Wait for a request on the input queue.  When we break out
		 * of the loop we still hold sc_inqueue_mtx; msleep() with
		 * PDROP releases it on the sleep and error paths.
		 */
		for (;;) {
			mtx_lock(&sc->sc_inqueue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if (msleep(sc, &sc->sc_inqueue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
				/* Interrupted by a signal. */
				g_gate_release(sc);
				ggio->gctl_error = ECANCELED;
				return (0);
			}
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				/* Woken because the device is going away. */
				g_gate_release(sc);
				ggio->gctl_error = ECANCELED;
				return (0);
			}
		}
		ggio->gctl_cmd = bp->bio_cmd;
		/*
		 * The userland buffer is too small for this request's
		 * payload; report the required size so the daemon can retry
		 * with a bigger buffer.
		 */
		if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_inqueue_mtx);
			g_gate_release(sc);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			return (0);
		}
		bioq_remove(&sc->sc_inqueue, bp);
		atomic_subtract_rel_32(&sc->sc_queue_count, 1);
		mtx_unlock(&sc->sc_inqueue_mtx);
		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;
		switch (bp->bio_cmd) {
		case BIO_READ:
			break;
		case BIO_DELETE:
		case BIO_WRITE:
			/* Copy the data to be written out to userland. */
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				/* Requeue; the request was never started. */
				mtx_lock(&sc->sc_inqueue_mtx);
				bioq_disksort(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_inqueue_mtx);
				g_gate_release(sc);
				return (error);
			}
			break;
		}
		/* Park the request on the outqueue until DONE arrives. */
		mtx_lock(&sc->sc_outqueue_mtx);
		bioq_insert_tail(&sc->sc_outqueue, bp);
		atomic_add_acq_32(&sc->sc_queue_count, 1);
		mtx_unlock(&sc->sc_outqueue_mtx);
		g_gate_release(sc);
		return (0);
	    }
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit);
		if (sc == NULL)
			return (ENOENT);
		/* Match the in-flight request by its sequence number. */
		mtx_lock(&sc->sc_outqueue_mtx);
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			atomic_subtract_rel_32(&sc->sc_queue_count, 1);
		}
		mtx_unlock(&sc->sc_outqueue_mtx);
		if (bp == NULL) {
			/*
			 * Request was probably canceled.
			 */
			g_gate_release(sc);
			return (0);
		}
		if (ggio->gctl_error == EAGAIN) {
			/* Daemon asked for a retry: put the request back. */
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			atomic_add_acq_32(&sc->sc_queue_count, 1);
			mtx_lock(&sc->sc_inqueue_mtx);
			bioq_disksort(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_inqueue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					/* Pull the read data from userland. */
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}
595 
/*
 * Create the control device node the userland ggate tools talk to.
 * Called once at module load.
 */
static void
g_gate_device(void *unused __unused)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}
603 
604 static int
605 g_gate_modevent(module_t mod, int type, void *data)
606 {
607 	int error = 0;
608 
609 	switch (type) {
610 	case MOD_LOAD:
611 		mtx_init(&g_gate_list_mtx, "gg_list_lock", NULL, MTX_DEF);
612 		g_gate_device(NULL);
613 		break;
614 	case MOD_UNLOAD:
615 		mtx_lock(&g_gate_list_mtx);
616 		if (!LIST_EMPTY(&g_gate_list)) {
617 			mtx_unlock(&g_gate_list_mtx);
618 			error = EBUSY;
619 			break;
620 		}
621 		mtx_unlock(&g_gate_list_mtx);
622 		mtx_destroy(&g_gate_list_mtx);
623 		if (status_dev != 0)
624 			destroy_dev(status_dev);
625 		break;
626 	default:
627 		break;
628 	}
629 
630 	return (error);
631 }
/* Kernel module and GEOM class registration. */
static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);
639