/*-
 * Copyright (c) 2004 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/gate/g_gate.h>

static MALLOC_DEFINE(M_GATE, "gg data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0, "GEOM_GATE stuff");
static u_int g_gate_debug = 0;
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0,
    "Debug level");

static int g_gate_destroy_geom(struct gctl_req *, struct g_class *,
    struct g_geom *);
struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.destroy_geom = g_gate_destroy_geom
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	g_gate_ioctl,
	.d_name =	G_GATE_CTL_NAME
};

static LIST_HEAD(, g_gate_softc) g_gate_list =
    LIST_HEAD_INITIALIZER(&g_gate_list);
static struct mtx g_gate_list_mtx;

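/*
 * Mark the given device as being destroyed, so that no new requests
 * are accepted for it.
 */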
static void
g_gate_wither(struct g_gate_softc *sc)
{

	atomic_set_32(&sc->sc_flags, G_GATE_FLAG_DESTROY);
}

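/*
 * Tear down a ggate device: remove it from the global list, cancel all
 * queued requests with ENXIO and release its resources.  With 'force'
 * unset, destruction is refused (EBUSY) while the provider is still
 * open.  If references are still held, only the DESTROY flag is set
 * and the teardown is finished by the last g_gate_release().  Called
 * with the topology lock and g_gate_list_mtx held; the list mutex is
 * dropped before returning.
 */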
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct g_provider *pp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_list_mtx, MA_OWNED);
	pp = sc->sc_provider;
	if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_list_mtx);
		return (EBUSY);
	}
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		g_gate_wither(sc);
		LIST_REMOVE(sc, sc_next);
	}
	mtx_unlock(&g_gate_list_mtx);
	mtx_lock(&sc->sc_inqueue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_inqueue_mtx);
	if (sc->sc_ref > 0) {
		G_GATE_DEBUG(1, "Cannot destroy %s yet.", sc->sc_name);
		return (0);
	}
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_inqueue_mtx);
	for (;;) {
		bp = bioq_first(&sc->sc_inqueue);
		if (bp != NULL) {
			bioq_remove(&sc->sc_inqueue, bp);
			atomic_subtract_rel_32(&sc->sc_queue_count, 1);
			G_GATE_LOGREQ(1, bp, "Request canceled.");
			g_io_deliver(bp, ENXIO);
		} else {
			break;
		}
	}
	mtx_destroy(&sc->sc_inqueue_mtx);
	mtx_lock(&sc->sc_outqueue_mtx);
	for (;;) {
		bp = bioq_first(&sc->sc_outqueue);
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			atomic_subtract_rel_32(&sc->sc_queue_count, 1);
			G_GATE_LOGREQ(1, bp, "Request canceled.");
			g_io_deliver(bp, ENXIO);
		} else {
			break;
		}
	}
	mtx_destroy(&sc->sc_outqueue_mtx);
	G_GATE_DEBUG(0, "Device %s destroyed.", sc->sc_name);
	pp->geom->softc = NULL;
	g_wither_geom(pp->geom, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}

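/*
 * Event handler scheduled by the last g_gate_release() on a dying
 * device; finishes the deferred destruction with the topology lock
 * held.
 */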
static void
g_gate_destroy_it(void *arg, int flag __unused)
{
	struct g_gate_softc *sc;

	g_topology_assert();
	sc = arg;
	mtx_lock(&g_gate_list_mtx);
	g_gate_destroy(sc, 1);
}

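/*
 * GEOM class destroy_geom method; attempts a non-forced destroy.
 */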
static int
g_gate_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{

	g_topology_assert();
	mtx_lock(&g_gate_list_mtx);
	return (g_gate_destroy(gp->softc, 0));
}

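/*
 * GEOM access method.  Refuse new opens on devices being destroyed and
 * enforce the write-only flag.  The read-only check is disabled here
 * and done per-request in g_gate_start() instead, apparently so that
 * read-only mounts (which still open the provider for writing) keep
 * working.
 */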
static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_gate_softc *sc;

	if (dr <= 0 && dw <= 0 && de <= 0)
		return (0);
	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		return (ENXIO);
	/* XXX: Hack to allow read-only mounts. */
#if 0
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
		return (EPERM);
#endif
	if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
		return (EPERM);
	return (0);
}

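/*
 * GEOM start method.  Validate the request, enforce the read-only flag
 * and the queue size limit, tag the bio with a sequence number and put
 * it on the input queue for the userland daemon to pick up.
 */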
static void
g_gate_start(struct bio *bp)
{
	struct g_gate_softc *sc;
	uint32_t qcount;

	sc = bp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(bp, ENXIO);
		return;
	}
	G_GATE_LOGREQ(2, bp, "Request received.");
	switch (bp->bio_cmd) {
	case BIO_READ:
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		/* XXX: Hack to allow read-only mounts. */
		if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
			g_io_deliver(bp, EPERM);
			return;
		}
		break;
	case BIO_GETATTR:
	default:
		G_GATE_LOGREQ(2, bp, "Ignoring request.");
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	atomic_store_rel_32(&qcount, sc->sc_queue_count);
	if (qcount > sc->sc_queue_size) {
		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
		g_io_deliver(bp, EIO);
		return;
	}
	atomic_add_acq_32(&sc->sc_queue_count, 1);
	bp->bio_driver1 = (void *)sc->sc_seq;
	sc->sc_seq++;

	mtx_lock(&sc->sc_inqueue_mtx);
	bioq_disksort(&sc->sc_inqueue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_inqueue_mtx);
}

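/*
 * Look up a device by unit number; g_gate_list_mtx must be held.
 */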
static struct g_gate_softc *
g_gate_find(u_int unit)
{
	struct g_gate_softc *sc;

	mtx_assert(&g_gate_list_mtx, MA_OWNED);
	LIST_FOREACH(sc, &g_gate_list, sc_next) {
		if (sc->sc_unit == unit)
			break;
	}
	return (sc);
}

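/*
 * Find the device with the given unit number and take a reference on
 * it.  Returns NULL if the unit does not exist or is being destroyed.
 */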
static struct g_gate_softc *
g_gate_hold(u_int unit)
{
	struct g_gate_softc *sc;

	mtx_lock(&g_gate_list_mtx);
	sc = g_gate_find(unit);
	if (sc != NULL) {
		if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
			sc = NULL;
		else
			sc->sc_ref++;
	}
	mtx_unlock(&g_gate_list_mtx);
	return (sc);
}

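/*
 * Drop a reference; the last reference on a device marked for
 * destruction triggers the deferred teardown via a GEOM event.
 */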
static void
g_gate_release(struct g_gate_softc *sc)
{

	g_topology_assert_not();
	mtx_lock(&g_gate_list_mtx);
	sc->sc_ref--;
	KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
	if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		mtx_unlock(&g_gate_list_mtx);
		g_waitfor_event(g_gate_destroy_it, sc, M_WAITOK, NULL);
	} else {
		mtx_unlock(&g_gate_list_mtx);
	}
}

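/*
 * Validate or allocate a unit number.  For unit >= 0, return -1 if
 * that unit is already taken; for unit == -1, return the lowest free
 * unit number.  g_gate_list_mtx must be held.
 */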
static int
g_gate_getunit(int unit)
{
	struct g_gate_softc *sc;

	mtx_assert(&g_gate_list_mtx, MA_OWNED);
	if (unit >= 0) {
		LIST_FOREACH(sc, &g_gate_list, sc_next) {
			if (sc->sc_unit == unit)
				return (-1);
		}
	} else {
		unit = 0;
once_again:
		LIST_FOREACH(sc, &g_gate_list, sc_next) {
			if (sc->sc_unit == unit) {
				if (++unit > 666)
					return (-1);
				goto once_again;
			}
		}
	}
	return (unit);
}

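/*
 * Callout handler: expire requests that have been sitting on either
 * queue for five seconds or more (the userland daemon is presumably
 * stuck), complete them with EIO and reschedule itself every
 * sc_timeout seconds while the device is alive.
 */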
static void
g_gate_guard(void *arg)
{
	struct g_gate_softc *sc;
	struct bintime curtime;
	struct bio *bp, *bp2;

	sc = arg;
	binuptime(&curtime);
	g_gate_hold(sc->sc_unit);
	mtx_lock(&sc->sc_inqueue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_inqueue, bp);
		atomic_subtract_rel_32(&sc->sc_queue_count, 1);
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	mtx_unlock(&sc->sc_inqueue_mtx);
	mtx_lock(&sc->sc_outqueue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_outqueue, bp);
		atomic_subtract_rel_32(&sc->sc_queue_count, 1);
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	mtx_unlock(&sc->sc_outqueue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	g_gate_release(sc);
}

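/*
 * GEOM dumpconf method: export access mode, timeout, info string and
 * queue statistics as XML, e.g. for kern.geom.confxml consumers.
 */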
static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_gate_softc *sc;

	sc = gp->softc;
	if (sc == NULL || pp != NULL || cp != NULL)
		return;
	g_gate_hold(sc->sc_unit);
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "write-only");
	} else {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-write");
	}
	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
	    sc->sc_queue_count);
	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
	    sc->sc_queue_size);
	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
	g_topology_unlock();
	g_gate_release(sc);
	g_topology_lock();
}

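/*
 * Handle G_GATE_CMD_CREATE: validate the parameters from userland,
 * allocate and initialize a softc, pick a unit number, create the
 * geom and provider, and arm the timeout callout if requested.
 */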
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp;

	if (ggio->gctl_mediasize == 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if (ggio->gctl_sectorsize > 0 && !powerof2(ggio->gctl_sectorsize)) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
	    (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		G_GATE_DEBUG(1, "Invalid flags.");
		return (EINVAL);
	}
	if (ggio->gctl_unit < -1) {
		G_GATE_DEBUG(1, "Invalid unit number.");
		return (EINVAL);
	}

	sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
	sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
	strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
	sc->sc_seq = 0;
	bioq_init(&sc->sc_inqueue);
	mtx_init(&sc->sc_inqueue_mtx, "gg:inqueue", NULL, MTX_DEF);
	bioq_init(&sc->sc_outqueue);
	mtx_init(&sc->sc_outqueue_mtx, "gg:outqueue", NULL, MTX_DEF);
	sc->sc_queue_count = 0;
	sc->sc_queue_size = ggio->gctl_maxcount;
	if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
		sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
	sc->sc_timeout = ggio->gctl_timeout;
	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
	mtx_lock(&g_gate_list_mtx);
	ggio->gctl_unit = g_gate_getunit(ggio->gctl_unit);
	if (ggio->gctl_unit == -1) {
		mtx_unlock(&g_gate_list_mtx);
		mtx_destroy(&sc->sc_inqueue_mtx);
		mtx_destroy(&sc->sc_outqueue_mtx);
		free(sc, M_GATE);
		return (EBUSY);
	}
	sc->sc_unit = ggio->gctl_unit;
	LIST_INSERT_HEAD(&g_gate_list, sc, sc_next);
	mtx_unlock(&g_gate_list_mtx);

	DROP_GIANT();
	g_topology_lock();
	gp = g_new_geomf(&g_gate_class, "%s%d", G_GATE_PROVIDER_NAME,
	    sc->sc_unit);
	gp->start = g_gate_start;
	gp->access = g_gate_access;
	gp->dumpconf = g_gate_dumpconf;
	gp->softc = sc;
	pp = g_new_providerf(gp, "%s%d", G_GATE_PROVIDER_NAME, sc->sc_unit);
	pp->mediasize = ggio->gctl_mediasize;
	pp->sectorsize = ggio->gctl_sectorsize;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	PICKUP_GIANT();

	if (sc->sc_timeout > 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	return (0);
}

#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION)			\
		return (EINVAL);					\
} while (0)
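/*
 * Control device ioctl handler; this is the interface the ggate
 * userland daemons use to create and destroy devices and to fetch
 * (G_GATE_CMD_START) and complete (G_GATE_CMD_DONE) I/O requests.
 */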
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
	    flags, td);

	switch (cmd) {
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		return (g_gate_create(ggio));
	    }
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit);
		if (sc == NULL)
			return (ENXIO);
		g_topology_lock();
		mtx_lock(&g_gate_list_mtx);
		error = g_gate_destroy(sc, ggio->gctl_force);
		if (error == 0)
			g_gate_wither(sc);
		g_topology_unlock();
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit);
		if (sc == NULL)
			return (ENXIO);
		for (;;) {
			mtx_lock(&sc->sc_inqueue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if (msleep(sc, &sc->sc_inqueue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
				g_gate_release(sc);
				ggio->gctl_error = ECANCELED;
				return (0);
			}
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				g_gate_release(sc);
				ggio->gctl_error = ECANCELED;
				return (0);
			}
		}
		ggio->gctl_cmd = bp->bio_cmd;
		if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_inqueue_mtx);
			g_gate_release(sc);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			return (0);
		}
		bioq_remove(&sc->sc_inqueue, bp);
		atomic_subtract_rel_32(&sc->sc_queue_count, 1);
		mtx_unlock(&sc->sc_inqueue_mtx);
		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;
		switch (bp->bio_cmd) {
		case BIO_READ:
			break;
		case BIO_DELETE:
		case BIO_WRITE:
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				mtx_lock(&sc->sc_inqueue_mtx);
				bioq_disksort(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_inqueue_mtx);
				g_gate_release(sc);
				return (error);
			}
			break;
		}
		mtx_lock(&sc->sc_outqueue_mtx);
		bioq_insert_tail(&sc->sc_outqueue, bp);
		atomic_add_acq_32(&sc->sc_queue_count, 1);
		mtx_unlock(&sc->sc_outqueue_mtx);
		g_gate_release(sc);
		return (0);
	    }
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit);
		if (sc == NULL)
			return (ENOENT);
		mtx_lock(&sc->sc_outqueue_mtx);
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			atomic_subtract_rel_32(&sc->sc_queue_count, 1);
		}
		mtx_unlock(&sc->sc_outqueue_mtx);
		if (bp == NULL) {
			/*
			 * Request was probably canceled.
			 */
			g_gate_release(sc);
			return (0);
		}
		if (ggio->gctl_error == EAGAIN) {
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			atomic_add_acq_32(&sc->sc_queue_count, 1);
			mtx_lock(&sc->sc_inqueue_mtx);
			bioq_disksort(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_inqueue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}

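/*
 * Create the control device node (/dev/ggctl).
 */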
static void
g_gate_device(void)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}

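/*
 * Module event handler: set up the global list lock and the control
 * device on load; refuse to unload while any ggate device exists.
 */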
static int
g_gate_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&g_gate_list_mtx, "gg_list_lock", NULL, MTX_DEF);
		g_gate_device();
		break;
	case MOD_UNLOAD:
		mtx_lock(&g_gate_list_mtx);
		if (!LIST_EMPTY(&g_gate_list)) {
			mtx_unlock(&g_gate_list_mtx);
			error = EBUSY;
			break;
		}
		mtx_unlock(&g_gate_list_mtx);
		mtx_destroy(&g_gate_list_mtx);
		if (status_dev != NULL)
			destroy_dev(status_dev);
		break;
	default:
		return (EOPNOTSUPP);
	}

	return (error);
}

static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);