/*-
 * Copyright (c) 2004 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/gate/g_gate.h>

static MALLOC_DEFINE(M_GATE, "gg data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0, "GEOM_GATE stuff");
static u_int g_gate_debug = 0;
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0,
    "Debug level");

static int g_gate_destroy_geom(struct gctl_req *, struct g_class *,
    struct g_geom *);
struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.version = G_VERSION,
	.destroy_geom = g_gate_destroy_geom
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = g_gate_ioctl,
	.d_name = G_GATE_CTL_NAME
};

static LIST_HEAD(, g_gate_softc) g_gate_list =
    LIST_HEAD_INITIALIZER(&g_gate_list);
static struct mtx g_gate_list_mtx;

static void
g_gate_wither(struct g_gate_softc *sc)
{

	atomic_set_32(&sc->sc_flags, G_GATE_FLAG_DESTROY);
}

static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct g_provider *pp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_list_mtx, MA_OWNED);
	pp = sc->sc_provider;
	if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_list_mtx);
		return (EBUSY);
	}
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		g_gate_wither(sc);
		LIST_REMOVE(sc, sc_next);
	}
	mtx_unlock(&g_gate_list_mtx);
	mtx_lock(&sc->sc_inqueue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_inqueue_mtx);
	if (sc->sc_ref > 0) {
		G_GATE_DEBUG(1, "Cannot destroy %s yet.", sc->sc_name);
		return (0);
	}
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_inqueue_mtx);
	for (;;) {
		bp = bioq_first(&sc->sc_inqueue);
		if (bp != NULL) {
			bioq_remove(&sc->sc_inqueue, bp);
			atomic_subtract_rel_32(&sc->sc_queue_count, 1);
			G_GATE_LOGREQ(1, bp, "Request canceled.");
			g_io_deliver(bp, ENXIO);
		} else {
			break;
		}
	}
	mtx_destroy(&sc->sc_inqueue_mtx);
	mtx_lock(&sc->sc_outqueue_mtx);
	for (;;) {
		bp = bioq_first(&sc->sc_outqueue);
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			atomic_subtract_rel_32(&sc->sc_queue_count, 1);
			G_GATE_LOGREQ(1, bp, "Request canceled.");
			g_io_deliver(bp, ENXIO);
		} else {
			break;
		}
	}
	mtx_destroy(&sc->sc_outqueue_mtx);
	G_GATE_DEBUG(0, "Device %s destroyed.", sc->sc_name);
	pp->geom->softc = NULL;
	g_wither_geom(pp->geom, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}
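/*
 * Event callback: g_gate_release() schedules this through
 * g_waitfor_event() once the last reference to a dying device is
 * dropped, so the final teardown runs with the topology lock held.
 */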
static void
g_gate_destroy_it(void *arg, int flag __unused)
{
	struct g_gate_softc *sc;

	g_topology_assert();
	sc = arg;
	mtx_lock(&g_gate_list_mtx);
	g_gate_destroy(sc, 1);
}

static int
g_gate_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{

	g_topology_assert();
	mtx_lock(&g_gate_list_mtx);
	return (g_gate_destroy(gp->softc, 0));
}

static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_gate_softc *sc;

	if (dr <= 0 && dw <= 0 && de <= 0)
		return (0);
	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		return (ENXIO);
	/* XXX: Hack to allow read-only mounts. */
#if 0
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
		return (EPERM);
#endif
	if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
		return (EPERM);
	return (0);
}
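/*
 * I/O entry point.  Accepted requests are disksorted onto sc_inqueue and
 * the userland process sleeping in G_GATE_CMD_START is woken up; once
 * more than sc_queue_size requests are pending, new ones fail with EIO.
 */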
static void
g_gate_start(struct bio *bp)
{
	struct g_gate_softc *sc;
	uint32_t qcount;

	sc = bp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(bp, ENXIO);
		return;
	}
	G_GATE_LOGREQ(2, bp, "Request received.");
	switch (bp->bio_cmd) {
	case BIO_READ:
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		/* XXX: Hack to allow read-only mounts. */
		if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
			g_io_deliver(bp, EPERM);
			return;
		}
		break;
	case BIO_GETATTR:
	default:
		G_GATE_LOGREQ(2, bp, "Ignoring request.");
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	atomic_store_rel_32(&qcount, sc->sc_queue_count);
	if (qcount > sc->sc_queue_size) {
		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
		g_io_deliver(bp, EIO);
		return;
	}
	atomic_add_acq_32(&sc->sc_queue_count, 1);
	bp->bio_driver1 = (void *)sc->sc_seq;
	sc->sc_seq++;

	mtx_lock(&sc->sc_inqueue_mtx);
	bioq_disksort(&sc->sc_inqueue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_inqueue_mtx);
}

static struct g_gate_softc *
g_gate_find(u_int unit)
{
	struct g_gate_softc *sc;

	mtx_assert(&g_gate_list_mtx, MA_OWNED);
	LIST_FOREACH(sc, &g_gate_list, sc_next) {
		if (sc->sc_unit == unit)
			break;
	}
	return (sc);
}

static struct g_gate_softc *
g_gate_hold(u_int unit)
{
	struct g_gate_softc *sc;

	mtx_lock(&g_gate_list_mtx);
	sc = g_gate_find(unit);
	if (sc != NULL) {
		if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
			sc = NULL;
		else
			sc->sc_ref++;
	}
	mtx_unlock(&g_gate_list_mtx);
	return (sc);
}

static void
g_gate_release(struct g_gate_softc *sc)
{

	g_topology_assert_not();
	mtx_lock(&g_gate_list_mtx);
	sc->sc_ref--;
	KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
	if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		mtx_unlock(&g_gate_list_mtx);
		g_waitfor_event(g_gate_destroy_it, sc, M_WAITOK, NULL);
	} else {
		mtx_unlock(&g_gate_list_mtx);
	}
}

static int
g_gate_getunit(int unit)
{
	struct g_gate_softc *sc;

	mtx_assert(&g_gate_list_mtx, MA_OWNED);
	if (unit >= 0) {
		LIST_FOREACH(sc, &g_gate_list, sc_next) {
			if (sc->sc_unit == unit)
				return (-1);
		}
	} else {
		unit = 0;
once_again:
		LIST_FOREACH(sc, &g_gate_list, sc_next) {
			if (sc->sc_unit == unit) {
				if (++unit > 666)
					return (-1);
				goto once_again;
			}
		}
	}
	return (unit);
}

static void
g_gate_guard(void *arg)
{
	struct g_gate_softc *sc;
	struct bintime curtime;
	struct bio *bp, *bp2;

	sc = arg;
	binuptime(&curtime);
	g_gate_hold(sc->sc_unit);
	mtx_lock(&sc->sc_inqueue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_inqueue, bp);
		atomic_subtract_rel_32(&sc->sc_queue_count, 1);
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	mtx_unlock(&sc->sc_inqueue_mtx);
	mtx_lock(&sc->sc_outqueue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_outqueue, bp);
		atomic_subtract_rel_32(&sc->sc_queue_count, 1);
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	mtx_unlock(&sc->sc_outqueue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	g_gate_release(sc);
}

static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_gate_softc *sc;

	sc = gp->softc;
	if (sc == NULL || pp != NULL || cp != NULL)
		return;
	g_gate_hold(sc->sc_unit);
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "write-only");
	} else {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-write");
	}
	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
	    sc->sc_queue_count);
	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
	    sc->sc_queue_size);
	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
	g_topology_unlock();
	g_gate_release(sc);
	g_topology_lock();
}
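/*
 * Back-end of the G_GATE_CMD_CREATE ioctl: validate the request, pick a
 * free unit number and attach a new provider to the gate class.
 */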
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp;

	if (ggio->gctl_mediasize == 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if (ggio->gctl_sectorsize > 0 && !powerof2(ggio->gctl_sectorsize)) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
	    (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		G_GATE_DEBUG(1, "Invalid flags.");
		return (EINVAL);
	}
	if (ggio->gctl_unit < -1) {
		G_GATE_DEBUG(1, "Invalid unit number.");
		return (EINVAL);
	}

	sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
	sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
	strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
	sc->sc_seq = 0;
	bioq_init(&sc->sc_inqueue);
	mtx_init(&sc->sc_inqueue_mtx, "gg:inqueue", NULL, MTX_DEF);
	bioq_init(&sc->sc_outqueue);
	mtx_init(&sc->sc_outqueue_mtx, "gg:outqueue", NULL, MTX_DEF);
	sc->sc_queue_count = 0;
	sc->sc_queue_size = ggio->gctl_maxcount;
	if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
		sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
	sc->sc_timeout = ggio->gctl_timeout;
	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
	mtx_lock(&g_gate_list_mtx);
	ggio->gctl_unit = g_gate_getunit(ggio->gctl_unit);
	if (ggio->gctl_unit == -1) {
		mtx_unlock(&g_gate_list_mtx);
		mtx_destroy(&sc->sc_inqueue_mtx);
		mtx_destroy(&sc->sc_outqueue_mtx);
		free(sc, M_GATE);
		return (EBUSY);
	}
	sc->sc_unit = ggio->gctl_unit;
	LIST_INSERT_HEAD(&g_gate_list, sc, sc_next);
	mtx_unlock(&g_gate_list_mtx);

	g_topology_lock();
	gp = g_new_geomf(&g_gate_class, "%s%d", G_GATE_PROVIDER_NAME,
	    sc->sc_unit);
	gp->start = g_gate_start;
	gp->access = g_gate_access;
	gp->dumpconf = g_gate_dumpconf;
	gp->softc = sc;
	pp = g_new_providerf(gp, "%s%d", G_GATE_PROVIDER_NAME, sc->sc_unit);
	pp->mediasize = ggio->gctl_mediasize;
	pp->sectorsize = ggio->gctl_sectorsize;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();

	if (sc->sc_timeout > 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	return (0);
}
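/*
 * Control device interface.  The expected consumer is a userland server
 * (presumably the ggate(8) family of tools) which creates a device with
 * G_GATE_CMD_CREATE and then loops: G_GATE_CMD_START blocks until a bio
 * is queued and hands it out, G_GATE_CMD_DONE reports the result.
 */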
#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION)			\
		return (EINVAL);					\
} while (0)
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
	    flags, td);

	switch (cmd) {
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		DROP_GIANT();
		error = g_gate_create(ggio);
		PICKUP_GIANT();
		return (error);
	    }
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit);
		if (sc == NULL)
			return (ENXIO);
		DROP_GIANT();
		g_topology_lock();
		mtx_lock(&g_gate_list_mtx);
		error = g_gate_destroy(sc, ggio->gctl_force);
		if (error == 0)
			g_gate_wither(sc);
		g_topology_unlock();
		PICKUP_GIANT();
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit);
		if (sc == NULL)
			return (ENXIO);
		for (;;) {
			mtx_lock(&sc->sc_inqueue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if (msleep(sc, &sc->sc_inqueue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
				g_gate_release(sc);
				ggio->gctl_error = ECANCELED;
				return (0);
			}
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				g_gate_release(sc);
				ggio->gctl_error = ECANCELED;
				return (0);
			}
		}
		ggio->gctl_cmd = bp->bio_cmd;
		if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_inqueue_mtx);
			g_gate_release(sc);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			return (0);
		}
		bioq_remove(&sc->sc_inqueue, bp);
		atomic_subtract_rel_32(&sc->sc_queue_count, 1);
		mtx_unlock(&sc->sc_inqueue_mtx);
		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;
		switch (bp->bio_cmd) {
		case BIO_READ:
			break;
		case BIO_DELETE:
		case BIO_WRITE:
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				mtx_lock(&sc->sc_inqueue_mtx);
				bioq_disksort(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_inqueue_mtx);
				g_gate_release(sc);
				return (error);
			}
			break;
		}
		mtx_lock(&sc->sc_outqueue_mtx);
		bioq_insert_tail(&sc->sc_outqueue, bp);
		atomic_add_acq_32(&sc->sc_queue_count, 1);
		mtx_unlock(&sc->sc_outqueue_mtx);
		g_gate_release(sc);
		return (0);
	    }
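	/*
	 * Complete a request previously handed out by G_GATE_CMD_START:
	 * gctl_error == 0 finishes the bio (reads copy the data back in),
	 * EAGAIN requeues it on sc_inqueue, any other value fails it.
	 */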
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit);
		if (sc == NULL)
			return (ENOENT);
		mtx_lock(&sc->sc_outqueue_mtx);
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			atomic_subtract_rel_32(&sc->sc_queue_count, 1);
		}
		mtx_unlock(&sc->sc_outqueue_mtx);
		if (bp == NULL) {
			/*
			 * Request was probably canceled.
			 */
			g_gate_release(sc);
			return (0);
		}
		if (ggio->gctl_error == EAGAIN) {
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			atomic_add_acq_32(&sc->sc_queue_count, 1);
			mtx_lock(&sc->sc_inqueue_mtx);
			bioq_disksort(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_inqueue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}

static void
g_gate_device(void)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}

static int
g_gate_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&g_gate_list_mtx, "gg_list_lock", NULL, MTX_DEF);
		g_gate_device();
		break;
	case MOD_UNLOAD:
		mtx_lock(&g_gate_list_mtx);
		if (!LIST_EMPTY(&g_gate_list)) {
			mtx_unlock(&g_gate_list_mtx);
			error = EBUSY;
			break;
		}
		mtx_unlock(&g_gate_list_mtx);
		mtx_destroy(&g_gate_list_mtx);
		if (status_dev != 0)
			destroy_dev(status_dev);
		break;
	default:
		return (EOPNOTSUPP);
		break;
	}

	return (error);
}
static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);
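/*
 * For reference, a minimal sketch of the userland side of the protocol
 * implemented above.  This is illustrative only: the control device name
 * comes from G_GATE_CTL_NAME, the structures from <geom/gate/g_gate.h>,
 * and "unit" and "buf" are assumed to have been set up by the caller
 * (e.g. from a prior G_GATE_CMD_CREATE).
 *
 *	int fd = open("/dev/" G_GATE_CTL_NAME, O_RDWR);
 *	struct g_gate_ctl_io ggio;
 *
 *	ggio.gctl_version = G_GATE_VERSION;
 *	ggio.gctl_unit = unit;
 *	ggio.gctl_data = buf;
 *	for (;;) {
 *		ggio.gctl_length = sizeof(buf);
 *		ggio.gctl_error = 0;
 *		// Blocks until a request is queued; on BIO_WRITE/BIO_DELETE
 *		// the kernel has already copied the data out into buf.
 *		if (ioctl(fd, G_GATE_CMD_START, &ggio) == -1)
 *			break;
 *		switch (ggio.gctl_cmd) {
 *		case BIO_READ:
 *			// Fill buf with gctl_length bytes at gctl_offset.
 *			break;
 *		case BIO_DELETE:
 *		case BIO_WRITE:
 *			// Consume gctl_length bytes from buf.
 *			break;
 *		}
 *		ggio.gctl_error = 0;	// or an errno; EAGAIN requeues
 *		ioctl(fd, G_GATE_CMD_DONE, &ggio);
 *	}
 */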