1 /*- 2 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org> 3 * Copyright (c) 2009-2010 The FreeBSD Foundation 4 * All rights reserved. 5 * 6 * Portions of this software were developed by Pawel Jakub Dawidek 7 * under sponsorship from the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/bio.h> 37 #include <sys/conf.h> 38 #include <sys/kernel.h> 39 #include <sys/kthread.h> 40 #include <sys/fcntl.h> 41 #include <sys/linker.h> 42 #include <sys/lock.h> 43 #include <sys/malloc.h> 44 #include <sys/mutex.h> 45 #include <sys/proc.h> 46 #include <sys/limits.h> 47 #include <sys/queue.h> 48 #include <sys/sysctl.h> 49 #include <sys/signalvar.h> 50 #include <sys/time.h> 51 #include <machine/atomic.h> 52 53 #include <geom/geom.h> 54 #include <geom/gate/g_gate.h> 55 56 static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data"); 57 58 SYSCTL_DECL(_kern_geom); 59 SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0, "GEOM_GATE stuff"); 60 static int g_gate_debug = 0; 61 TUNABLE_INT("kern.geom.gate.debug", &g_gate_debug); 62 SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0, 63 "Debug level"); 64 static u_int g_gate_maxunits = 256; 65 TUNABLE_INT("kern.geom.gate.maxunits", &g_gate_maxunits); 66 SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN, 67 &g_gate_maxunits, 0, "Maximum number of ggate devices"); 68 69 struct g_class g_gate_class = { 70 .name = G_GATE_CLASS_NAME, 71 .version = G_VERSION, 72 }; 73 74 static struct cdev *status_dev; 75 static d_ioctl_t g_gate_ioctl; 76 static struct cdevsw g_gate_cdevsw = { 77 .d_version = D_VERSION, 78 .d_ioctl = g_gate_ioctl, 79 .d_name = G_GATE_CTL_NAME 80 }; 81 82 83 static struct g_gate_softc **g_gate_units; 84 static u_int g_gate_nunits; 85 static struct mtx g_gate_units_lock; 86 87 static int 88 g_gate_destroy(struct g_gate_softc *sc, boolean_t force) 89 { 90 struct g_provider *pp; 91 struct g_geom *gp; 92 struct bio *bp; 93 94 g_topology_assert(); 95 mtx_assert(&g_gate_units_lock, MA_OWNED); 96 pp = sc->sc_provider; 97 if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { 98 mtx_unlock(&g_gate_units_lock); 99 return (EBUSY); 100 } 101 mtx_unlock(&g_gate_units_lock); 102 mtx_lock(&sc->sc_queue_mtx); 103 if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) 104 sc->sc_flags |= G_GATE_FLAG_DESTROY; 105 wakeup(sc); 106 mtx_unlock(&sc->sc_queue_mtx); 107 gp = pp->geom; 108 pp->flags |= G_PF_WITHER; 109 g_orphan_provider(pp, ENXIO); 110 callout_drain(&sc->sc_callout); 111 mtx_lock(&sc->sc_queue_mtx); 112 for (;;) { 113 bp = bioq_first(&sc->sc_inqueue); 114 if (bp != NULL) { 115 bioq_remove(&sc->sc_inqueue, bp); 116 sc->sc_queue_count--; 117 G_GATE_LOGREQ(1, bp, "Request canceled."); 118 g_io_deliver(bp, ENXIO); 119 } else { 120 break; 121 } 122 } 123 for (;;) { 124 bp = bioq_first(&sc->sc_outqueue); 125 if (bp != NULL) { 126 bioq_remove(&sc->sc_outqueue, bp); 127 sc->sc_queue_count--; 128 G_GATE_LOGREQ(1, bp, "Request canceled."); 129 g_io_deliver(bp, ENXIO); 130 } else { 131 break; 132 } 133 } 134 mtx_unlock(&sc->sc_queue_mtx); 135 g_topology_unlock(); 136 mtx_lock(&g_gate_units_lock); 137 /* One reference is ours. */ 138 sc->sc_ref--; 139 while (sc->sc_ref > 0) 140 msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0); 141 g_gate_units[sc->sc_unit] = NULL; 142 KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?")); 143 g_gate_nunits--; 144 mtx_unlock(&g_gate_units_lock); 145 mtx_destroy(&sc->sc_queue_mtx); 146 g_topology_lock(); 147 G_GATE_DEBUG(0, "Device %s destroyed.", gp->name); 148 gp->softc = NULL; 149 g_wither_geom(gp, ENXIO); 150 sc->sc_provider = NULL; 151 free(sc, M_GATE); 152 return (0); 153 } 154 155 static int 156 g_gate_access(struct g_provider *pp, int dr, int dw, int de) 157 { 158 struct g_gate_softc *sc; 159 160 if (dr <= 0 && dw <= 0 && de <= 0) 161 return (0); 162 sc = pp->geom->softc; 163 if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) 164 return (ENXIO); 165 /* XXX: Hack to allow read-only mounts. */ 166 #if 0 167 if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0) 168 return (EPERM); 169 #endif 170 if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0) 171 return (EPERM); 172 return (0); 173 } 174 175 static void 176 g_gate_start(struct bio *bp) 177 { 178 struct g_gate_softc *sc; 179 180 sc = bp->bio_to->geom->softc; 181 if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) { 182 g_io_deliver(bp, ENXIO); 183 return; 184 } 185 G_GATE_LOGREQ(2, bp, "Request received."); 186 switch (bp->bio_cmd) { 187 case BIO_READ: 188 break; 189 case BIO_DELETE: 190 case BIO_WRITE: 191 /* XXX: Hack to allow read-only mounts. */ 192 if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) { 193 g_io_deliver(bp, EPERM); 194 return; 195 } 196 break; 197 case BIO_GETATTR: 198 default: 199 G_GATE_LOGREQ(2, bp, "Ignoring request."); 200 g_io_deliver(bp, EOPNOTSUPP); 201 return; 202 } 203 204 mtx_lock(&sc->sc_queue_mtx); 205 if (sc->sc_queue_count > sc->sc_queue_size) { 206 mtx_unlock(&sc->sc_queue_mtx); 207 G_GATE_LOGREQ(1, bp, "Queue full, request canceled."); 208 g_io_deliver(bp, ENOMEM); 209 return; 210 } 211 212 bp->bio_driver1 = (void *)sc->sc_seq; 213 sc->sc_seq++; 214 sc->sc_queue_count++; 215 216 bioq_insert_tail(&sc->sc_inqueue, bp); 217 wakeup(sc); 218 219 mtx_unlock(&sc->sc_queue_mtx); 220 } 221 222 static struct g_gate_softc * 223 g_gate_hold(u_int unit, const char *name) 224 { 225 struct g_gate_softc *sc = NULL; 226 227 mtx_lock(&g_gate_units_lock); 228 if (unit >= 0 && unit < g_gate_maxunits) 229 sc = g_gate_units[unit]; 230 else if (unit == G_GATE_NAME_GIVEN) { 231 KASSERT(name != NULL, ("name is NULL")); 232 for (unit = 0; unit < g_gate_maxunits; unit++) { 233 if (g_gate_units[unit] == NULL) 234 continue; 235 if (strcmp(name, 236 g_gate_units[unit]->sc_provider->name) != 0) { 237 continue; 238 } 239 sc = g_gate_units[unit]; 240 break; 241 } 242 } 243 if (sc != NULL) 244 sc->sc_ref++; 245 mtx_unlock(&g_gate_units_lock); 246 return (sc); 247 } 248 249 static void 250 g_gate_release(struct g_gate_softc *sc) 251 { 252 253 g_topology_assert_not(); 254 mtx_lock(&g_gate_units_lock); 255 sc->sc_ref--; 256 KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name)); 257 if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) 258 wakeup(&sc->sc_ref); 259 mtx_unlock(&g_gate_units_lock); 260 } 261 262 static int 263 g_gate_getunit(int unit, int *errorp) 264 { 265 266 mtx_assert(&g_gate_units_lock, MA_OWNED); 267 if (unit >= 0) { 268 if (unit >= g_gate_maxunits) 269 *errorp = EINVAL; 270 else if (g_gate_units[unit] == NULL) 271 return (unit); 272 else 273 *errorp = EEXIST; 274 } else { 275 for (unit = 0; unit < g_gate_maxunits; unit++) { 276 if (g_gate_units[unit] == NULL) 277 return (unit); 278 } 279 *errorp = ENFILE; 280 } 281 return (-1); 282 } 283 284 static void 285 g_gate_guard(void *arg) 286 { 287 struct g_gate_softc *sc; 288 struct bintime curtime; 289 struct bio *bp, *bp2; 290 291 sc = arg; 292 binuptime(&curtime); 293 g_gate_hold(sc->sc_unit, NULL); 294 mtx_lock(&sc->sc_queue_mtx); 295 TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) { 296 if (curtime.sec - bp->bio_t0.sec < 5) 297 continue; 298 bioq_remove(&sc->sc_inqueue, bp); 299 sc->sc_queue_count--; 300 G_GATE_LOGREQ(1, bp, "Request timeout."); 301 g_io_deliver(bp, EIO); 302 } 303 TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) { 304 if (curtime.sec - bp->bio_t0.sec < 5) 305 continue; 306 bioq_remove(&sc->sc_outqueue, bp); 307 sc->sc_queue_count--; 308 G_GATE_LOGREQ(1, bp, "Request timeout."); 309 g_io_deliver(bp, EIO); 310 } 311 mtx_unlock(&sc->sc_queue_mtx); 312 if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) { 313 callout_reset(&sc->sc_callout, sc->sc_timeout * hz, 314 g_gate_guard, sc); 315 } 316 g_gate_release(sc); 317 } 318 319 static void 320 g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 321 struct g_consumer *cp, struct g_provider *pp) 322 { 323 struct g_gate_softc *sc; 324 325 sc = gp->softc; 326 if (sc == NULL || pp != NULL || cp != NULL) 327 return; 328 g_gate_hold(sc->sc_unit, NULL); 329 if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) { 330 sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only"); 331 } else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) { 332 sbuf_printf(sb, "%s<access>%s</access>\n", indent, 333 "write-only"); 334 } else { 335 sbuf_printf(sb, "%s<access>%s</access>\n", indent, 336 "read-write"); 337 } 338 sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout); 339 sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info); 340 sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent, 341 sc->sc_queue_count); 342 sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent, 343 sc->sc_queue_size); 344 sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref); 345 sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit); 346 g_topology_unlock(); 347 g_gate_release(sc); 348 g_topology_lock(); 349 } 350 351 static int 352 g_gate_create(struct g_gate_ctl_create *ggio) 353 { 354 struct g_gate_softc *sc; 355 struct g_geom *gp; 356 struct g_provider *pp; 357 char name[NAME_MAX]; 358 int error = 0, unit; 359 360 if (ggio->gctl_mediasize == 0) { 361 G_GATE_DEBUG(1, "Invalid media size."); 362 return (EINVAL); 363 } 364 if (ggio->gctl_sectorsize > 0 && !powerof2(ggio->gctl_sectorsize)) { 365 G_GATE_DEBUG(1, "Invalid sector size."); 366 return (EINVAL); 367 } 368 if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) { 369 G_GATE_DEBUG(1, "Invalid media size."); 370 return (EINVAL); 371 } 372 if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 && 373 (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) { 374 G_GATE_DEBUG(1, "Invalid flags."); 375 return (EINVAL); 376 } 377 if (ggio->gctl_unit != G_GATE_UNIT_AUTO && 378 ggio->gctl_unit != G_GATE_NAME_GIVEN && 379 ggio->gctl_unit < 0) { 380 G_GATE_DEBUG(1, "Invalid unit number."); 381 return (EINVAL); 382 } 383 if (ggio->gctl_unit == G_GATE_NAME_GIVEN && 384 ggio->gctl_name[0] == '\0') { 385 G_GATE_DEBUG(1, "No device name."); 386 return (EINVAL); 387 } 388 389 sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO); 390 sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS); 391 strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info)); 392 sc->sc_seq = 1; 393 bioq_init(&sc->sc_inqueue); 394 bioq_init(&sc->sc_outqueue); 395 mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF); 396 sc->sc_queue_count = 0; 397 sc->sc_queue_size = ggio->gctl_maxcount; 398 if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE) 399 sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE; 400 sc->sc_timeout = ggio->gctl_timeout; 401 callout_init(&sc->sc_callout, CALLOUT_MPSAFE); 402 mtx_lock(&g_gate_units_lock); 403 sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error); 404 if (sc->sc_unit < 0) { 405 mtx_unlock(&g_gate_units_lock); 406 mtx_destroy(&sc->sc_queue_mtx); 407 free(sc, M_GATE); 408 return (error); 409 } 410 if (ggio->gctl_unit == G_GATE_NAME_GIVEN) 411 snprintf(name, sizeof(name), "%s", ggio->gctl_name); 412 else { 413 snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME, 414 sc->sc_unit); 415 } 416 /* Check for name collision. */ 417 for (unit = 0; unit < g_gate_maxunits; unit++) { 418 if (g_gate_units[unit] == NULL) 419 continue; 420 if (strcmp(name, g_gate_units[unit]->sc_provider->name) != 0) 421 continue; 422 mtx_unlock(&g_gate_units_lock); 423 mtx_destroy(&sc->sc_queue_mtx); 424 free(sc, M_GATE); 425 return (EEXIST); 426 } 427 g_gate_units[sc->sc_unit] = sc; 428 g_gate_nunits++; 429 mtx_unlock(&g_gate_units_lock); 430 431 ggio->gctl_unit = sc->sc_unit; 432 433 g_topology_lock(); 434 gp = g_new_geomf(&g_gate_class, "%s", name); 435 gp->start = g_gate_start; 436 gp->access = g_gate_access; 437 gp->dumpconf = g_gate_dumpconf; 438 gp->softc = sc; 439 pp = g_new_providerf(gp, "%s", name); 440 pp->mediasize = ggio->gctl_mediasize; 441 pp->sectorsize = ggio->gctl_sectorsize; 442 sc->sc_provider = pp; 443 g_error_provider(pp, 0); 444 g_topology_unlock(); 445 446 if (sc->sc_timeout > 0) { 447 callout_reset(&sc->sc_callout, sc->sc_timeout * hz, 448 g_gate_guard, sc); 449 } 450 return (0); 451 } 452 453 #define G_GATE_CHECK_VERSION(ggio) do { \ 454 if ((ggio)->gctl_version != G_GATE_VERSION) { \ 455 printf("Version mismatch %d != %d.\n", \ 456 ggio->gctl_version, G_GATE_VERSION); \ 457 return (EINVAL); \ 458 } \ 459 } while (0) 460 static int 461 g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 462 { 463 struct g_gate_softc *sc; 464 struct bio *bp; 465 int error = 0; 466 467 G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr, 468 flags, td); 469 470 switch (cmd) { 471 case G_GATE_CMD_CREATE: 472 { 473 struct g_gate_ctl_create *ggio = (void *)addr; 474 475 G_GATE_CHECK_VERSION(ggio); 476 error = g_gate_create(ggio); 477 /* 478 * Reset TDP_GEOM flag. 479 * There are pending events for sure, because we just created 480 * new provider and other classes want to taste it, but we 481 * cannot answer on I/O requests until we're here. 482 */ 483 td->td_pflags &= ~TDP_GEOM; 484 return (error); 485 } 486 case G_GATE_CMD_DESTROY: 487 { 488 struct g_gate_ctl_destroy *ggio = (void *)addr; 489 490 G_GATE_CHECK_VERSION(ggio); 491 sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name); 492 if (sc == NULL) 493 return (ENXIO); 494 g_topology_lock(); 495 mtx_lock(&g_gate_units_lock); 496 error = g_gate_destroy(sc, ggio->gctl_force); 497 g_topology_unlock(); 498 if (error != 0) 499 g_gate_release(sc); 500 return (error); 501 } 502 case G_GATE_CMD_CANCEL: 503 { 504 struct g_gate_ctl_cancel *ggio = (void *)addr; 505 struct bio *tbp, *lbp; 506 507 G_GATE_CHECK_VERSION(ggio); 508 sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name); 509 if (sc == NULL) 510 return (ENXIO); 511 lbp = NULL; 512 mtx_lock(&sc->sc_queue_mtx); 513 TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) { 514 if (ggio->gctl_seq == 0 || 515 ggio->gctl_seq == (uintptr_t)bp->bio_driver1) { 516 G_GATE_LOGREQ(1, bp, "Request canceled."); 517 bioq_remove(&sc->sc_outqueue, bp); 518 /* 519 * Be sure to put requests back onto incoming 520 * queue in the proper order. 521 */ 522 if (lbp == NULL) 523 bioq_insert_head(&sc->sc_inqueue, bp); 524 else { 525 TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue, 526 lbp, bp, bio_queue); 527 } 528 lbp = bp; 529 /* 530 * If only one request was canceled, leave now. 531 */ 532 if (ggio->gctl_seq != 0) 533 break; 534 } 535 } 536 if (ggio->gctl_unit == G_GATE_NAME_GIVEN) 537 ggio->gctl_unit = sc->sc_unit; 538 mtx_unlock(&sc->sc_queue_mtx); 539 g_gate_release(sc); 540 return (error); 541 } 542 case G_GATE_CMD_START: 543 { 544 struct g_gate_ctl_io *ggio = (void *)addr; 545 546 G_GATE_CHECK_VERSION(ggio); 547 sc = g_gate_hold(ggio->gctl_unit, NULL); 548 if (sc == NULL) 549 return (ENXIO); 550 error = 0; 551 for (;;) { 552 mtx_lock(&sc->sc_queue_mtx); 553 bp = bioq_first(&sc->sc_inqueue); 554 if (bp != NULL) 555 break; 556 if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) { 557 ggio->gctl_error = ECANCELED; 558 mtx_unlock(&sc->sc_queue_mtx); 559 goto start_end; 560 } 561 if (msleep(sc, &sc->sc_queue_mtx, 562 PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) { 563 ggio->gctl_error = ECANCELED; 564 goto start_end; 565 } 566 } 567 ggio->gctl_cmd = bp->bio_cmd; 568 if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) && 569 bp->bio_length > ggio->gctl_length) { 570 mtx_unlock(&sc->sc_queue_mtx); 571 ggio->gctl_length = bp->bio_length; 572 ggio->gctl_error = ENOMEM; 573 goto start_end; 574 } 575 bioq_remove(&sc->sc_inqueue, bp); 576 bioq_insert_tail(&sc->sc_outqueue, bp); 577 mtx_unlock(&sc->sc_queue_mtx); 578 579 ggio->gctl_seq = (uintptr_t)bp->bio_driver1; 580 ggio->gctl_offset = bp->bio_offset; 581 ggio->gctl_length = bp->bio_length; 582 583 switch (bp->bio_cmd) { 584 case BIO_READ: 585 break; 586 case BIO_DELETE: 587 case BIO_WRITE: 588 error = copyout(bp->bio_data, ggio->gctl_data, 589 bp->bio_length); 590 if (error != 0) { 591 mtx_lock(&sc->sc_queue_mtx); 592 bioq_remove(&sc->sc_outqueue, bp); 593 bioq_insert_head(&sc->sc_inqueue, bp); 594 mtx_unlock(&sc->sc_queue_mtx); 595 goto start_end; 596 } 597 break; 598 } 599 start_end: 600 g_gate_release(sc); 601 return (error); 602 } 603 case G_GATE_CMD_DONE: 604 { 605 struct g_gate_ctl_io *ggio = (void *)addr; 606 607 G_GATE_CHECK_VERSION(ggio); 608 sc = g_gate_hold(ggio->gctl_unit, NULL); 609 if (sc == NULL) 610 return (ENOENT); 611 error = 0; 612 mtx_lock(&sc->sc_queue_mtx); 613 TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) { 614 if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1) 615 break; 616 } 617 if (bp != NULL) { 618 bioq_remove(&sc->sc_outqueue, bp); 619 sc->sc_queue_count--; 620 } 621 mtx_unlock(&sc->sc_queue_mtx); 622 if (bp == NULL) { 623 /* 624 * Request was probably canceled. 625 */ 626 goto done_end; 627 } 628 if (ggio->gctl_error == EAGAIN) { 629 bp->bio_error = 0; 630 G_GATE_LOGREQ(1, bp, "Request desisted."); 631 mtx_lock(&sc->sc_queue_mtx); 632 sc->sc_queue_count++; 633 bioq_insert_head(&sc->sc_inqueue, bp); 634 wakeup(sc); 635 mtx_unlock(&sc->sc_queue_mtx); 636 } else { 637 bp->bio_error = ggio->gctl_error; 638 if (bp->bio_error == 0) { 639 bp->bio_completed = bp->bio_length; 640 switch (bp->bio_cmd) { 641 case BIO_READ: 642 error = copyin(ggio->gctl_data, 643 bp->bio_data, bp->bio_length); 644 if (error != 0) 645 bp->bio_error = error; 646 break; 647 case BIO_DELETE: 648 case BIO_WRITE: 649 break; 650 } 651 } 652 G_GATE_LOGREQ(2, bp, "Request done."); 653 g_io_deliver(bp, bp->bio_error); 654 } 655 done_end: 656 g_gate_release(sc); 657 return (error); 658 } 659 } 660 return (ENOIOCTL); 661 } 662 663 static void 664 g_gate_device(void) 665 { 666 667 status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600, 668 G_GATE_CTL_NAME); 669 } 670 671 static int 672 g_gate_modevent(module_t mod, int type, void *data) 673 { 674 int error = 0; 675 676 switch (type) { 677 case MOD_LOAD: 678 mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF); 679 g_gate_units = malloc(g_gate_maxunits * sizeof(g_gate_units[0]), 680 M_GATE, M_WAITOK | M_ZERO); 681 g_gate_nunits = 0; 682 g_gate_device(); 683 break; 684 case MOD_UNLOAD: 685 mtx_lock(&g_gate_units_lock); 686 if (g_gate_nunits > 0) { 687 mtx_unlock(&g_gate_units_lock); 688 error = EBUSY; 689 break; 690 } 691 mtx_unlock(&g_gate_units_lock); 692 mtx_destroy(&g_gate_units_lock); 693 if (status_dev != 0) 694 destroy_dev(status_dev); 695 free(g_gate_units, M_GATE); 696 break; 697 default: 698 return (EOPNOTSUPP); 699 break; 700 } 701 702 return (error); 703 } 704 static moduledata_t g_gate_module = { 705 G_GATE_MOD_NAME, 706 g_gate_modevent, 707 NULL 708 }; 709 DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE); 710 DECLARE_GEOM_CLASS(g_gate_class, g_gate); 711