/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2009-2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Pawel Jakub Dawidek
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/gate/g_gate.h>

FEATURE(geom_gate, "GEOM Gate module");

static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0,
    "GEOM_GATE stuff");
static int g_gate_debug = 0;
TUNABLE_INT("kern.geom.gate.debug", &g_gate_debug);
SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0,
    "Debug level");
static u_int g_gate_maxunits = 256;
TUNABLE_INT("kern.geom.gate.maxunits", &g_gate_maxunits);
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN,
    &g_gate_maxunits, 0, "Maximum number of ggate devices");

struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.version = G_VERSION,
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	g_gate_ioctl,
	.d_name =	G_GATE_CTL_NAME
};

static struct g_gate_softc **g_gate_units;
static u_int g_gate_nunits;
static struct mtx g_gate_units_lock;

/*
 * Destroy a gate device: mark it as dying, cancel every queued bio, wait
 * for all references to drain and dismantle the GEOM provider and geom.
 */
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct g_provider *pp;
	struct g_geom *gp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_units_lock, MA_OWNED);
	pp = sc->sc_provider;
	if (!force &&
	    (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_units_lock);
		return (EBUSY);
	}
	mtx_unlock(&g_gate_units_lock);
	mtx_lock(&sc->sc_queue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0)
		sc->sc_flags |= G_GATE_FLAG_DESTROY;
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	gp = pp->geom;
	pp->flags |= G_PF_WITHER;
	g_orphan_provider(pp, ENXIO);
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_first(&sc->sc_inqueue)) != NULL) {
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	while ((bp = bioq_first(&sc->sc_outqueue)) != NULL) {
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	/* One reference is ours. */
	sc->sc_ref--;
	while (sc->sc_ref > 0)
		msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	g_topology_lock();
	G_GATE_DEBUG(1, "Device %s destroyed.", gp->name);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}

static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_gate_softc *sc;

	if (dr <= 0 && dw <= 0 && de <= 0)
		return (0);
	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		return (ENXIO);
	/* XXX: Hack to allow read-only mounts. */
#if 0
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
		return (EPERM);
#endif
	if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
		return (EPERM);
	return (0);
}

/*
 * I/O entry point: queue an incoming bio for the userland daemon to pick
 * up via G_GATE_CMD_START.
 */
static void
g_gate_start(struct bio *bp)
{
	struct g_gate_softc *sc;

	sc = bp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(bp, ENXIO);
		return;
	}
	G_GATE_LOGREQ(2, bp, "Request received.");
	switch (bp->bio_cmd) {
	case BIO_READ:
		break;
	case BIO_DELETE:
	case BIO_WRITE:
	case BIO_FLUSH:
		/* XXX: Hack to allow read-only mounts. */
		if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
			g_io_deliver(bp, EPERM);
			return;
		}
		break;
	case BIO_GETATTR:
	default:
		G_GATE_LOGREQ(2, bp, "Ignoring request.");
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	mtx_lock(&sc->sc_queue_mtx);
	if (sc->sc_queue_size > 0 && sc->sc_queue_count > sc->sc_queue_size) {
		mtx_unlock(&sc->sc_queue_mtx);
		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
		g_io_deliver(bp, ENOMEM);
		return;
	}

	bp->bio_driver1 = (void *)sc->sc_seq;
	sc->sc_seq++;
	sc->sc_queue_count++;

	bioq_insert_tail(&sc->sc_inqueue, bp);
	wakeup(sc);

	mtx_unlock(&sc->sc_queue_mtx);
}

/*
 * Look up a device by unit number (or by provider name when unit is
 * G_GATE_NAME_GIVEN) and take a reference on it.
 */
static struct g_gate_softc *
g_gate_hold(int unit, const char *name)
{
	struct g_gate_softc *sc = NULL;

	mtx_lock(&g_gate_units_lock);
	if (unit >= 0 && unit < g_gate_maxunits)
		sc = g_gate_units[unit];
	else if (unit == G_GATE_NAME_GIVEN) {
		KASSERT(name != NULL, ("name is NULL"));
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				continue;
			if (strcmp(name,
			    g_gate_units[unit]->sc_provider->name) != 0) {
				continue;
			}
			sc = g_gate_units[unit];
			break;
		}
	}
	if (sc != NULL)
		sc->sc_ref++;
	mtx_unlock(&g_gate_units_lock);
	return (sc);
}

static void
g_gate_release(struct g_gate_softc *sc)
{

	g_topology_assert_not();
	mtx_lock(&g_gate_units_lock);
	sc->sc_ref--;
	KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
	if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		wakeup(&sc->sc_ref);
	mtx_unlock(&g_gate_units_lock);
}

static int
g_gate_getunit(int unit, int *errorp)
{

	mtx_assert(&g_gate_units_lock, MA_OWNED);
	if (unit >= 0) {
		if (unit >= g_gate_maxunits)
			*errorp = EINVAL;
		else if (g_gate_units[unit] == NULL)
			return (unit);
		else
			*errorp = EEXIST;
	} else {
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				return (unit);
		}
		*errorp = ENFILE;
	}
	return (-1);
}

/*
 * Watchdog callout: fail any request that has been queued for more than
 * five seconds, then re-arm itself unless the device is being destroyed.
 */
static void
g_gate_guard(void *arg)
{
	struct g_gate_softc *sc;
	struct bintime curtime;
	struct bio *bp, *bp2;

	sc = arg;
	binuptime(&curtime);
	g_gate_hold(sc->sc_unit, NULL);
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	g_gate_release(sc);
}

static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_gate_softc *sc;

	sc = gp->softc;
	if (sc == NULL || pp != NULL || cp != NULL)
		return;
	g_gate_hold(sc->sc_unit, NULL);
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-only");
	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "write-only");
	} else {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-write");
	}
	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
	    sc->sc_queue_count);
	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
	    sc->sc_queue_size);
	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
	sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit);
	g_topology_unlock();
	g_gate_release(sc);
	g_topology_lock();
}

/*
 * Validate a creation request and set up a new gate device together with
 * its GEOM geom and provider.
 */
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp;
	char name[NAME_MAX];
	int error = 0, unit;

	if (ggio->gctl_mediasize == 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	/* Reject a zero sector size here; the modulo below divides by it. */
	if (ggio->gctl_sectorsize <= 0 || !powerof2(ggio->gctl_sectorsize)) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
	    (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		G_GATE_DEBUG(1, "Invalid flags.");
		return (EINVAL);
	}
	if (ggio->gctl_unit != G_GATE_UNIT_AUTO &&
	    ggio->gctl_unit != G_GATE_NAME_GIVEN &&
	    ggio->gctl_unit < 0) {
		G_GATE_DEBUG(1, "Invalid unit number.");
		return (EINVAL);
	}
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN &&
	    ggio->gctl_name[0] == '\0') {
		G_GATE_DEBUG(1, "No device name.");
		return (EINVAL);
	}

	sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
	sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
	strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
	sc->sc_seq = 1;
	bioq_init(&sc->sc_inqueue);
	bioq_init(&sc->sc_outqueue);
	mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
	sc->sc_queue_count = 0;
	sc->sc_queue_size = ggio->gctl_maxcount;
	if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
		sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
	sc->sc_timeout = ggio->gctl_timeout;
	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
	mtx_lock(&g_gate_units_lock);
	sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error);
	if (sc->sc_unit < 0) {
		mtx_unlock(&g_gate_units_lock);
		mtx_destroy(&sc->sc_queue_mtx);
		free(sc, M_GATE);
		return (error);
	}
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
		snprintf(name, sizeof(name), "%s", ggio->gctl_name);
	else {
		snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME,
		    sc->sc_unit);
	}
	/* Check for name collision. */
	for (unit = 0; unit < g_gate_maxunits; unit++) {
		if (g_gate_units[unit] == NULL)
			continue;
		if (strcmp(name, g_gate_units[unit]->sc_name) != 0)
			continue;
		mtx_unlock(&g_gate_units_lock);
		mtx_destroy(&sc->sc_queue_mtx);
		free(sc, M_GATE);
		return (EEXIST);
	}
	sc->sc_name = name;
	g_gate_units[sc->sc_unit] = sc;
	g_gate_nunits++;
	mtx_unlock(&g_gate_units_lock);

	ggio->gctl_unit = sc->sc_unit;

	g_topology_lock();
	gp = g_new_geomf(&g_gate_class, "%s", name);
	gp->start = g_gate_start;
	gp->access = g_gate_access;
	gp->dumpconf = g_gate_dumpconf;
	gp->softc = sc;
	pp = g_new_providerf(gp, "%s", name);
	pp->mediasize = ggio->gctl_mediasize;
	pp->sectorsize = ggio->gctl_sectorsize;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	sc->sc_name = sc->sc_provider->name;
	mtx_unlock(&g_gate_units_lock);
	G_GATE_DEBUG(1, "Device %s created.", gp->name);

	if (sc->sc_timeout > 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	return (0);
}

#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION) {			\
		printf("Version mismatch %d != %d.\n",			\
		    (ggio)->gctl_version, G_GATE_VERSION);		\
		return (EINVAL);					\
	}								\
} while (0)

/*
 * Control device ioctl handler.  The userland daemon creates and destroys
 * devices with G_GATE_CMD_CREATE/G_GATE_CMD_DESTROY, fetches queued bios
 * with G_GATE_CMD_START and completes them with G_GATE_CMD_DONE.
 */
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd,
	    addr, flags, td);

	switch (cmd) {
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		error = g_gate_create(ggio);
		/*
		 * Reset TDP_GEOM flag.
		 * There are pending events for sure, because we just created
		 * a new provider and other classes want to taste it, but we
		 * cannot answer I/O requests until we return from here.
		 */
		td->td_pflags &= ~TDP_GEOM;
		return (error);
	    }
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		g_topology_lock();
		mtx_lock(&g_gate_units_lock);
		error = g_gate_destroy(sc, ggio->gctl_force);
		g_topology_unlock();
		if (error != 0)
			g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_CANCEL:
	    {
		struct g_gate_ctl_cancel *ggio = (void *)addr;
		struct bio *tbp, *lbp;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		lbp = NULL;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue,
		    tbp) {
			if (ggio->gctl_seq == 0 ||
			    ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
				G_GATE_LOGREQ(1, bp, "Request canceled.");
				bioq_remove(&sc->sc_outqueue, bp);
				/*
				 * Be sure to put requests back onto incoming
				 * queue in the proper order.
				 */
				if (lbp == NULL)
					bioq_insert_head(&sc->sc_inqueue, bp);
				else {
					TAILQ_INSERT_AFTER(
					    &sc->sc_inqueue.queue, lbp, bp,
					    bio_queue);
				}
				lbp = bp;
				/*
				 * If only one request was canceled, leave now.
				 */
				if (ggio->gctl_seq != 0)
					break;
			}
		}
		if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
			ggio->gctl_unit = sc->sc_unit;
		mtx_unlock(&sc->sc_queue_mtx);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = 0;
		for (;;) {
			mtx_lock(&sc->sc_queue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				ggio->gctl_error = ECANCELED;
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			if (msleep(sc, &sc->sc_queue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
				ggio->gctl_error = ECANCELED;
				goto start_end;
			}
		}
		ggio->gctl_cmd = bp->bio_cmd;
		if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_queue_mtx);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			goto start_end;
		}
		bioq_remove(&sc->sc_inqueue, bp);
		bioq_insert_tail(&sc->sc_outqueue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;

		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_DELETE:
		case BIO_FLUSH:
			break;
		case BIO_WRITE:
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				mtx_lock(&sc->sc_queue_mtx);
				bioq_remove(&sc->sc_outqueue, bp);
				bioq_insert_head(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			break;
		}
start_end:
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENOENT);
		error = 0;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			sc->sc_queue_count--;
		}
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp == NULL) {
			/*
			 * Request was probably canceled.
			 */
			goto done_end;
		}
		if (ggio->gctl_error == EAGAIN) {
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			mtx_lock(&sc->sc_queue_mtx);
			sc->sc_queue_count++;
			bioq_insert_head(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
				case BIO_FLUSH:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
done_end:
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}

static void
g_gate_device(void)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}

static int
g_gate_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF);
		g_gate_units = malloc(g_gate_maxunits *
		    sizeof(g_gate_units[0]), M_GATE, M_WAITOK | M_ZERO);
		g_gate_nunits = 0;
		g_gate_device();
		break;
	case MOD_UNLOAD:
		mtx_lock(&g_gate_units_lock);
		if (g_gate_nunits > 0) {
			mtx_unlock(&g_gate_units_lock);
			error = EBUSY;
			break;
		}
		mtx_unlock(&g_gate_units_lock);
		mtx_destroy(&g_gate_units_lock);
		if (status_dev != NULL)
			destroy_dev(status_dev);
		free(g_gate_units, M_GATE);
		break;
	default:
		return (EOPNOTSUPP);
	}

	return (error);
}

static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);
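
/*
 * Illustrative sketch (not part of the module): the rough shape of a
 * minimal userland server loop driving the control interface above, in
 * the spirit of ggatel(8)/ggated(8).  The serve_request() helper, the
 * unit variable and the buf buffer are hypothetical; the command and
 * field names come from <geom/gate/g_gate.h>.
 *
 *	struct g_gate_ctl_io ggio;
 *	int fd;
 *
 *	fd = open("/dev/" G_GATE_CTL_NAME, O_RDWR);
 *	for (;;) {
 *		ggio.gctl_version = G_GATE_VERSION;
 *		ggio.gctl_unit = unit;
 *		ggio.gctl_data = buf;
 *		ggio.gctl_length = sizeof(buf);
 *		ggio.gctl_error = 0;
 *		// Blocks in G_GATE_CMD_START until g_gate_start() queues
 *		// a bio; for BIO_WRITE the payload is copied out to buf.
 *		if (ioctl(fd, G_GATE_CMD_START, &ggio) == -1)
 *			break;
 *		if (ggio.gctl_error != 0)
 *			break;		// e.g. ECANCELED on device destroy
 *		// Service gctl_cmd at gctl_offset/gctl_length against the
 *		// backing store, then complete the bio; for BIO_READ the
 *		// data in buf is copied back in by G_GATE_CMD_DONE.
 *		ggio.gctl_error = serve_request(&ggio);
 *		(void)ioctl(fd, G_GATE_CMD_DONE, &ggio);
 *	}
 */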