/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2009-2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Pawel Jakub Dawidek
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/gate/g_gate.h>

FEATURE(geom_gate, "GEOM Gate module");

static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW, 0,
    "GEOM_GATE configuration");
static int g_gate_debug = 0;
TUNABLE_INT("kern.geom.gate.debug", &g_gate_debug);
SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RW, &g_gate_debug, 0,
    "Debug level");
static u_int g_gate_maxunits = 256;
TUNABLE_INT("kern.geom.gate.maxunits", &g_gate_maxunits);
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN,
    &g_gate_maxunits, 0, "Maximum number of ggate devices");

struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.version = G_VERSION,
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	g_gate_ioctl,
	.d_name =	G_GATE_CTL_NAME
};

static struct g_gate_softc **g_gate_units;
static u_int g_gate_nunits;
static struct mtx g_gate_units_lock;

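/*
 * Tear down a ggate device.  Unless "force" is set, an open provider
 * (non-zero access counts) makes this fail with EBUSY.  Otherwise all
 * queued requests are completed with ENXIO, the routine waits for every
 * outstanding reference to be dropped and then withers the geom.
 * Called with the topology lock and g_gate_units_lock held;
 * g_gate_units_lock is dropped before returning.
 */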
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_units_lock, MA_OWNED);
	pp = sc->sc_provider;
	if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_units_lock);
		return (EBUSY);
	}
	mtx_unlock(&g_gate_units_lock);
	mtx_lock(&sc->sc_queue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0)
		sc->sc_flags |= G_GATE_FLAG_DESTROY;
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	gp = pp->geom;
	pp->flags |= G_PF_WITHER;
	g_orphan_provider(pp, ENXIO);
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_first(&sc->sc_inqueue)) != NULL) {
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	while ((bp = bioq_first(&sc->sc_outqueue)) != NULL) {
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	/* One reference is ours. */
	sc->sc_ref--;
	while (sc->sc_ref > 0)
		msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	g_topology_lock();
	if ((cp = sc->sc_readcons) != NULL) {
		sc->sc_readcons = NULL;
		(void)g_access(cp, -1, 0, 0);
		g_detach(cp);
		g_destroy_consumer(cp);
	}
	G_GATE_DEBUG(1, "Device %s destroyed.", gp->name);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}

static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_gate_softc *sc;

	if (dr <= 0 && dw <= 0 && de <= 0)
		return (0);
	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		return (ENXIO);
	/* XXX: Hack to allow read-only mounts. */
#if 0
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
		return (EPERM);
#endif
	if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
		return (EPERM);
	return (0);
}

static void
g_gate_queue_io(struct bio *bp)
{
	struct g_gate_softc *sc;

	sc = bp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	mtx_lock(&sc->sc_queue_mtx);

	if (sc->sc_queue_size > 0 && sc->sc_queue_count > sc->sc_queue_size) {
		mtx_unlock(&sc->sc_queue_mtx);
		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
		g_io_deliver(bp, ENOMEM);
		return;
	}

	bp->bio_driver1 = (void *)sc->sc_seq;
	sc->sc_seq++;
	sc->sc_queue_count++;

	bioq_insert_tail(&sc->sc_inqueue, bp);
	wakeup(sc);

	mtx_unlock(&sc->sc_queue_mtx);
}

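/*
 * Completion callback for direct reads issued to the read provider
 * (sc_readcons).  On success the parent bio is completed immediately;
 * on failure the clone is discarded and the parent is re-queued so the
 * userland daemon can service it instead.
 */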
static void
g_gate_done(struct bio *cbp)
{
	struct bio *pbp;

	pbp = cbp->bio_parent;
	if (cbp->bio_error == 0) {
		pbp->bio_completed = cbp->bio_completed;
		g_destroy_bio(cbp);
		pbp->bio_inbed++;
		g_io_deliver(pbp, 0);
	} else {
		/* If direct read failed, pass it through userland daemon. */
		g_destroy_bio(cbp);
		pbp->bio_children--;
		g_gate_queue_io(pbp);
	}
}

static void
g_gate_start(struct bio *pbp)
{
	struct g_gate_softc *sc;

	sc = pbp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(pbp, ENXIO);
		return;
	}
	G_GATE_LOGREQ(2, pbp, "Request received.");
	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (sc->sc_readcons != NULL) {
			struct bio *cbp;

			cbp = g_clone_bio(pbp);
			if (cbp == NULL) {
				g_io_deliver(pbp, ENOMEM);
				return;
			}
			cbp->bio_done = g_gate_done;
			cbp->bio_offset = pbp->bio_offset + sc->sc_readoffset;
			cbp->bio_data = pbp->bio_data;
			cbp->bio_length = pbp->bio_length;
			cbp->bio_to = sc->sc_readcons->provider;
			g_io_request(cbp, sc->sc_readcons);
			return;
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
	case BIO_FLUSH:
		/* XXX: Hack to allow read-only mounts. */
		if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
			g_io_deliver(pbp, EPERM);
			return;
		}
		break;
	case BIO_GETATTR:
	default:
		G_GATE_LOGREQ(2, pbp, "Ignoring request.");
		g_io_deliver(pbp, EOPNOTSUPP);
		return;
	}

	g_gate_queue_io(pbp);
}

static struct g_gate_softc *
g_gate_hold(int unit, const char *name)
{
	struct g_gate_softc *sc = NULL;

	mtx_lock(&g_gate_units_lock);
	if (unit >= 0 && unit < g_gate_maxunits)
		sc = g_gate_units[unit];
	else if (unit == G_GATE_NAME_GIVEN) {
		KASSERT(name != NULL, ("name is NULL"));
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				continue;
			if (strcmp(name,
			    g_gate_units[unit]->sc_provider->name) != 0) {
				continue;
			}
			sc = g_gate_units[unit];
			break;
		}
	}
	if (sc != NULL)
		sc->sc_ref++;
	mtx_unlock(&g_gate_units_lock);
	return (sc);
}

static void
g_gate_release(struct g_gate_softc *sc)
{

	g_topology_assert_not();
	mtx_lock(&g_gate_units_lock);
	sc->sc_ref--;
	KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
	if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		wakeup(&sc->sc_ref);
	mtx_unlock(&g_gate_units_lock);
}

static int
g_gate_getunit(int unit, int *errorp)
{

	mtx_assert(&g_gate_units_lock, MA_OWNED);
	if (unit >= 0) {
		if (unit >= g_gate_maxunits)
			*errorp = EINVAL;
		else if (g_gate_units[unit] == NULL)
			return (unit);
		else
			*errorp = EEXIST;
	} else {
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				return (unit);
		}
		*errorp = ENFILE;
	}
	return (-1);
}

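/*
 * Watchdog callout: delivers EIO for any request that has been sitting
 * on the incoming or outgoing queue for five seconds or more, then
 * re-arms itself every sc_timeout seconds for as long as the device is
 * not being destroyed.
 */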
static void
g_gate_guard(void *arg)
{
	struct g_gate_softc *sc;
	struct bintime curtime;
	struct bio *bp, *bp2;

	sc = arg;
	binuptime(&curtime);
	g_gate_hold(sc->sc_unit, NULL);
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	g_gate_release(sc);
}

static void
g_gate_orphan(struct g_consumer *cp)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;

	g_topology_assert();
	gp = cp->geom;
	sc = gp->softc;
	if (sc == NULL)
		return;
	KASSERT(cp == sc->sc_readcons, ("cp=%p sc_readcons=%p", cp,
	    sc->sc_readcons));
	sc->sc_readcons = NULL;
	G_GATE_DEBUG(1, "Destroying read consumer on provider %s orphan.",
	    cp->provider->name);
	(void)g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_gate_softc *sc;

	sc = gp->softc;
	if (sc == NULL || pp != NULL || cp != NULL)
		return;
	sc = g_gate_hold(sc->sc_unit, NULL);
	if (sc == NULL)
		return;
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "write-only");
	} else {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-write");
	}
	if (sc->sc_readcons != NULL) {
		sbuf_printf(sb, "%s<read_offset>%jd</read_offset>\n",
		    indent, (intmax_t)sc->sc_readoffset);
		sbuf_printf(sb, "%s<read_provider>%s</read_provider>\n",
		    indent, sc->sc_readcons->provider->name);
	}
	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
	    sc->sc_queue_count);
	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
	    sc->sc_queue_size);
	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
	sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit);
	g_topology_unlock();
	g_gate_release(sc);
	g_topology_lock();
}

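/*
 * Handle G_GATE_CMD_CREATE: validate the geometry and flags requested by
 * userland, allocate a unit number (or take the caller-supplied name),
 * optionally attach a consumer for direct reads, and create the geom and
 * provider for the new device.
 */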
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp, *ropp;
	struct g_consumer *cp;
	char name[NAME_MAX];
	int error = 0, unit;

	if (ggio->gctl_mediasize <= 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if (ggio->gctl_sectorsize <= 0) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if (!powerof2(ggio->gctl_sectorsize)) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
	    (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		G_GATE_DEBUG(1, "Invalid flags.");
		return (EINVAL);
	}
	if (ggio->gctl_unit != G_GATE_UNIT_AUTO &&
	    ggio->gctl_unit != G_GATE_NAME_GIVEN &&
	    ggio->gctl_unit < 0) {
		G_GATE_DEBUG(1, "Invalid unit number.");
		return (EINVAL);
	}
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN &&
	    ggio->gctl_name[0] == '\0') {
		G_GATE_DEBUG(1, "No device name.");
		return (EINVAL);
	}

	sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
	sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
	strlcpy(sc->sc_info, ggio->gctl_info, sizeof(sc->sc_info));
	sc->sc_seq = 1;
	bioq_init(&sc->sc_inqueue);
	bioq_init(&sc->sc_outqueue);
	mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
	sc->sc_queue_count = 0;
	sc->sc_queue_size = ggio->gctl_maxcount;
	if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
		sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
	sc->sc_timeout = ggio->gctl_timeout;
	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);

	mtx_lock(&g_gate_units_lock);
	sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error);
	if (sc->sc_unit < 0)
		goto fail1;
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
		snprintf(name, sizeof(name), "%s", ggio->gctl_name);
	else {
		snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME,
		    sc->sc_unit);
	}
	/* Check for name collision. */
	for (unit = 0; unit < g_gate_maxunits; unit++) {
		if (g_gate_units[unit] == NULL)
			continue;
		if (strcmp(name, g_gate_units[unit]->sc_name) != 0)
			continue;
		error = EEXIST;
		goto fail1;
	}
	sc->sc_name = name;
	g_gate_units[sc->sc_unit] = sc;
	g_gate_nunits++;
	mtx_unlock(&g_gate_units_lock);

	g_topology_lock();

	if (ggio->gctl_readprov[0] == '\0') {
		ropp = NULL;
	} else {
		ropp = g_provider_by_name(ggio->gctl_readprov);
		if (ropp == NULL) {
			G_GATE_DEBUG(1, "Provider %s doesn't exist.",
			    ggio->gctl_readprov);
			error = EINVAL;
			goto fail2;
		}
		if ((ggio->gctl_readoffset % ggio->gctl_sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			error = EINVAL;
			goto fail2;
		}
		if (ggio->gctl_mediasize + ggio->gctl_readoffset >
		    ropp->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			error = EINVAL;
			goto fail2;
		}
	}

	gp = g_new_geomf(&g_gate_class, "%s", name);
	gp->start = g_gate_start;
	gp->access = g_gate_access;
	gp->orphan = g_gate_orphan;
	gp->dumpconf = g_gate_dumpconf;
	gp->softc = sc;

	if (ropp != NULL) {
		cp = g_new_consumer(gp);
		error = g_attach(cp, ropp);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to attach to %s.", ropp->name);
			goto fail3;
		}
		error = g_access(cp, 1, 0, 0);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to access %s.", ropp->name);
			g_detach(cp);
			goto fail3;
		}
		sc->sc_readcons = cp;
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	ggio->gctl_unit = sc->sc_unit;

	pp = g_new_providerf(gp, "%s", name);
	pp->mediasize = ggio->gctl_mediasize;
	pp->sectorsize = ggio->gctl_sectorsize;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);

	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	sc->sc_name = sc->sc_provider->name;
	mtx_unlock(&g_gate_units_lock);
	G_GATE_DEBUG(1, "Device %s created.", gp->name);

	if (sc->sc_timeout > 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	return (0);
fail3:
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
fail2:
	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
fail1:
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	free(sc, M_GATE);
	return (error);
}

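/*
 * Handle G_GATE_CMD_MODIFY: update the info string, the read provider
 * and/or the read offset of an existing device.  Changing the media size
 * is validated but not implemented yet (EOPNOTSUPP).
 */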
static int
g_gate_modify(struct g_gate_softc *sc, struct g_gate_ctl_modify *ggio)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	int error;

	if ((ggio->gctl_modify & GG_MODIFY_MEDIASIZE) != 0) {
		if (ggio->gctl_mediasize <= 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_mediasize % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		/* TODO */
		return (EOPNOTSUPP);
	}

	if ((ggio->gctl_modify & GG_MODIFY_INFO) != 0)
		(void)strlcpy(sc->sc_info, ggio->gctl_info,
		    sizeof(sc->sc_info));

	cp = NULL;

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		g_topology_lock();
		if (sc->sc_readcons != NULL) {
			cp = sc->sc_readcons;
			sc->sc_readcons = NULL;
			(void)g_access(cp, -1, 0, 0);
			g_detach(cp);
			g_destroy_consumer(cp);
		}
		if (ggio->gctl_readprov[0] != '\0') {
			pp = g_provider_by_name(ggio->gctl_readprov);
			if (pp == NULL) {
				g_topology_unlock();
				G_GATE_DEBUG(1, "Provider %s doesn't exist.",
				    ggio->gctl_readprov);
				return (EINVAL);
			}
			cp = g_new_consumer(sc->sc_provider->geom);
			error = g_attach(cp, pp);
			if (error != 0) {
				G_GATE_DEBUG(1, "Unable to attach to %s.",
				    pp->name);
			} else {
				error = g_access(cp, 1, 0, 0);
				if (error != 0) {
					G_GATE_DEBUG(1, "Unable to access %s.",
					    pp->name);
					g_detach(cp);
				}
			}
			if (error != 0) {
				g_destroy_consumer(cp);
				g_topology_unlock();
				return (error);
			}
		}
	} else {
		cp = sc->sc_readcons;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READOFFSET) != 0) {
		if (cp == NULL) {
			G_GATE_DEBUG(1, "No read provider.");
			return (EINVAL);
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_readoffset % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			return (EINVAL);
		}
		if (pp->mediasize + ggio->gctl_readoffset >
		    cp->provider->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			return (EINVAL);
		}
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		sc->sc_readcons = cp;
		g_topology_unlock();
	}

	return (0);
}

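/*
 * Control device ioctl handler; this is the entire userland interface.
 * A daemon (ggatel(8), ggated(8) and friends) creates a device with
 * G_GATE_CMD_CREATE and then services I/O with a loop roughly like the
 * following sketch (error handling and buffer management omitted):
 *
 *	struct g_gate_ctl_io ggio;
 *
 *	ggio.gctl_version = G_GATE_VERSION;
 *	ggio.gctl_unit = unit;
 *	for (;;) {
 *		ggio.gctl_data = buf;
 *		ggio.gctl_length = bufsize;
 *		ggio.gctl_error = 0;
 *		ioctl(fd, G_GATE_CMD_START, &ggio);
 *		... service ggio.gctl_cmd at gctl_offset/gctl_length ...
 *		ioctl(fd, G_GATE_CMD_DONE, &ggio);
 *	}
 */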
#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION) {			\
		printf("Version mismatch %d != %d.\n",			\
		    ggio->gctl_version, G_GATE_VERSION);		\
		return (EINVAL);					\
	}								\
} while (0)
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd,
	    addr, flags, td);

	switch (cmd) {
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		error = g_gate_create(ggio);
		/*
		 * Reset TDP_GEOM flag.
		 * There are pending events for sure, because we just created
		 * new provider and other classes want to taste it, but we
		 * cannot answer on I/O requests until we're here.
		 */
		td->td_pflags &= ~TDP_GEOM;
		return (error);
	    }
	case G_GATE_CMD_MODIFY:
	    {
		struct g_gate_ctl_modify *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = g_gate_modify(sc, ggio);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		g_topology_lock();
		mtx_lock(&g_gate_units_lock);
		error = g_gate_destroy(sc, ggio->gctl_force);
		g_topology_unlock();
		if (error != 0)
			g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_CANCEL:
	    {
		struct g_gate_ctl_cancel *ggio = (void *)addr;
		struct bio *tbp, *lbp;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		lbp = NULL;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue,
		    tbp) {
			if (ggio->gctl_seq == 0 ||
			    ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
				G_GATE_LOGREQ(1, bp, "Request canceled.");
				bioq_remove(&sc->sc_outqueue, bp);
				/*
				 * Be sure to put requests back onto incoming
				 * queue in the proper order.
				 */
				if (lbp == NULL)
					bioq_insert_head(&sc->sc_inqueue, bp);
				else {
					TAILQ_INSERT_AFTER(
					    &sc->sc_inqueue.queue,
					    lbp, bp, bio_queue);
				}
				lbp = bp;
				/*
				 * If only one request was canceled, leave now.
				 */
				if (ggio->gctl_seq != 0)
					break;
			}
		}
		if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
			ggio->gctl_unit = sc->sc_unit;
		mtx_unlock(&sc->sc_queue_mtx);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = 0;
		for (;;) {
			mtx_lock(&sc->sc_queue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				ggio->gctl_error = ECANCELED;
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			if (msleep(sc, &sc->sc_queue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0) != 0) {
				ggio->gctl_error = ECANCELED;
				goto start_end;
			}
		}
		ggio->gctl_cmd = bp->bio_cmd;
		if ((bp->bio_cmd == BIO_DELETE || bp->bio_cmd == BIO_WRITE) &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_queue_mtx);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			goto start_end;
		}
		bioq_remove(&sc->sc_inqueue, bp);
		bioq_insert_tail(&sc->sc_outqueue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;

		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_DELETE:
		case BIO_FLUSH:
			break;
		case BIO_WRITE:
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				mtx_lock(&sc->sc_queue_mtx);
				bioq_remove(&sc->sc_outqueue, bp);
				bioq_insert_head(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			break;
		}
start_end:
		g_gate_release(sc);
		return (error);
	    }
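	/*
	 * G_GATE_CMD_DONE: userland reports completion of a request handed
	 * out by G_GATE_CMD_START.  EAGAIN puts the request back onto the
	 * incoming queue; any other gctl_error (including 0) completes it.
	 */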
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENOENT);
		error = 0;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			sc->sc_queue_count--;
		}
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp == NULL) {
			/*
			 * Request was probably canceled.
			 */
			goto done_end;
		}
		if (ggio->gctl_error == EAGAIN) {
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			mtx_lock(&sc->sc_queue_mtx);
			sc->sc_queue_count++;
			bioq_insert_head(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
				case BIO_FLUSH:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
done_end:
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}

static void
g_gate_device(void)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}

static int
g_gate_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF);
		g_gate_units = malloc(g_gate_maxunits *
		    sizeof(g_gate_units[0]), M_GATE, M_WAITOK | M_ZERO);
		g_gate_nunits = 0;
		g_gate_device();
		break;
	case MOD_UNLOAD:
		mtx_lock(&g_gate_units_lock);
		if (g_gate_nunits > 0) {
			mtx_unlock(&g_gate_units_lock);
			error = EBUSY;
			break;
		}
		mtx_unlock(&g_gate_units_lock);
		mtx_destroy(&g_gate_units_lock);
		if (status_dev != 0)
			destroy_dev(status_dev);
		free(g_gate_units, M_GATE);
		break;
	default:
		return (EOPNOTSUPP);
		break;
	}

	return (error);
}
static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);