1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org> 5 * Copyright (c) 2009-2010 The FreeBSD Foundation 6 * All rights reserved. 7 * 8 * Portions of this software were developed by Pawel Jakub Dawidek 9 * under sponsorship from the FreeBSD Foundation. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 */ 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/bio.h> 36 #include <sys/conf.h> 37 #include <sys/kernel.h> 38 #include <sys/kthread.h> 39 #include <sys/fcntl.h> 40 #include <sys/linker.h> 41 #include <sys/lock.h> 42 #include <sys/malloc.h> 43 #include <sys/mutex.h> 44 #include <sys/proc.h> 45 #include <sys/limits.h> 46 #include <sys/queue.h> 47 #include <sys/sbuf.h> 48 #include <sys/sysctl.h> 49 #include <sys/signalvar.h> 50 #include <sys/time.h> 51 #include <machine/atomic.h> 52 53 #include <geom/geom.h> 54 #include <geom/geom_dbg.h> 55 #include <geom/gate/g_gate.h> 56 57 FEATURE(geom_gate, "GEOM Gate module"); 58 59 static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data"); 60 61 SYSCTL_DECL(_kern_geom); 62 static SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 63 "GEOM_GATE configuration"); 64 static int g_gate_debug = 0; 65 SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RWTUN, &g_gate_debug, 0, 66 "Debug level"); 67 static u_int g_gate_maxunits = 256; 68 SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN, 69 &g_gate_maxunits, 0, "Maximum number of ggate devices"); 70 71 struct g_class g_gate_class = { 72 .name = G_GATE_CLASS_NAME, 73 .version = G_VERSION, 74 }; 75 76 static struct cdev *status_dev; 77 static d_ioctl_t g_gate_ioctl; 78 static struct cdevsw g_gate_cdevsw = { 79 .d_version = D_VERSION, 80 .d_ioctl = g_gate_ioctl, 81 .d_name = G_GATE_CTL_NAME 82 }; 83 84 static struct g_gate_softc **g_gate_units; 85 static u_int g_gate_nunits; 86 static struct mtx g_gate_units_lock; 87 88 static void 89 g_gate_detach(void *arg, int flags __unused) 90 { 91 struct g_consumer *cp = arg; 92 93 g_topology_assert(); 94 G_GATE_DEBUG(1, "Destroying read consumer on provider %s orphan.", 95 cp->provider->name); 96 (void)g_access(cp, -1, 0, 0); 97 g_detach(cp); 98 g_destroy_consumer(cp); 99 } 100 101 static int 102 g_gate_destroy(struct g_gate_softc *sc, boolean_t force) 103 { 104 struct bio_queue_head queue; 105 struct g_provider *pp; 106 struct g_consumer *cp; 107 struct g_geom *gp; 108 struct bio *bp; 109 110 g_topology_assert(); 111 mtx_assert(&g_gate_units_lock, MA_OWNED); 112 pp = sc->sc_provider; 113 if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { 114 mtx_unlock(&g_gate_units_lock); 115 return (EBUSY); 116 } 117 mtx_unlock(&g_gate_units_lock); 118 mtx_lock(&sc->sc_queue_mtx); 119 if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) 120 sc->sc_flags |= G_GATE_FLAG_DESTROY; 121 wakeup(sc); 122 mtx_unlock(&sc->sc_queue_mtx); 123 gp = pp->geom; 124 g_wither_provider(pp, ENXIO); 125 callout_drain(&sc->sc_callout); 126 bioq_init(&queue); 127 mtx_lock(&sc->sc_queue_mtx); 128 while ((bp = bioq_takefirst(&sc->sc_inqueue)) != NULL) { 129 sc->sc_queue_count--; 130 bioq_insert_tail(&queue, bp); 131 } 132 while ((bp = bioq_takefirst(&sc->sc_outqueue)) != NULL) { 133 sc->sc_queue_count--; 134 bioq_insert_tail(&queue, bp); 135 } 136 mtx_unlock(&sc->sc_queue_mtx); 137 g_topology_unlock(); 138 while ((bp = bioq_takefirst(&queue)) != NULL) { 139 G_GATE_LOGREQ(1, bp, "Request canceled."); 140 g_io_deliver(bp, ENXIO); 141 } 142 mtx_lock(&g_gate_units_lock); 143 /* One reference is ours. */ 144 sc->sc_ref--; 145 while (sc->sc_ref > 0) 146 msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0); 147 g_gate_units[sc->sc_unit] = NULL; 148 KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?")); 149 g_gate_nunits--; 150 mtx_unlock(&g_gate_units_lock); 151 mtx_destroy(&sc->sc_queue_mtx); 152 mtx_destroy(&sc->sc_read_mtx); 153 g_topology_lock(); 154 if ((cp = sc->sc_readcons) != NULL) { 155 sc->sc_readcons = NULL; 156 (void)g_access(cp, -1, 0, 0); 157 g_detach(cp); 158 g_destroy_consumer(cp); 159 } 160 G_GATE_DEBUG(1, "Device %s destroyed.", gp->name); 161 gp->softc = NULL; 162 g_wither_geom(gp, ENXIO); 163 sc->sc_provider = NULL; 164 free(sc, M_GATE); 165 return (0); 166 } 167 168 static int 169 g_gate_access(struct g_provider *pp, int dr, int dw, int de) 170 { 171 struct g_gate_softc *sc; 172 173 if (dr <= 0 && dw <= 0 && de <= 0) 174 return (0); 175 sc = pp->geom->softc; 176 if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) 177 return (ENXIO); 178 /* XXX: Hack to allow read-only mounts. */ 179 #if 0 180 if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0) 181 return (EPERM); 182 #endif 183 if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0) 184 return (EPERM); 185 return (0); 186 } 187 188 static void 189 g_gate_queue_io(struct bio *bp) 190 { 191 struct g_gate_softc *sc; 192 193 sc = bp->bio_to->geom->softc; 194 if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) { 195 g_io_deliver(bp, ENXIO); 196 return; 197 } 198 199 mtx_lock(&sc->sc_queue_mtx); 200 201 if (sc->sc_queue_size > 0 && sc->sc_queue_count > sc->sc_queue_size) { 202 mtx_unlock(&sc->sc_queue_mtx); 203 G_GATE_LOGREQ(1, bp, "Queue full, request canceled."); 204 g_io_deliver(bp, ENOMEM); 205 return; 206 } 207 208 bp->bio_driver1 = (void *)sc->sc_seq; 209 sc->sc_seq++; 210 sc->sc_queue_count++; 211 212 bioq_insert_tail(&sc->sc_inqueue, bp); 213 wakeup(sc); 214 215 mtx_unlock(&sc->sc_queue_mtx); 216 } 217 218 static void 219 g_gate_done(struct bio *cbp) 220 { 221 struct g_gate_softc *sc; 222 struct bio *pbp; 223 struct g_consumer *cp; 224 225 cp = cbp->bio_from; 226 pbp = cbp->bio_parent; 227 if (cbp->bio_error == 0) { 228 pbp->bio_completed = cbp->bio_completed; 229 g_destroy_bio(cbp); 230 pbp->bio_inbed++; 231 g_io_deliver(pbp, 0); 232 } else { 233 /* If direct read failed, pass it through userland daemon. */ 234 g_destroy_bio(cbp); 235 pbp->bio_children--; 236 g_gate_queue_io(pbp); 237 } 238 239 sc = cp->geom->softc; 240 mtx_lock(&sc->sc_read_mtx); 241 if (--cp->index == 0 && sc->sc_readcons != cp) 242 g_post_event(g_gate_detach, cp, M_NOWAIT, NULL); 243 mtx_unlock(&sc->sc_read_mtx); 244 } 245 246 static void 247 g_gate_start(struct bio *pbp) 248 { 249 struct g_gate_softc *sc; 250 struct g_consumer *cp; 251 struct bio *cbp; 252 253 sc = pbp->bio_to->geom->softc; 254 if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) { 255 g_io_deliver(pbp, ENXIO); 256 return; 257 } 258 G_GATE_LOGREQ(2, pbp, "Request received."); 259 switch (pbp->bio_cmd) { 260 case BIO_READ: 261 if (sc->sc_readcons == NULL) 262 break; 263 cbp = g_clone_bio(pbp); 264 if (cbp == NULL) { 265 g_io_deliver(pbp, ENOMEM); 266 return; 267 } 268 mtx_lock(&sc->sc_read_mtx); 269 if ((cp = sc->sc_readcons) == NULL) { 270 mtx_unlock(&sc->sc_read_mtx); 271 g_destroy_bio(cbp); 272 pbp->bio_children--; 273 break; 274 } 275 cp->index++; 276 cbp->bio_offset = pbp->bio_offset + sc->sc_readoffset; 277 mtx_unlock(&sc->sc_read_mtx); 278 cbp->bio_done = g_gate_done; 279 g_io_request(cbp, cp); 280 return; 281 case BIO_DELETE: 282 case BIO_WRITE: 283 case BIO_FLUSH: 284 case BIO_SPEEDUP: 285 /* XXX: Hack to allow read-only mounts. */ 286 if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) { 287 g_io_deliver(pbp, EPERM); 288 return; 289 } 290 break; 291 case BIO_GETATTR: 292 default: 293 G_GATE_LOGREQ(2, pbp, "Ignoring request."); 294 g_io_deliver(pbp, EOPNOTSUPP); 295 return; 296 } 297 298 g_gate_queue_io(pbp); 299 } 300 301 static struct g_gate_softc * 302 g_gate_hold(int unit, const char *name) 303 { 304 struct g_gate_softc *sc = NULL; 305 306 mtx_lock(&g_gate_units_lock); 307 if (unit >= 0 && unit < g_gate_maxunits) 308 sc = g_gate_units[unit]; 309 else if (unit == G_GATE_NAME_GIVEN) { 310 KASSERT(name != NULL, ("name is NULL")); 311 for (unit = 0; unit < g_gate_maxunits; unit++) { 312 if (g_gate_units[unit] == NULL) 313 continue; 314 if (strcmp(name, 315 g_gate_units[unit]->sc_provider->name) != 0) { 316 continue; 317 } 318 sc = g_gate_units[unit]; 319 break; 320 } 321 } 322 if (sc != NULL) 323 sc->sc_ref++; 324 mtx_unlock(&g_gate_units_lock); 325 return (sc); 326 } 327 328 static void 329 g_gate_release(struct g_gate_softc *sc) 330 { 331 332 g_topology_assert_not(); 333 mtx_lock(&g_gate_units_lock); 334 sc->sc_ref--; 335 KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name)); 336 if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) 337 wakeup(&sc->sc_ref); 338 mtx_unlock(&g_gate_units_lock); 339 } 340 341 static int 342 g_gate_getunit(int unit, int *errorp) 343 { 344 345 mtx_assert(&g_gate_units_lock, MA_OWNED); 346 if (unit >= 0) { 347 if (unit >= g_gate_maxunits) 348 *errorp = EINVAL; 349 else if (g_gate_units[unit] == NULL) 350 return (unit); 351 else 352 *errorp = EEXIST; 353 } else { 354 for (unit = 0; unit < g_gate_maxunits; unit++) { 355 if (g_gate_units[unit] == NULL) 356 return (unit); 357 } 358 *errorp = ENFILE; 359 } 360 return (-1); 361 } 362 363 static void 364 g_gate_guard(void *arg) 365 { 366 struct bio_queue_head queue; 367 struct g_gate_softc *sc; 368 struct bintime curtime; 369 struct bio *bp, *bp2; 370 371 sc = arg; 372 binuptime(&curtime); 373 g_gate_hold(sc->sc_unit, NULL); 374 bioq_init(&queue); 375 mtx_lock(&sc->sc_queue_mtx); 376 TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) { 377 if (curtime.sec - bp->bio_t0.sec < 5) 378 continue; 379 bioq_remove(&sc->sc_inqueue, bp); 380 sc->sc_queue_count--; 381 bioq_insert_tail(&queue, bp); 382 } 383 TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) { 384 if (curtime.sec - bp->bio_t0.sec < 5) 385 continue; 386 bioq_remove(&sc->sc_outqueue, bp); 387 sc->sc_queue_count--; 388 bioq_insert_tail(&queue, bp); 389 } 390 mtx_unlock(&sc->sc_queue_mtx); 391 while ((bp = bioq_takefirst(&queue)) != NULL) { 392 G_GATE_LOGREQ(1, bp, "Request timeout."); 393 g_io_deliver(bp, EIO); 394 } 395 if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) { 396 callout_reset(&sc->sc_callout, sc->sc_timeout * hz, 397 g_gate_guard, sc); 398 } 399 g_gate_release(sc); 400 } 401 402 static void 403 g_gate_orphan(struct g_consumer *cp) 404 { 405 struct g_gate_softc *sc; 406 struct g_geom *gp; 407 int done; 408 409 g_topology_assert(); 410 gp = cp->geom; 411 sc = gp->softc; 412 mtx_lock(&sc->sc_read_mtx); 413 if (sc->sc_readcons == cp) 414 sc->sc_readcons = NULL; 415 done = (cp->index == 0); 416 mtx_unlock(&sc->sc_read_mtx); 417 if (done) 418 g_gate_detach(cp, 0); 419 } 420 421 static void 422 g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 423 struct g_consumer *cp, struct g_provider *pp) 424 { 425 struct g_gate_softc *sc; 426 427 sc = gp->softc; 428 if (sc == NULL || pp != NULL || cp != NULL) 429 return; 430 sc = g_gate_hold(sc->sc_unit, NULL); 431 if (sc == NULL) 432 return; 433 if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) { 434 sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only"); 435 } else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) { 436 sbuf_printf(sb, "%s<access>%s</access>\n", indent, 437 "write-only"); 438 } else { 439 sbuf_printf(sb, "%s<access>%s</access>\n", indent, 440 "read-write"); 441 } 442 if (sc->sc_readcons != NULL) { 443 sbuf_printf(sb, "%s<read_offset>%jd</read_offset>\n", 444 indent, (intmax_t)sc->sc_readoffset); 445 sbuf_printf(sb, "%s<read_provider>%s</read_provider>\n", 446 indent, sc->sc_readcons->provider->name); 447 } 448 sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout); 449 sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info); 450 sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent, 451 sc->sc_queue_count); 452 sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent, 453 sc->sc_queue_size); 454 sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref); 455 sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit); 456 g_topology_unlock(); 457 g_gate_release(sc); 458 g_topology_lock(); 459 } 460 461 static int 462 g_gate_create(struct g_gate_ctl_create *ggio) 463 { 464 struct g_gate_softc *sc; 465 struct g_geom *gp; 466 struct g_provider *pp, *ropp; 467 struct g_consumer *cp; 468 char name[NAME_MAX + 1]; 469 char readprov[NAME_MAX + 1]; 470 int error = 0, unit; 471 472 if (ggio->gctl_mediasize <= 0) { 473 G_GATE_DEBUG(1, "Invalid media size."); 474 return (EINVAL); 475 } 476 if (ggio->gctl_sectorsize <= 0) { 477 G_GATE_DEBUG(1, "Invalid sector size."); 478 return (EINVAL); 479 } 480 if (!powerof2(ggio->gctl_sectorsize)) { 481 G_GATE_DEBUG(1, "Invalid sector size."); 482 return (EINVAL); 483 } 484 if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) { 485 G_GATE_DEBUG(1, "Invalid media size."); 486 return (EINVAL); 487 } 488 if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 && 489 (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) { 490 G_GATE_DEBUG(1, "Invalid flags."); 491 return (EINVAL); 492 } 493 if (ggio->gctl_unit != G_GATE_UNIT_AUTO && 494 ggio->gctl_unit != G_GATE_NAME_GIVEN && 495 ggio->gctl_unit < 0) { 496 G_GATE_DEBUG(1, "Invalid unit number."); 497 return (EINVAL); 498 } 499 if (ggio->gctl_unit == G_GATE_NAME_GIVEN && 500 ggio->gctl_name[0] == '\0') { 501 G_GATE_DEBUG(1, "No device name."); 502 return (EINVAL); 503 } 504 505 sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO); 506 sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS); 507 memset(sc->sc_info, 0, sizeof(sc->sc_info)); 508 strncpy(sc->sc_info, ggio->gctl_info, 509 MIN(sizeof(sc->sc_info) - 1, sizeof(ggio->gctl_info))); 510 sc->sc_seq = 1; 511 bioq_init(&sc->sc_inqueue); 512 bioq_init(&sc->sc_outqueue); 513 mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF); 514 mtx_init(&sc->sc_read_mtx, "gg:read", NULL, MTX_DEF); 515 sc->sc_queue_count = 0; 516 sc->sc_queue_size = ggio->gctl_maxcount; 517 if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE) 518 sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE; 519 sc->sc_timeout = ggio->gctl_timeout; 520 callout_init(&sc->sc_callout, 1); 521 522 mtx_lock(&g_gate_units_lock); 523 sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error); 524 if (sc->sc_unit < 0) 525 goto fail1; 526 if (ggio->gctl_unit == G_GATE_NAME_GIVEN) { 527 memset(name, 0, sizeof(name)); 528 strncpy(name, ggio->gctl_name, 529 MIN(sizeof(name) - 1, sizeof(ggio->gctl_name))); 530 } else { 531 snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME, 532 sc->sc_unit); 533 } 534 /* Check for name collision. */ 535 for (unit = 0; unit < g_gate_maxunits; unit++) { 536 if (g_gate_units[unit] == NULL) 537 continue; 538 if (strcmp(name, g_gate_units[unit]->sc_name) != 0) 539 continue; 540 error = EEXIST; 541 goto fail1; 542 } 543 // local stack buffer 'name' assigned here temporarily only. 544 // the real provider name is assigned below. 545 sc->sc_name = name; 546 g_gate_units[sc->sc_unit] = sc; 547 g_gate_nunits++; 548 mtx_unlock(&g_gate_units_lock); 549 550 g_topology_lock(); 551 552 if (ggio->gctl_readprov[0] == '\0') { 553 ropp = NULL; 554 } else { 555 memset(readprov, 0, sizeof(readprov)); 556 strncpy(readprov, ggio->gctl_readprov, 557 MIN(sizeof(readprov) - 1, sizeof(ggio->gctl_readprov))); 558 ropp = g_provider_by_name(readprov); 559 if (ropp == NULL) { 560 G_GATE_DEBUG(1, "Provider %s doesn't exist.", readprov); 561 error = EINVAL; 562 goto fail2; 563 } 564 if ((ggio->gctl_readoffset % ggio->gctl_sectorsize) != 0) { 565 G_GATE_DEBUG(1, "Invalid read offset."); 566 error = EINVAL; 567 goto fail2; 568 } 569 if (ggio->gctl_mediasize + ggio->gctl_readoffset > 570 ropp->mediasize) { 571 G_GATE_DEBUG(1, "Invalid read offset or media size."); 572 error = EINVAL; 573 goto fail2; 574 } 575 } 576 577 gp = g_new_geomf(&g_gate_class, "%s", name); 578 gp->start = g_gate_start; 579 gp->access = g_gate_access; 580 gp->orphan = g_gate_orphan; 581 gp->dumpconf = g_gate_dumpconf; 582 gp->softc = sc; 583 584 if (ropp != NULL) { 585 cp = g_new_consumer(gp); 586 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 587 error = g_attach(cp, ropp); 588 if (error != 0) { 589 G_GATE_DEBUG(1, "Unable to attach to %s.", ropp->name); 590 goto fail3; 591 } 592 error = g_access(cp, 1, 0, 0); 593 if (error != 0) { 594 G_GATE_DEBUG(1, "Unable to access %s.", ropp->name); 595 g_detach(cp); 596 goto fail3; 597 } 598 sc->sc_readcons = cp; 599 sc->sc_readoffset = ggio->gctl_readoffset; 600 } 601 602 ggio->gctl_unit = sc->sc_unit; 603 604 pp = g_new_providerf(gp, "%s", name); 605 pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE; 606 pp->mediasize = ggio->gctl_mediasize; 607 pp->sectorsize = ggio->gctl_sectorsize; 608 sc->sc_provider = pp; 609 g_error_provider(pp, 0); 610 611 g_topology_unlock(); 612 mtx_lock(&g_gate_units_lock); 613 sc->sc_name = sc->sc_provider->name; 614 mtx_unlock(&g_gate_units_lock); 615 G_GATE_DEBUG(1, "Device %s created.", gp->name); 616 617 if (sc->sc_timeout > 0) { 618 callout_reset(&sc->sc_callout, sc->sc_timeout * hz, 619 g_gate_guard, sc); 620 } 621 return (0); 622 fail3: 623 g_destroy_consumer(cp); 624 g_destroy_geom(gp); 625 fail2: 626 g_topology_unlock(); 627 mtx_lock(&g_gate_units_lock); 628 g_gate_units[sc->sc_unit] = NULL; 629 KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?")); 630 g_gate_nunits--; 631 fail1: 632 mtx_unlock(&g_gate_units_lock); 633 mtx_destroy(&sc->sc_queue_mtx); 634 mtx_destroy(&sc->sc_read_mtx); 635 free(sc, M_GATE); 636 return (error); 637 } 638 639 static int 640 g_gate_modify(struct g_gate_softc *sc, struct g_gate_ctl_modify *ggio) 641 { 642 char readprov[NAME_MAX + 1]; 643 struct g_provider *pp; 644 struct g_consumer *cp; 645 int done, error; 646 647 if ((ggio->gctl_modify & GG_MODIFY_MEDIASIZE) != 0) { 648 if (ggio->gctl_mediasize <= 0) { 649 G_GATE_DEBUG(1, "Invalid media size."); 650 return (EINVAL); 651 } 652 pp = sc->sc_provider; 653 if ((ggio->gctl_mediasize % pp->sectorsize) != 0) { 654 G_GATE_DEBUG(1, "Invalid media size."); 655 return (EINVAL); 656 } 657 g_resize_provider(pp, ggio->gctl_mediasize); 658 return (0); 659 } 660 661 if ((ggio->gctl_modify & GG_MODIFY_INFO) != 0) { 662 memset(sc->sc_info, 0, sizeof(sc->sc_info)); 663 strncpy(sc->sc_info, ggio->gctl_info, 664 MIN(sizeof(sc->sc_info) - 1, sizeof(ggio->gctl_info))); 665 } 666 cp = NULL; 667 668 if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) { 669 g_topology_lock(); 670 mtx_lock(&sc->sc_read_mtx); 671 if ((cp = sc->sc_readcons) != NULL) { 672 sc->sc_readcons = NULL; 673 done = (cp->index == 0); 674 mtx_unlock(&sc->sc_read_mtx); 675 if (done) 676 g_gate_detach(cp, 0); 677 } else 678 mtx_unlock(&sc->sc_read_mtx); 679 if (ggio->gctl_readprov[0] != '\0') { 680 memset(readprov, 0, sizeof(readprov)); 681 strncpy(readprov, ggio->gctl_readprov, 682 MIN(sizeof(readprov) - 1, 683 sizeof(ggio->gctl_readprov))); 684 pp = g_provider_by_name(readprov); 685 if (pp == NULL) { 686 g_topology_unlock(); 687 G_GATE_DEBUG(1, "Provider %s doesn't exist.", 688 readprov); 689 return (EINVAL); 690 } 691 cp = g_new_consumer(sc->sc_provider->geom); 692 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 693 error = g_attach(cp, pp); 694 if (error != 0) { 695 G_GATE_DEBUG(1, "Unable to attach to %s.", 696 pp->name); 697 } else { 698 error = g_access(cp, 1, 0, 0); 699 if (error != 0) { 700 G_GATE_DEBUG(1, "Unable to access %s.", 701 pp->name); 702 g_detach(cp); 703 } 704 } 705 if (error != 0) { 706 g_destroy_consumer(cp); 707 g_topology_unlock(); 708 return (error); 709 } 710 } 711 } else { 712 cp = sc->sc_readcons; 713 } 714 715 if ((ggio->gctl_modify & GG_MODIFY_READOFFSET) != 0) { 716 if (cp == NULL) { 717 G_GATE_DEBUG(1, "No read provider."); 718 return (EINVAL); 719 } 720 pp = sc->sc_provider; 721 if ((ggio->gctl_readoffset % pp->sectorsize) != 0) { 722 G_GATE_DEBUG(1, "Invalid read offset."); 723 return (EINVAL); 724 } 725 if (pp->mediasize + ggio->gctl_readoffset > 726 cp->provider->mediasize) { 727 G_GATE_DEBUG(1, "Invalid read offset or media size."); 728 return (EINVAL); 729 } 730 sc->sc_readoffset = ggio->gctl_readoffset; 731 } 732 733 if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) { 734 sc->sc_readcons = cp; 735 g_topology_unlock(); 736 } 737 738 return (0); 739 } 740 741 #define G_GATE_CHECK_VERSION(ggio) do { \ 742 if ((ggio)->gctl_version != G_GATE_VERSION) { \ 743 printf("Version mismatch %d != %d.\n", \ 744 ggio->gctl_version, G_GATE_VERSION); \ 745 return (EINVAL); \ 746 } \ 747 } while (0) 748 static int 749 g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td) 750 { 751 struct g_gate_softc *sc; 752 struct bio *bp; 753 int error = 0; 754 755 G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr, 756 flags, td); 757 758 switch (cmd) { 759 case G_GATE_CMD_CREATE: 760 { 761 struct g_gate_ctl_create *ggio = (void *)addr; 762 763 G_GATE_CHECK_VERSION(ggio); 764 error = g_gate_create(ggio); 765 /* 766 * Reset TDP_GEOM flag. 767 * There are pending events for sure, because we just created 768 * new provider and other classes want to taste it, but we 769 * cannot answer on I/O requests until we're here. 770 */ 771 td->td_pflags &= ~TDP_GEOM; 772 return (error); 773 } 774 case G_GATE_CMD_MODIFY: 775 { 776 struct g_gate_ctl_modify *ggio = (void *)addr; 777 778 G_GATE_CHECK_VERSION(ggio); 779 sc = g_gate_hold(ggio->gctl_unit, NULL); 780 if (sc == NULL) 781 return (ENXIO); 782 error = g_gate_modify(sc, ggio); 783 g_gate_release(sc); 784 return (error); 785 } 786 case G_GATE_CMD_DESTROY: 787 { 788 struct g_gate_ctl_destroy *ggio = (void *)addr; 789 790 G_GATE_CHECK_VERSION(ggio); 791 sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name); 792 if (sc == NULL) 793 return (ENXIO); 794 g_topology_lock(); 795 mtx_lock(&g_gate_units_lock); 796 error = g_gate_destroy(sc, ggio->gctl_force); 797 g_topology_unlock(); 798 if (error != 0) 799 g_gate_release(sc); 800 return (error); 801 } 802 case G_GATE_CMD_CANCEL: 803 { 804 struct g_gate_ctl_cancel *ggio = (void *)addr; 805 struct bio *tbp, *lbp; 806 807 G_GATE_CHECK_VERSION(ggio); 808 sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name); 809 if (sc == NULL) 810 return (ENXIO); 811 lbp = NULL; 812 mtx_lock(&sc->sc_queue_mtx); 813 TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) { 814 if (ggio->gctl_seq == 0 || 815 ggio->gctl_seq == (uintptr_t)bp->bio_driver1) { 816 G_GATE_LOGREQ(1, bp, "Request canceled."); 817 bioq_remove(&sc->sc_outqueue, bp); 818 /* 819 * Be sure to put requests back onto incoming 820 * queue in the proper order. 821 */ 822 if (lbp == NULL) 823 bioq_insert_head(&sc->sc_inqueue, bp); 824 else { 825 TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue, 826 lbp, bp, bio_queue); 827 } 828 lbp = bp; 829 /* 830 * If only one request was canceled, leave now. 831 */ 832 if (ggio->gctl_seq != 0) 833 break; 834 } 835 } 836 if (ggio->gctl_unit == G_GATE_NAME_GIVEN) 837 ggio->gctl_unit = sc->sc_unit; 838 mtx_unlock(&sc->sc_queue_mtx); 839 g_gate_release(sc); 840 return (error); 841 } 842 case G_GATE_CMD_START: 843 { 844 struct g_gate_ctl_io *ggio = (void *)addr; 845 846 G_GATE_CHECK_VERSION(ggio); 847 sc = g_gate_hold(ggio->gctl_unit, NULL); 848 if (sc == NULL) 849 return (ENXIO); 850 error = 0; 851 for (;;) { 852 mtx_lock(&sc->sc_queue_mtx); 853 bp = bioq_first(&sc->sc_inqueue); 854 if (bp != NULL) 855 break; 856 if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) { 857 ggio->gctl_error = ECANCELED; 858 mtx_unlock(&sc->sc_queue_mtx); 859 goto start_end; 860 } 861 error = msleep(sc, &sc->sc_queue_mtx, 862 PPAUSE | PDROP | PCATCH, "ggwait", 0); 863 if (error != 0) 864 goto start_end; 865 } 866 ggio->gctl_cmd = bp->bio_cmd; 867 if (bp->bio_cmd == BIO_WRITE && 868 bp->bio_length > ggio->gctl_length) { 869 mtx_unlock(&sc->sc_queue_mtx); 870 ggio->gctl_length = bp->bio_length; 871 ggio->gctl_error = ENOMEM; 872 goto start_end; 873 } 874 bioq_remove(&sc->sc_inqueue, bp); 875 bioq_insert_tail(&sc->sc_outqueue, bp); 876 mtx_unlock(&sc->sc_queue_mtx); 877 878 ggio->gctl_seq = (uintptr_t)bp->bio_driver1; 879 ggio->gctl_offset = bp->bio_offset; 880 ggio->gctl_length = bp->bio_length; 881 882 switch (bp->bio_cmd) { 883 case BIO_READ: 884 case BIO_DELETE: 885 case BIO_FLUSH: 886 case BIO_SPEEDUP: 887 break; 888 case BIO_WRITE: 889 error = copyout(bp->bio_data, ggio->gctl_data, 890 bp->bio_length); 891 if (error != 0) { 892 mtx_lock(&sc->sc_queue_mtx); 893 bioq_remove(&sc->sc_outqueue, bp); 894 bioq_insert_head(&sc->sc_inqueue, bp); 895 mtx_unlock(&sc->sc_queue_mtx); 896 goto start_end; 897 } 898 break; 899 } 900 start_end: 901 g_gate_release(sc); 902 return (error); 903 } 904 case G_GATE_CMD_DONE: 905 { 906 struct g_gate_ctl_io *ggio = (void *)addr; 907 908 G_GATE_CHECK_VERSION(ggio); 909 sc = g_gate_hold(ggio->gctl_unit, NULL); 910 if (sc == NULL) 911 return (ENOENT); 912 error = 0; 913 mtx_lock(&sc->sc_queue_mtx); 914 TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) { 915 if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1) 916 break; 917 } 918 if (bp != NULL) { 919 bioq_remove(&sc->sc_outqueue, bp); 920 sc->sc_queue_count--; 921 } 922 mtx_unlock(&sc->sc_queue_mtx); 923 if (bp == NULL) { 924 /* 925 * Request was probably canceled. 926 */ 927 goto done_end; 928 } 929 if (ggio->gctl_error == EAGAIN) { 930 bp->bio_error = 0; 931 G_GATE_LOGREQ(1, bp, "Request desisted."); 932 mtx_lock(&sc->sc_queue_mtx); 933 sc->sc_queue_count++; 934 bioq_insert_head(&sc->sc_inqueue, bp); 935 wakeup(sc); 936 mtx_unlock(&sc->sc_queue_mtx); 937 } else { 938 bp->bio_error = ggio->gctl_error; 939 if (bp->bio_error == 0) { 940 bp->bio_completed = bp->bio_length; 941 switch (bp->bio_cmd) { 942 case BIO_READ: 943 error = copyin(ggio->gctl_data, 944 bp->bio_data, bp->bio_length); 945 if (error != 0) 946 bp->bio_error = error; 947 break; 948 case BIO_DELETE: 949 case BIO_WRITE: 950 case BIO_FLUSH: 951 case BIO_SPEEDUP: 952 break; 953 } 954 } 955 G_GATE_LOGREQ(2, bp, "Request done."); 956 g_io_deliver(bp, bp->bio_error); 957 } 958 done_end: 959 g_gate_release(sc); 960 return (error); 961 } 962 } 963 return (ENOIOCTL); 964 } 965 966 static void 967 g_gate_device(void) 968 { 969 970 status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600, 971 G_GATE_CTL_NAME); 972 } 973 974 static int 975 g_gate_modevent(module_t mod, int type, void *data) 976 { 977 int error = 0; 978 979 switch (type) { 980 case MOD_LOAD: 981 mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF); 982 g_gate_units = malloc(g_gate_maxunits * sizeof(g_gate_units[0]), 983 M_GATE, M_WAITOK | M_ZERO); 984 g_gate_nunits = 0; 985 g_gate_device(); 986 break; 987 case MOD_UNLOAD: 988 mtx_lock(&g_gate_units_lock); 989 if (g_gate_nunits > 0) { 990 mtx_unlock(&g_gate_units_lock); 991 error = EBUSY; 992 break; 993 } 994 mtx_unlock(&g_gate_units_lock); 995 mtx_destroy(&g_gate_units_lock); 996 if (status_dev != NULL) 997 destroy_dev(status_dev); 998 free(g_gate_units, M_GATE); 999 break; 1000 default: 1001 return (EOPNOTSUPP); 1002 break; 1003 } 1004 1005 return (error); 1006 } 1007 static moduledata_t g_gate_module = { 1008 G_GATE_MOD_NAME, 1009 g_gate_modevent, 1010 NULL 1011 }; 1012 DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE); 1013 DECLARE_GEOM_CLASS(g_gate_class, g_gate); 1014 MODULE_VERSION(geom_gate, 0); 1015