/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2009-2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Pawel Jakub Dawidek
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/fcntl.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <machine/atomic.h>

#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <geom/gate/g_gate.h>

FEATURE(geom_gate, "GEOM Gate module");

static MALLOC_DEFINE(M_GATE, "gg_data", "GEOM Gate Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, gate, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "GEOM_GATE configuration");
static int g_gate_debug = 0;
SYSCTL_INT(_kern_geom_gate, OID_AUTO, debug, CTLFLAG_RWTUN, &g_gate_debug, 0,
    "Debug level");
static u_int g_gate_maxunits = 256;
SYSCTL_UINT(_kern_geom_gate, OID_AUTO, maxunits, CTLFLAG_RDTUN,
    &g_gate_maxunits, 0, "Maximum number of ggate devices");

struct g_class g_gate_class = {
	.name = G_GATE_CLASS_NAME,
	.version = G_VERSION,
};

static struct cdev *status_dev;
static d_ioctl_t g_gate_ioctl;
static struct cdevsw g_gate_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	g_gate_ioctl,
	.d_name =	G_GATE_CTL_NAME
};

static struct g_gate_softc **g_gate_units;
static u_int g_gate_nunits;
static struct mtx g_gate_units_lock;

static void
g_gate_detach(void *arg, int flags __unused)
{
	struct g_consumer *cp = arg;

	g_topology_assert();
	G_GATE_DEBUG(1, "Destroying read consumer on provider %s orphan.",
	    cp->provider->name);
	(void)g_access(cp, -1, 0, 0);
	g_detach(cp);
	g_destroy_consumer(cp);
}

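/*
 * Tear down a device.  The softc is flagged as destroyed, the provider
 * is withered, the watchdog callout is drained and every queued bio is
 * failed with ENXIO.  The function then waits for the last reference
 * to be dropped before freeing the softc.  Called with the topology
 * lock and g_gate_units_lock held; the units lock is dropped early and
 * the topology lock is dropped around the bio cancellation.
 */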
static int
g_gate_destroy(struct g_gate_softc *sc, boolean_t force)
{
	struct bio_queue_head queue;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct bio *bp;

	g_topology_assert();
	mtx_assert(&g_gate_units_lock, MA_OWNED);
	pp = sc->sc_provider;
	if (!force && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		mtx_unlock(&g_gate_units_lock);
		return (EBUSY);
	}
	mtx_unlock(&g_gate_units_lock);
	mtx_lock(&sc->sc_queue_mtx);
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0)
		sc->sc_flags |= G_GATE_FLAG_DESTROY;
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	gp = pp->geom;
	g_wither_provider(pp, ENXIO);
	callout_drain(&sc->sc_callout);
	bioq_init(&queue);
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_takefirst(&sc->sc_inqueue)) != NULL) {
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	while ((bp = bioq_takefirst(&sc->sc_outqueue)) != NULL) {
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	g_topology_unlock();
	while ((bp = bioq_takefirst(&queue)) != NULL) {
		G_GATE_LOGREQ(1, bp, "Request canceled.");
		g_io_deliver(bp, ENXIO);
	}
	mtx_lock(&g_gate_units_lock);
	/* One reference is ours. */
	sc->sc_ref--;
	while (sc->sc_ref > 0)
		msleep(&sc->sc_ref, &g_gate_units_lock, 0, "gg:destroy", 0);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_read_mtx);
	g_topology_lock();
	if ((cp = sc->sc_readcons) != NULL) {
		sc->sc_readcons = NULL;
		(void)g_access(cp, -1, 0, 0);
		g_detach(cp);
		g_destroy_consumer(cp);
	}
	G_GATE_DEBUG(1, "Device %s destroyed.", gp->name);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
	sc->sc_provider = NULL;
	free(sc, M_GATE);
	return (0);
}

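/*
 * Access control for the provider.  Write-only devices refuse read
 * opens; the matching read-only check is intentionally compiled out
 * (see the XXX comment below) so that read-only mounts still work.
 */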
static int
g_gate_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_gate_softc *sc;

	if (dr <= 0 && dw <= 0 && de <= 0)
		return (0);
	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		return (ENXIO);
	/* XXX: Hack to allow read-only mounts. */
#if 0
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0 && dw > 0)
		return (EPERM);
#endif
	if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0 && dr > 0)
		return (EPERM);
	return (0);
}

static void
g_gate_queue_io(struct bio *bp)
{
	struct g_gate_softc *sc;

	sc = bp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	mtx_lock(&sc->sc_queue_mtx);

	if (sc->sc_queue_size > 0 && sc->sc_queue_count > sc->sc_queue_size) {
		mtx_unlock(&sc->sc_queue_mtx);
		G_GATE_LOGREQ(1, bp, "Queue full, request canceled.");
		g_io_deliver(bp, ENOMEM);
		return;
	}

	bp->bio_driver1 = (void *)sc->sc_seq;
	sc->sc_seq++;
	sc->sc_queue_count++;

	bioq_insert_tail(&sc->sc_inqueue, bp);
	wakeup(sc);

	mtx_unlock(&sc->sc_queue_mtx);
}

static void
g_gate_done(struct bio *cbp)
{
	struct g_gate_softc *sc;
	struct bio *pbp;
	struct g_consumer *cp;

	cp = cbp->bio_from;
	pbp = cbp->bio_parent;
	if (cbp->bio_error == 0) {
		pbp->bio_completed = cbp->bio_completed;
		g_destroy_bio(cbp);
		pbp->bio_inbed++;
		g_io_deliver(pbp, 0);
	} else {
		/* If direct read failed, pass it through userland daemon. */
		g_destroy_bio(cbp);
		pbp->bio_children--;
		g_gate_queue_io(pbp);
	}

	sc = cp->geom->softc;
	mtx_lock(&sc->sc_read_mtx);
	if (--cp->index == 0 && sc->sc_readcons != cp)
		g_post_event(g_gate_detach, cp, M_NOWAIT, NULL);
	mtx_unlock(&sc->sc_read_mtx);
}

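/*
 * I/O entry point.  BIO_READ requests are forwarded straight to the
 * read provider when one is configured (g_gate_done() above requeues
 * them to the userland daemon if the direct read fails); supported
 * write-side requests are queued for the daemon via g_gate_queue_io().
 */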
static void
g_gate_start(struct bio *pbp)
{
	struct g_gate_softc *sc;
	struct g_consumer *cp;
	struct bio *cbp;

	sc = pbp->bio_to->geom->softc;
	if (sc == NULL || (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
		g_io_deliver(pbp, ENXIO);
		return;
	}
	G_GATE_LOGREQ(2, pbp, "Request received.");
	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (sc->sc_readcons == NULL)
			break;
		cbp = g_clone_bio(pbp);
		if (cbp == NULL) {
			g_io_deliver(pbp, ENOMEM);
			return;
		}
		mtx_lock(&sc->sc_read_mtx);
		if ((cp = sc->sc_readcons) == NULL) {
			mtx_unlock(&sc->sc_read_mtx);
			g_destroy_bio(cbp);
			pbp->bio_children--;
			break;
		}
		cp->index++;
		cbp->bio_offset = pbp->bio_offset + sc->sc_readoffset;
		mtx_unlock(&sc->sc_read_mtx);
		cbp->bio_done = g_gate_done;
		g_io_request(cbp, cp);
		return;
	case BIO_DELETE:
	case BIO_WRITE:
	case BIO_FLUSH:
	case BIO_SPEEDUP:
		/* XXX: Hack to allow read-only mounts. */
		if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
			g_io_deliver(pbp, EPERM);
			return;
		}
		break;
	case BIO_GETATTR:
	default:
		G_GATE_LOGREQ(2, pbp, "Ignoring request.");
		g_io_deliver(pbp, EOPNOTSUPP);
		return;
	}

	g_gate_queue_io(pbp);
}

static struct g_gate_softc *
g_gate_hold(int unit, const char *name)
{
	struct g_gate_softc *sc = NULL;

	mtx_lock(&g_gate_units_lock);
	if (unit >= 0 && unit < g_gate_maxunits)
		sc = g_gate_units[unit];
	else if (unit == G_GATE_NAME_GIVEN) {
		KASSERT(name != NULL, ("name is NULL"));
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				continue;
			if (strcmp(name,
			    g_gate_units[unit]->sc_provider->name) != 0) {
				continue;
			}
			sc = g_gate_units[unit];
			break;
		}
	}
	if (sc != NULL)
		sc->sc_ref++;
	mtx_unlock(&g_gate_units_lock);
	return (sc);
}

static void
g_gate_release(struct g_gate_softc *sc)
{

	mtx_lock(&g_gate_units_lock);
	sc->sc_ref--;
	KASSERT(sc->sc_ref >= 0, ("Negative sc_ref for %s.", sc->sc_name));
	if (sc->sc_ref == 0 && (sc->sc_flags & G_GATE_FLAG_DESTROY) != 0)
		wakeup(&sc->sc_ref);
	mtx_unlock(&g_gate_units_lock);
}

static int
g_gate_getunit(int unit, int *errorp)
{

	mtx_assert(&g_gate_units_lock, MA_OWNED);
	if (unit >= 0) {
		if (unit >= g_gate_maxunits)
			*errorp = EINVAL;
		else if (g_gate_units[unit] == NULL)
			return (unit);
		else
			*errorp = EEXIST;
	} else {
		for (unit = 0; unit < g_gate_maxunits; unit++) {
			if (g_gate_units[unit] == NULL)
				return (unit);
		}
		*errorp = ENFILE;
	}
	return (-1);
}

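/*
 * Watchdog callout.  Any request that has been sitting in the incoming
 * or outgoing queue for five seconds or more is failed with EIO; the
 * callout then rearms itself unless the device is being destroyed.
 */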
static void
g_gate_guard(void *arg)
{
	struct bio_queue_head queue;
	struct g_gate_softc *sc;
	struct bintime curtime;
	struct bio *bp, *bp2;

	sc = arg;
	binuptime(&curtime);
	g_gate_hold(sc->sc_unit, NULL);
	bioq_init(&queue);
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_inqueue, bp);
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
		if (curtime.sec - bp->bio_t0.sec < 5)
			continue;
		bioq_remove(&sc->sc_outqueue, bp);
		sc->sc_queue_count--;
		bioq_insert_tail(&queue, bp);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	while ((bp = bioq_takefirst(&queue)) != NULL) {
		G_GATE_LOGREQ(1, bp, "Request timeout.");
		g_io_deliver(bp, EIO);
	}
	if ((sc->sc_flags & G_GATE_FLAG_DESTROY) == 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	g_gate_release(sc);
}

static void
g_gate_orphan(struct g_consumer *cp)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	int done;

	g_topology_assert();
	gp = cp->geom;
	sc = gp->softc;
	mtx_lock(&sc->sc_read_mtx);
	if (sc->sc_readcons == cp)
		sc->sc_readcons = NULL;
	done = (cp->index == 0);
	mtx_unlock(&sc->sc_read_mtx);
	if (done)
		g_gate_detach(cp, 0);
}

static void
g_gate_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_gate_softc *sc;

	sc = gp->softc;
	if (sc == NULL || pp != NULL || cp != NULL)
		return;
	sc = g_gate_hold(sc->sc_unit, NULL);
	if (sc == NULL)
		return;
	if ((sc->sc_flags & G_GATE_FLAG_READONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent, "read-only");
	} else if ((sc->sc_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "write-only");
	} else {
		sbuf_printf(sb, "%s<access>%s</access>\n", indent,
		    "read-write");
	}
	if (sc->sc_readcons != NULL) {
		sbuf_printf(sb, "%s<read_offset>%jd</read_offset>\n",
		    indent, (intmax_t)sc->sc_readoffset);
		sbuf_printf(sb, "%s<read_provider>%s</read_provider>\n",
		    indent, sc->sc_readcons->provider->name);
	}
	sbuf_printf(sb, "%s<timeout>%u</timeout>\n", indent, sc->sc_timeout);
	sbuf_printf(sb, "%s<info>%s</info>\n", indent, sc->sc_info);
	sbuf_printf(sb, "%s<queue_count>%u</queue_count>\n", indent,
	    sc->sc_queue_count);
	sbuf_printf(sb, "%s<queue_size>%u</queue_size>\n", indent,
	    sc->sc_queue_size);
	sbuf_printf(sb, "%s<ref>%u</ref>\n", indent, sc->sc_ref);
	sbuf_printf(sb, "%s<unit>%d</unit>\n", indent, sc->sc_unit);
	g_gate_release(sc);
}

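/*
 * Create a new gate device.  The request is validated (media size must
 * be a positive multiple of a power-of-two sector size, the flags and
 * unit number must be sane), a free unit is claimed, an optional read
 * provider is attached and finally the geom and provider are created.
 */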
static int
g_gate_create(struct g_gate_ctl_create *ggio)
{
	struct g_gate_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp, *ropp;
	struct g_consumer *cp;
	char name[NAME_MAX + 1];
	char readprov[NAME_MAX + 1];
	int error = 0, unit;

	if (ggio->gctl_mediasize <= 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if (ggio->gctl_sectorsize <= 0) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if (!powerof2(ggio->gctl_sectorsize)) {
		G_GATE_DEBUG(1, "Invalid sector size.");
		return (EINVAL);
	}
	if ((ggio->gctl_mediasize % ggio->gctl_sectorsize) != 0) {
		G_GATE_DEBUG(1, "Invalid media size.");
		return (EINVAL);
	}
	if ((ggio->gctl_flags & G_GATE_FLAG_READONLY) != 0 &&
	    (ggio->gctl_flags & G_GATE_FLAG_WRITEONLY) != 0) {
		G_GATE_DEBUG(1, "Invalid flags.");
		return (EINVAL);
	}
	if (ggio->gctl_unit != G_GATE_UNIT_AUTO &&
	    ggio->gctl_unit != G_GATE_NAME_GIVEN &&
	    ggio->gctl_unit < 0) {
		G_GATE_DEBUG(1, "Invalid unit number.");
		return (EINVAL);
	}
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN &&
	    ggio->gctl_name[0] == '\0') {
		G_GATE_DEBUG(1, "No device name.");
		return (EINVAL);
	}

	sc = malloc(sizeof(*sc), M_GATE, M_WAITOK | M_ZERO);
	sc->sc_flags = (ggio->gctl_flags & G_GATE_USERFLAGS);
	memset(sc->sc_info, 0, sizeof(sc->sc_info));
	strncpy(sc->sc_info, ggio->gctl_info,
	    MIN(sizeof(sc->sc_info) - 1, sizeof(ggio->gctl_info)));
	sc->sc_seq = 1;
	bioq_init(&sc->sc_inqueue);
	bioq_init(&sc->sc_outqueue);
	mtx_init(&sc->sc_queue_mtx, "gg:queue", NULL, MTX_DEF);
	mtx_init(&sc->sc_read_mtx, "gg:read", NULL, MTX_DEF);
	sc->sc_queue_count = 0;
	sc->sc_queue_size = ggio->gctl_maxcount;
	if (sc->sc_queue_size > G_GATE_MAX_QUEUE_SIZE)
		sc->sc_queue_size = G_GATE_MAX_QUEUE_SIZE;
	sc->sc_timeout = ggio->gctl_timeout;
	callout_init(&sc->sc_callout, 1);

	mtx_lock(&g_gate_units_lock);
	sc->sc_unit = g_gate_getunit(ggio->gctl_unit, &error);
	if (sc->sc_unit < 0)
		goto fail1;
	if (ggio->gctl_unit == G_GATE_NAME_GIVEN) {
		memset(name, 0, sizeof(name));
		strncpy(name, ggio->gctl_name,
		    MIN(sizeof(name) - 1, sizeof(ggio->gctl_name)));
	} else {
		snprintf(name, sizeof(name), "%s%d", G_GATE_PROVIDER_NAME,
		    sc->sc_unit);
	}
	/* Check for name collision. */
	for (unit = 0; unit < g_gate_maxunits; unit++) {
		if (g_gate_units[unit] == NULL)
			continue;
		if (strcmp(name, g_gate_units[unit]->sc_name) != 0)
			continue;
		error = EEXIST;
		goto fail1;
	}
	/*
	 * The local stack buffer 'name' is assigned here only temporarily;
	 * the real provider name is assigned once the provider is created
	 * below.
	 */
	sc->sc_name = name;
	g_gate_units[sc->sc_unit] = sc;
	g_gate_nunits++;
	mtx_unlock(&g_gate_units_lock);

	g_topology_lock();

	if (ggio->gctl_readprov[0] == '\0') {
		ropp = NULL;
	} else {
		memset(readprov, 0, sizeof(readprov));
		strncpy(readprov, ggio->gctl_readprov,
		    MIN(sizeof(readprov) - 1, sizeof(ggio->gctl_readprov)));
		ropp = g_provider_by_name(readprov);
		if (ropp == NULL) {
			G_GATE_DEBUG(1, "Provider %s doesn't exist.", readprov);
			error = EINVAL;
			goto fail2;
		}
		if ((ggio->gctl_readoffset % ggio->gctl_sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			error = EINVAL;
			goto fail2;
		}
		if (ggio->gctl_mediasize + ggio->gctl_readoffset >
		    ropp->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			error = EINVAL;
			goto fail2;
		}
	}

	gp = g_new_geomf(&g_gate_class, "%s", name);
	gp->start = g_gate_start;
	gp->access = g_gate_access;
	gp->orphan = g_gate_orphan;
	gp->dumpconf = g_gate_dumpconf;
	gp->softc = sc;

	if (ropp != NULL) {
		cp = g_new_consumer(gp);
		cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
		error = g_attach(cp, ropp);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to attach to %s.", ropp->name);
			goto fail3;
		}
		error = g_access(cp, 1, 0, 0);
		if (error != 0) {
			G_GATE_DEBUG(1, "Unable to access %s.", ropp->name);
			g_detach(cp);
			goto fail3;
		}
		sc->sc_readcons = cp;
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	ggio->gctl_unit = sc->sc_unit;

	pp = g_new_providerf(gp, "%s", name);
	pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
	pp->mediasize = ggio->gctl_mediasize;
	pp->sectorsize = ggio->gctl_sectorsize;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);

	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	sc->sc_name = sc->sc_provider->name;
	mtx_unlock(&g_gate_units_lock);
	G_GATE_DEBUG(1, "Device %s created.", gp->name);

	if (sc->sc_timeout > 0) {
		callout_reset(&sc->sc_callout, sc->sc_timeout * hz,
		    g_gate_guard, sc);
	}
	return (0);
fail3:
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
fail2:
	g_topology_unlock();
	mtx_lock(&g_gate_units_lock);
	g_gate_units[sc->sc_unit] = NULL;
	KASSERT(g_gate_nunits > 0, ("negative g_gate_nunits?"));
	g_gate_nunits--;
fail1:
	mtx_unlock(&g_gate_units_lock);
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_read_mtx);
	free(sc, M_GATE);
	return (error);
}

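/*
 * Modify an existing device: resize the provider, update the info
 * string, or switch the read provider and read offset, as selected by
 * the GG_MODIFY_* bits in ggio->gctl_modify.
 */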
static int
g_gate_modify(struct g_gate_softc *sc, struct g_gate_ctl_modify *ggio)
{
	char readprov[NAME_MAX + 1];
	struct g_provider *pp;
	struct g_consumer *cp;
	int done, error;

	if ((ggio->gctl_modify & GG_MODIFY_MEDIASIZE) != 0) {
		if (ggio->gctl_mediasize <= 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_mediasize % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid media size.");
			return (EINVAL);
		}
		g_resize_provider(pp, ggio->gctl_mediasize);
		return (0);
	}

	if ((ggio->gctl_modify & GG_MODIFY_INFO) != 0) {
		memset(sc->sc_info, 0, sizeof(sc->sc_info));
		strncpy(sc->sc_info, ggio->gctl_info,
		    MIN(sizeof(sc->sc_info) - 1, sizeof(ggio->gctl_info)));
	}
	cp = NULL;

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		g_topology_lock();
		mtx_lock(&sc->sc_read_mtx);
		if ((cp = sc->sc_readcons) != NULL) {
			sc->sc_readcons = NULL;
			done = (cp->index == 0);
			mtx_unlock(&sc->sc_read_mtx);
			if (done)
				g_gate_detach(cp, 0);
		} else
			mtx_unlock(&sc->sc_read_mtx);
		if (ggio->gctl_readprov[0] != '\0') {
			memset(readprov, 0, sizeof(readprov));
			strncpy(readprov, ggio->gctl_readprov,
			    MIN(sizeof(readprov) - 1,
			    sizeof(ggio->gctl_readprov)));
			pp = g_provider_by_name(readprov);
			if (pp == NULL) {
				g_topology_unlock();
				G_GATE_DEBUG(1, "Provider %s doesn't exist.",
				    readprov);
				return (EINVAL);
			}
			cp = g_new_consumer(sc->sc_provider->geom);
			cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
			error = g_attach(cp, pp);
			if (error != 0) {
				G_GATE_DEBUG(1, "Unable to attach to %s.",
				    pp->name);
			} else {
				error = g_access(cp, 1, 0, 0);
				if (error != 0) {
					G_GATE_DEBUG(1, "Unable to access %s.",
					    pp->name);
					g_detach(cp);
				}
			}
			if (error != 0) {
				g_destroy_consumer(cp);
				g_topology_unlock();
				return (error);
			}
		}
	} else {
		cp = sc->sc_readcons;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READOFFSET) != 0) {
		if (cp == NULL) {
			G_GATE_DEBUG(1, "No read provider.");
			return (EINVAL);
		}
		pp = sc->sc_provider;
		if ((ggio->gctl_readoffset % pp->sectorsize) != 0) {
			G_GATE_DEBUG(1, "Invalid read offset.");
			return (EINVAL);
		}
		if (pp->mediasize + ggio->gctl_readoffset >
		    cp->provider->mediasize) {
			G_GATE_DEBUG(1, "Invalid read offset or media size.");
			return (EINVAL);
		}
		sc->sc_readoffset = ggio->gctl_readoffset;
	}

	if ((ggio->gctl_modify & GG_MODIFY_READPROV) != 0) {
		sc->sc_readcons = cp;
		g_topology_unlock();
	}

	return (0);
}

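/*
 * Control requests arrive via ioctl(2) on the ggctl device node.  A
 * minimal userland service loop looks roughly like the sketch below
 * (illustrative only: error handling and buffer sizing are omitted,
 * and 'fd', 'unit' and 'buf' are assumed to be set up by the caller):
 *
 *	struct g_gate_ctl_io ggio;
 *
 *	ggio.gctl_version = G_GATE_VERSION;
 *	ggio.gctl_unit = unit;
 *	for (;;) {
 *		ggio.gctl_data = buf;
 *		ggio.gctl_length = sizeof(buf);
 *		ggio.gctl_error = 0;
 *		ioctl(fd, G_GATE_CMD_START, &ggio);
 *		(service ggio.gctl_cmd at ggio.gctl_offset here)
 *		ioctl(fd, G_GATE_CMD_DONE, &ggio);
 *	}
 */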
#define	G_GATE_CHECK_VERSION(ggio)	do {				\
	if ((ggio)->gctl_version != G_GATE_VERSION) {			\
		printf("Version mismatch %d != %d.\n",			\
		    ggio->gctl_version, G_GATE_VERSION);		\
		return (EINVAL);					\
	}								\
} while (0)
static int
g_gate_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct g_gate_softc *sc;
	struct bio *bp;
	int error = 0;

	G_GATE_DEBUG(4, "ioctl(%s, %lx, %p, %x, %p)", devtoname(dev), cmd, addr,
	    flags, td);

	switch (cmd) {
	case G_GATE_CMD_CREATE:
	    {
		struct g_gate_ctl_create *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		error = g_gate_create(ggio);
		/*
		 * Reset TDP_GEOM flag.
		 * There are pending events for sure, because we just created
		 * new provider and other classes want to taste it, but we
		 * cannot answer on I/O requests until we're here.
		 */
		td->td_pflags &= ~TDP_GEOM;
		return (error);
	    }
	case G_GATE_CMD_MODIFY:
	    {
		struct g_gate_ctl_modify *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = g_gate_modify(sc, ggio);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_DESTROY:
	    {
		struct g_gate_ctl_destroy *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		g_topology_lock();
		mtx_lock(&g_gate_units_lock);
		error = g_gate_destroy(sc, ggio->gctl_force);
		g_topology_unlock();
		if (error != 0)
			g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_CANCEL:
	    {
		struct g_gate_ctl_cancel *ggio = (void *)addr;
		struct bio *tbp, *lbp;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, ggio->gctl_name);
		if (sc == NULL)
			return (ENXIO);
		lbp = NULL;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
			if (ggio->gctl_seq == 0 ||
			    ggio->gctl_seq == (uintptr_t)bp->bio_driver1) {
				G_GATE_LOGREQ(1, bp, "Request canceled.");
				bioq_remove(&sc->sc_outqueue, bp);
				/*
				 * Be sure to put requests back onto incoming
				 * queue in the proper order.
				 */
				if (lbp == NULL)
					bioq_insert_head(&sc->sc_inqueue, bp);
				else {
					TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
					    lbp, bp, bio_queue);
				}
				lbp = bp;
				/*
				 * If only one request was canceled, leave now.
				 */
				if (ggio->gctl_seq != 0)
					break;
			}
		}
		if (ggio->gctl_unit == G_GATE_NAME_GIVEN)
			ggio->gctl_unit = sc->sc_unit;
		mtx_unlock(&sc->sc_queue_mtx);
		g_gate_release(sc);
		return (error);
	    }
	case G_GATE_CMD_START:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENXIO);
		error = 0;
		for (;;) {
			mtx_lock(&sc->sc_queue_mtx);
			bp = bioq_first(&sc->sc_inqueue);
			if (bp != NULL)
				break;
			if ((sc->sc_flags & G_GATE_FLAG_DESTROY) != 0) {
				ggio->gctl_error = ECANCELED;
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			error = msleep(sc, &sc->sc_queue_mtx,
			    PPAUSE | PDROP | PCATCH, "ggwait", 0);
			if (error != 0)
				goto start_end;
		}
		ggio->gctl_cmd = bp->bio_cmd;
		if (bp->bio_cmd == BIO_WRITE &&
		    bp->bio_length > ggio->gctl_length) {
			mtx_unlock(&sc->sc_queue_mtx);
			ggio->gctl_length = bp->bio_length;
			ggio->gctl_error = ENOMEM;
			goto start_end;
		}
		bioq_remove(&sc->sc_inqueue, bp);
		bioq_insert_tail(&sc->sc_outqueue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		ggio->gctl_seq = (uintptr_t)bp->bio_driver1;
		ggio->gctl_offset = bp->bio_offset;
		ggio->gctl_length = bp->bio_length;

		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_DELETE:
		case BIO_FLUSH:
		case BIO_SPEEDUP:
			break;
		case BIO_WRITE:
			error = copyout(bp->bio_data, ggio->gctl_data,
			    bp->bio_length);
			if (error != 0) {
				mtx_lock(&sc->sc_queue_mtx);
				bioq_remove(&sc->sc_outqueue, bp);
				bioq_insert_head(&sc->sc_inqueue, bp);
				mtx_unlock(&sc->sc_queue_mtx);
				goto start_end;
			}
			break;
		}
start_end:
		g_gate_release(sc);
		return (error);
	    }
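	/*
	 * Complete a request previously handed out by G_GATE_CMD_START:
	 * look the bio up by its sequence number, requeue it if the
	 * daemon answered EAGAIN, otherwise copy read data back in and
	 * deliver the result.
	 */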
	case G_GATE_CMD_DONE:
	    {
		struct g_gate_ctl_io *ggio = (void *)addr;

		G_GATE_CHECK_VERSION(ggio);
		sc = g_gate_hold(ggio->gctl_unit, NULL);
		if (sc == NULL)
			return (ENOENT);
		error = 0;
		mtx_lock(&sc->sc_queue_mtx);
		TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
			if (ggio->gctl_seq == (uintptr_t)bp->bio_driver1)
				break;
		}
		if (bp != NULL) {
			bioq_remove(&sc->sc_outqueue, bp);
			sc->sc_queue_count--;
		}
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp == NULL) {
			/*
			 * Request was probably canceled.
			 */
			goto done_end;
		}
		if (ggio->gctl_error == EAGAIN) {
			bp->bio_error = 0;
			G_GATE_LOGREQ(1, bp, "Request desisted.");
			mtx_lock(&sc->sc_queue_mtx);
			sc->sc_queue_count++;
			bioq_insert_head(&sc->sc_inqueue, bp);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		} else {
			bp->bio_error = ggio->gctl_error;
			if (bp->bio_error == 0) {
				bp->bio_completed = bp->bio_length;
				switch (bp->bio_cmd) {
				case BIO_READ:
					error = copyin(ggio->gctl_data,
					    bp->bio_data, bp->bio_length);
					if (error != 0)
						bp->bio_error = error;
					break;
				case BIO_DELETE:
				case BIO_WRITE:
				case BIO_FLUSH:
				case BIO_SPEEDUP:
					break;
				}
			}
			G_GATE_LOGREQ(2, bp, "Request done.");
			g_io_deliver(bp, bp->bio_error);
		}
done_end:
		g_gate_release(sc);
		return (error);
	    }
	}
	return (ENOIOCTL);
}

static void
g_gate_device(void)
{

	status_dev = make_dev(&g_gate_cdevsw, 0x0, UID_ROOT, GID_WHEEL, 0600,
	    G_GATE_CTL_NAME);
}

static int
g_gate_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&g_gate_units_lock, "gg_units_lock", NULL, MTX_DEF);
		g_gate_units = malloc(g_gate_maxunits * sizeof(g_gate_units[0]),
		    M_GATE, M_WAITOK | M_ZERO);
		g_gate_nunits = 0;
		g_gate_device();
		break;
	case MOD_UNLOAD:
		mtx_lock(&g_gate_units_lock);
		if (g_gate_nunits > 0) {
			mtx_unlock(&g_gate_units_lock);
			error = EBUSY;
			break;
		}
		mtx_unlock(&g_gate_units_lock);
		mtx_destroy(&g_gate_units_lock);
		if (status_dev != NULL)
			destroy_dev(status_dev);
		free(g_gate_units, M_GATE);
		break;
	default:
		return (EOPNOTSUPP);
	}

	return (error);
}

static moduledata_t g_gate_module = {
	G_GATE_MOD_NAME,
	g_gate_modevent,
	NULL
};
DECLARE_MODULE(geom_gate, g_gate_module, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
DECLARE_GEOM_CLASS(g_gate_class, g_gate);
MODULE_VERSION(geom_gate, 0);