/*-
 * Copyright (c) 2006 Ruslan Ermilov <ru@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <geom/cache/g_cache.h>

FEATURE(geom_cache, "GEOM cache module");

static MALLOC_DEFINE(M_GCACHE, "gcache_data", "GEOM_CACHE Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, cache, CTLFLAG_RW, 0, "GEOM_CACHE stuff");
static u_int g_cache_debug = 0;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, debug, CTLFLAG_RW, &g_cache_debug, 0,
    "Debug level");
static u_int g_cache_enable = 1;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, enable, CTLFLAG_RW, &g_cache_enable, 0,
    "Enable caching of read requests");
static u_int g_cache_timeout = 10;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, timeout, CTLFLAG_RW, &g_cache_timeout,
    0, "Housekeeping timer period (in seconds)");
static u_int g_cache_idletime = 5;
SYSCTL_UINT(_kern_geom_cache, OID_AUTO, idletime, CTLFLAG_RW, &g_cache_idletime,
    0, "Time (in seconds) before an unreferenced entry is considered idle");
static u_int g_cache_used_lo = 5;
static u_int g_cache_used_hi = 20;
static int
sysctl_handle_pct(SYSCTL_HANDLER_ARGS)
{
	u_int val = *(u_int *)arg1;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 100)
		return (EINVAL);
	if ((arg1 == &g_cache_used_lo && val > g_cache_used_hi) ||
	    (arg1 == &g_cache_used_hi && g_cache_used_lo > val))
		return (EINVAL);
	*(u_int *)arg1 = val;
	return (0);
}
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_lo, CTLTYPE_UINT|CTLFLAG_RW,
    &g_cache_used_lo, 0, sysctl_handle_pct, "IU",
    "Target percentage of used entries after trimming");
SYSCTL_PROC(_kern_geom_cache, OID_AUTO, used_hi, CTLTYPE_UINT|CTLFLAG_RW,
    &g_cache_used_hi, 0, sysctl_handle_pct, "IU",
    "Percentage of used entries that triggers trimming");

static int g_cache_destroy(struct g_cache_softc *sc, boolean_t force);
static g_ctl_destroy_geom_t g_cache_destroy_geom;

static g_taste_t g_cache_taste;
static g_ctl_req_t g_cache_config;
static g_dumpconf_t g_cache_dumpconf;

struct g_class g_cache_class = {
	.name = G_CACHE_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_cache_config,
	.taste = g_cache_taste,
	.destroy_geom = g_cache_destroy_geom
};

#define	OFF2BNO(off, sc)	((off) >> (sc)->sc_bshift)
#define	BNO2OFF(bno, sc)	((bno) << (sc)->sc_bshift)
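
/*
 * Allocate a cache entry.  Reuse the least recently used entry from the
 * used list when one is available; otherwise allocate a fresh descriptor
 * and data buffer, up to the sc_maxent limit.  Called with the softc
 * mutex held.
 */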
static struct g_cache_desc *
g_cache_alloc(struct g_cache_softc *sc)
{
	struct g_cache_desc *dp;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	if (!TAILQ_EMPTY(&sc->sc_usedlist)) {
		dp = TAILQ_FIRST(&sc->sc_usedlist);
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused--;
		dp->d_flags = 0;
		LIST_REMOVE(dp, d_next);
		return (dp);
	}
	if (sc->sc_nent > sc->sc_maxent) {
		sc->sc_cachefull++;
		return (NULL);
	}
	dp = malloc(sizeof(*dp), M_GCACHE, M_NOWAIT | M_ZERO);
	if (dp == NULL)
		return (NULL);
	dp->d_data = uma_zalloc(sc->sc_zone, M_NOWAIT);
	if (dp->d_data == NULL) {
		free(dp, M_GCACHE);
		return (NULL);
	}
	sc->sc_nent++;
	return (dp);
}

static void
g_cache_free(struct g_cache_softc *sc, struct g_cache_desc *dp)
{

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	uma_zfree(sc->sc_zone, dp->d_data);
	free(dp, M_GCACHE);
	sc->sc_nent--;
}

static void
g_cache_free_used(struct g_cache_softc *sc)
{
	struct g_cache_desc *dp;
	u_int n;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	n = g_cache_used_lo * sc->sc_maxent / 100;
	while (sc->sc_nused > n) {
		KASSERT(!TAILQ_EMPTY(&sc->sc_usedlist), ("used list empty"));
		dp = TAILQ_FIRST(&sc->sc_usedlist);
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused--;
		LIST_REMOVE(dp, d_next);
		g_cache_free(sc, dp);
	}
}

static void
g_cache_deliver(struct g_cache_softc *sc, struct bio *bp,
    struct g_cache_desc *dp, int error)
{
	off_t off1, off, len;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	KASSERT(OFF2BNO(bp->bio_offset, sc) <= dp->d_bno, ("wrong entry"));
	KASSERT(OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc) >=
	    dp->d_bno, ("wrong entry"));

	off1 = BNO2OFF(dp->d_bno, sc);
	off = MAX(bp->bio_offset, off1);
	len = MIN(bp->bio_offset + bp->bio_length, off1 + sc->sc_bsize) - off;

	if (bp->bio_error == 0)
		bp->bio_error = error;
	if (bp->bio_error == 0) {
		bcopy(dp->d_data + (off - off1),
		    bp->bio_data + (off - bp->bio_offset), len);
	}
	bp->bio_completed += len;
	KASSERT(bp->bio_completed <= bp->bio_length, ("extra data"));
	if (bp->bio_completed == bp->bio_length) {
		if (bp->bio_error != 0)
			bp->bio_completed = 0;
		g_io_deliver(bp, bp->bio_error);
	}

	if (dp->d_flags & D_FLAG_USED) {
		TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
	} else if (OFF2BNO(off + len, sc) > dp->d_bno) {
		TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
		sc->sc_nused++;
		dp->d_flags |= D_FLAG_USED;
	}
	dp->d_atime = time_uptime;
}
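
/*
 * Completion handler for a cache fill request: deliver the block to every
 * bio waiting on this entry, then drop the entry if it was invalidated
 * while the read was in flight or if the read failed.
 */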
static void
g_cache_done(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_cache_desc *dp;
	struct bio *bp2, *tmpbp;

	sc = bp->bio_from->geom->softc;
	KASSERT(G_CACHE_DESC1(bp) == sc, ("corrupt bio_caller in g_cache_done()"));
	dp = G_CACHE_DESC2(bp);
	mtx_lock(&sc->sc_mtx);
	bp2 = dp->d_biolist;
	while (bp2 != NULL) {
		KASSERT(G_CACHE_NEXT_BIO1(bp2) == sc, ("corrupt bio_driver in g_cache_done()"));
		tmpbp = G_CACHE_NEXT_BIO2(bp2);
		g_cache_deliver(sc, bp2, dp, bp->bio_error);
		bp2 = tmpbp;
	}
	dp->d_biolist = NULL;
	if (dp->d_flags & D_FLAG_INVALID) {
		sc->sc_invalid--;
		g_cache_free(sc, dp);
	} else if (bp->bio_error) {
		LIST_REMOVE(dp, d_next);
		if (dp->d_flags & D_FLAG_USED) {
			TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
			sc->sc_nused--;
		}
		g_cache_free(sc, dp);
	}
	mtx_unlock(&sc->sc_mtx);
	g_destroy_bio(bp);
}

static struct g_cache_desc *
g_cache_lookup(struct g_cache_softc *sc, off_t bno)
{
	struct g_cache_desc *dp;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	LIST_FOREACH(dp, &sc->sc_desclist[G_CACHE_BUCKET(bno)], d_next)
		if (dp->d_bno == bno)
			return (dp);
	return (NULL);
}

static int
g_cache_read(struct g_cache_softc *sc, struct bio *bp)
{
	struct bio *cbp;
	struct g_cache_desc *dp;

	mtx_lock(&sc->sc_mtx);
	dp = g_cache_lookup(sc,
	    OFF2BNO(bp->bio_offset + bp->bio_completed, sc));
	if (dp != NULL) {
		/* Add to waiters list or deliver. */
		sc->sc_cachehits++;
		if (dp->d_biolist != NULL) {
			G_CACHE_NEXT_BIO1(bp) = sc;
			G_CACHE_NEXT_BIO2(bp) = dp->d_biolist;
			dp->d_biolist = bp;
		} else
			g_cache_deliver(sc, bp, dp, 0);
		mtx_unlock(&sc->sc_mtx);
		return (0);
	}

	/* Cache miss.  Allocate entry and schedule bio. */
	sc->sc_cachemisses++;
	dp = g_cache_alloc(sc);
	if (dp == NULL) {
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_cache_free(sc, dp);
		mtx_unlock(&sc->sc_mtx);
		return (ENOMEM);
	}

	dp->d_bno = OFF2BNO(bp->bio_offset + bp->bio_completed, sc);
	G_CACHE_NEXT_BIO1(bp) = sc;
	G_CACHE_NEXT_BIO2(bp) = NULL;
	dp->d_biolist = bp;
	LIST_INSERT_HEAD(&sc->sc_desclist[G_CACHE_BUCKET(dp->d_bno)],
	    dp, d_next);
	mtx_unlock(&sc->sc_mtx);

	G_CACHE_DESC1(cbp) = sc;
	G_CACHE_DESC2(cbp) = dp;
	cbp->bio_done = g_cache_done;
	cbp->bio_offset = BNO2OFF(dp->d_bno, sc);
	cbp->bio_data = dp->d_data;
	cbp->bio_length = sc->sc_bsize;
	g_io_request(cbp, LIST_FIRST(&bp->bio_to->geom->consumer));
	return (0);
}

static void
g_cache_invalidate(struct g_cache_softc *sc, struct bio *bp)
{
	struct g_cache_desc *dp;
	off_t bno, lim;

	mtx_lock(&sc->sc_mtx);
	bno = OFF2BNO(bp->bio_offset, sc);
	lim = OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc);
	do {
		if ((dp = g_cache_lookup(sc, bno)) != NULL) {
			LIST_REMOVE(dp, d_next);
			if (dp->d_flags & D_FLAG_USED) {
				TAILQ_REMOVE(&sc->sc_usedlist, dp, d_used);
				sc->sc_nused--;
			}
			if (dp->d_biolist == NULL)
				g_cache_free(sc, dp);
			else {
				dp->d_flags = D_FLAG_INVALID;
				sc->sc_invalid++;
			}
		}
		bno++;
	} while (bno <= lim);
	mtx_unlock(&sc->sc_mtx);
}
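
/*
 * Main request dispatcher.  Reads contained in a single cache block are
 * served from (or scheduled into) the cache; reads spanning exactly two
 * blocks may be satisfied from a cached first block followed by a cache
 * read of the second.  Anything else, including all writes (which also
 * invalidate overlapping entries), is passed straight down to the consumer.
 */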
static void
g_cache_start(struct bio *bp)
{
	struct g_cache_softc *sc;
	struct g_geom *gp;
	struct g_cache_desc *dp;
	struct bio *cbp;

	gp = bp->bio_to->geom;
	sc = gp->softc;
	G_CACHE_LOGREQ(bp, "Request received.");
	switch (bp->bio_cmd) {
	case BIO_READ:
		sc->sc_reads++;
		sc->sc_readbytes += bp->bio_length;
		if (!g_cache_enable)
			break;
		if (bp->bio_offset + bp->bio_length > sc->sc_tail)
			break;
		if (OFF2BNO(bp->bio_offset, sc) ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			if (g_cache_read(sc, bp) == 0)
				return;
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		} else if (OFF2BNO(bp->bio_offset, sc) + 1 ==
		    OFF2BNO(bp->bio_offset + bp->bio_length - 1, sc)) {
			mtx_lock(&sc->sc_mtx);
			dp = g_cache_lookup(sc, OFF2BNO(bp->bio_offset, sc));
			if (dp == NULL || dp->d_biolist != NULL) {
				mtx_unlock(&sc->sc_mtx);
				break;
			}
			sc->sc_cachereads++;
			sc->sc_cachereadbytes += bp->bio_length;
			g_cache_deliver(sc, bp, dp, 0);
			mtx_unlock(&sc->sc_mtx);
			if (g_cache_read(sc, bp) == 0)
				return;
			sc->sc_cachereads--;
			sc->sc_cachereadbytes -= bp->bio_length;
			break;
		}
		break;
	case BIO_WRITE:
		sc->sc_writes++;
		sc->sc_wrotebytes += bp->bio_length;
		g_cache_invalidate(sc, bp);
		break;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	G_CACHE_LOGREQ(cbp, "Sending request.");
	g_io_request(cbp, LIST_FIRST(&gp->consumer));
}

static void
g_cache_go(void *arg)
{
	struct g_cache_softc *sc = arg;
	struct g_cache_desc *dp;
	int i;

	mtx_assert(&sc->sc_mtx, MA_OWNED);

	/* Forcibly mark idle ready entries as used. */
	for (i = 0; i < G_CACHE_BUCKETS; i++) {
		LIST_FOREACH(dp, &sc->sc_desclist[i], d_next) {
			if (dp->d_flags & D_FLAG_USED ||
			    dp->d_biolist != NULL ||
			    time_uptime - dp->d_atime < g_cache_idletime)
				continue;
			TAILQ_INSERT_TAIL(&sc->sc_usedlist, dp, d_used);
			sc->sc_nused++;
			dp->d_flags |= D_FLAG_USED;
		}
	}

	/* Keep the number of used entries low. */
	if (sc->sc_nused > g_cache_used_hi * sc->sc_maxent / 100)
		g_cache_free_used(sc);

	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
}

static int
g_cache_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;

	gp = pp->geom;
	cp = LIST_FIRST(&gp->consumer);
	error = g_access(cp, dr, dw, de);

	return (error);
}

static void
g_cache_orphan(struct g_consumer *cp)
{

	g_topology_assert();
	g_cache_destroy(cp->geom->softc, 1);
}

static struct g_cache_softc *
g_cache_find_device(struct g_class *mp, const char *name)
{
	struct g_geom *gp;

	LIST_FOREACH(gp, &mp->geom, geom) {
		if (strcmp(gp->name, name) == 0)
			return (gp->softc);
	}
	return (NULL);
}
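
/*
 * Create a cache geom on top of provider pp.  The metadata supplies the
 * entry count and block size; for automatic (tasted) devices the last
 * sector of the provider is reserved for metadata and hidden from the
 * new provider.
 */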
static struct g_geom *
g_cache_create(struct g_class *mp, struct g_provider *pp,
    const struct g_cache_metadata *md, u_int type)
{
	struct g_cache_softc *sc;
	struct g_geom *gp;
	struct g_provider *newpp;
	struct g_consumer *cp;
	u_int bshift;
	int i;

	g_topology_assert();

	gp = NULL;
	newpp = NULL;
	cp = NULL;

	G_CACHE_DEBUG(1, "Creating device %s.", md->md_name);

	/* Cache size is minimum 100. */
	if (md->md_size < 100) {
		G_CACHE_DEBUG(0, "Invalid size for device %s.", md->md_name);
		return (NULL);
	}

	/* Block size restrictions. */
	bshift = ffs(md->md_bsize) - 1;
	if (md->md_bsize == 0 || md->md_bsize > MAXPHYS ||
	    md->md_bsize != 1 << bshift ||
	    (md->md_bsize % pp->sectorsize) != 0) {
		G_CACHE_DEBUG(0, "Invalid blocksize for provider %s.", pp->name);
		return (NULL);
	}

	/* Check for duplicate unit. */
	if (g_cache_find_device(mp, (const char *)&md->md_name) != NULL) {
		G_CACHE_DEBUG(0, "Provider %s already exists.", md->md_name);
		return (NULL);
	}

	gp = g_new_geomf(mp, "%s", md->md_name);
	if (gp == NULL) {
		G_CACHE_DEBUG(0, "Cannot create geom %s.", md->md_name);
		return (NULL);
	}
	gp->softc = NULL;	/* for a moment */

	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	sc->sc_type = type;
	sc->sc_bshift = bshift;
	sc->sc_bsize = 1 << bshift;
	sc->sc_zone = uma_zcreate("gcache", sc->sc_bsize, NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	mtx_init(&sc->sc_mtx, "GEOM CACHE mutex", NULL, MTX_DEF);
	for (i = 0; i < G_CACHE_BUCKETS; i++)
		LIST_INIT(&sc->sc_desclist[i]);
	TAILQ_INIT(&sc->sc_usedlist);
	sc->sc_maxent = md->md_size;
	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
	gp->softc = sc;
	sc->sc_geom = gp;
	gp->start = g_cache_start;
	gp->orphan = g_cache_orphan;
	gp->access = g_cache_access;
	gp->dumpconf = g_cache_dumpconf;

	newpp = g_new_providerf(gp, "cache/%s", gp->name);
	if (newpp == NULL) {
		G_CACHE_DEBUG(0, "Cannot create provider cache/%s.", gp->name);
		goto fail;
	}
	newpp->sectorsize = pp->sectorsize;
	newpp->mediasize = pp->mediasize;
	if (type == G_CACHE_TYPE_AUTOMATIC)
		newpp->mediasize -= pp->sectorsize;
	sc->sc_tail = BNO2OFF(OFF2BNO(newpp->mediasize, sc), sc);

	cp = g_new_consumer(gp);
	if (cp == NULL) {
		G_CACHE_DEBUG(0, "Cannot create consumer for %s.", gp->name);
		goto fail;
	}
	if (g_attach(cp, pp) != 0) {
		G_CACHE_DEBUG(0, "Cannot attach to provider %s.", pp->name);
		goto fail;
	}

	g_error_provider(newpp, 0);
	G_CACHE_DEBUG(0, "Device %s created.", gp->name);
	callout_reset(&sc->sc_callout, g_cache_timeout * hz, g_cache_go, sc);
	return (gp);
fail:
	if (cp != NULL) {
		if (cp->provider != NULL)
			g_detach(cp);
		g_destroy_consumer(cp);
	}
	if (newpp != NULL)
		g_destroy_provider(newpp);
	if (gp != NULL) {
		if (gp->softc != NULL) {
			mtx_destroy(&sc->sc_mtx);
			uma_zdestroy(sc->sc_zone);	/* release the data zone too */
			g_free(gp->softc);
		}
		g_destroy_geom(gp);
	}
	return (NULL);
}

static int
g_cache_destroy(struct g_cache_softc *sc, boolean_t force)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_cache_desc *dp, *dp2;
	int i;

	g_topology_assert();
	if (sc == NULL)
		return (ENXIO);
	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_CACHE_DEBUG(0, "Device %s is still open, so it "
			    "can't be definitely removed.", pp->name);
		} else {
			G_CACHE_DEBUG(1, "Device %s is still open (r%dw%de%d).",
			    pp->name, pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	} else {
		G_CACHE_DEBUG(0, "Device %s removed.", gp->name);
	}
	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_mtx);
	for (i = 0; i < G_CACHE_BUCKETS; i++) {
		dp = LIST_FIRST(&sc->sc_desclist[i]);
		while (dp != NULL) {
			dp2 = LIST_NEXT(dp, d_next);
			g_cache_free(sc, dp);
			dp = dp2;
		}
	}
	mtx_unlock(&sc->sc_mtx);
	mtx_destroy(&sc->sc_mtx);
	uma_zdestroy(sc->sc_zone);
	g_free(sc);
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);

	return (0);
}

static int
g_cache_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
{

	return (g_cache_destroy(gp->softc, 0));
}
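
/*
 * On-disk metadata lives in the last sector of the underlying provider.
 */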
static int
g_cache_read_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL)
		return (error);

	/* Decode metadata. */
	cache_metadata_decode(buf, md);
	g_free(buf);

	return (0);
}

static int
g_cache_write_metadata(struct g_consumer *cp, struct g_cache_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 0, 1, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	buf = malloc((size_t)pp->sectorsize, M_GCACHE, M_WAITOK | M_ZERO);
	cache_metadata_encode(md, buf);
	g_topology_unlock();
	error = g_write_data(cp, pp->mediasize - pp->sectorsize, buf, pp->sectorsize);
	g_topology_lock();
	g_access(cp, 0, -1, 0);
	free(buf, M_GCACHE);

	return (error);
}

static struct g_geom *
g_cache_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_cache_metadata md;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	G_CACHE_DEBUG(3, "Tasting %s.", pp->name);

	gp = g_new_geomf(mp, "cache:taste");
	gp->start = g_cache_start;
	gp->orphan = g_cache_orphan;
	gp->access = g_cache_access;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_cache_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);

	if (strcmp(md.md_magic, G_CACHE_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_CACHE_VERSION) {
		printf("geom_cache.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if (md.md_provsize != pp->mediasize)
		return (NULL);

	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_AUTOMATIC);
	if (gp == NULL) {
		G_CACHE_DEBUG(0, "Can't create %s.", md.md_name);
		return (NULL);
	}
	return (gp);
}
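
/*
 * "create" verb handler: build metadata from the request arguments and
 * create a manual (non-tasted, non-persistent) cache device.
 */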
static void
g_cache_ctl_create(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_provider *pp;
	struct g_geom *gp;
	intmax_t *bsize, *size;
	const char *name;
	int *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 2) {
		gctl_error(req, "Invalid number of arguments.");
		return;
	}

	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	strlcpy(md.md_name, name, sizeof(md.md_name));

	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	if ((u_int)*size < 100) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	md.md_size = (u_int)*size;

	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize < 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}
	md.md_bsize = (u_int)*bsize;

	/* This field is not important here. */
	md.md_provsize = 0;

	name = gctl_get_asciiparam(req, "arg1");
	if (name == NULL) {
		gctl_error(req, "No 'arg1' argument");
		return;
	}
	if (strncmp(name, "/dev/", strlen("/dev/")) == 0)
		name += strlen("/dev/");
	pp = g_provider_by_name(name);
	if (pp == NULL) {
		G_CACHE_DEBUG(1, "Provider %s is invalid.", name);
		gctl_error(req, "Provider %s is invalid.", name);
		return;
	}
	gp = g_cache_create(mp, pp, &md, G_CACHE_TYPE_MANUAL);
	if (gp == NULL) {
		gctl_error(req, "Can't create %s.", md.md_name);
		return;
	}
}

static void
g_cache_ctl_configure(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_metadata md;
	struct g_cache_softc *sc;
	struct g_consumer *cp;
	intmax_t *bsize, *size;
	const char *name;
	int error, *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs != 1) {
		gctl_error(req, "Missing device.");
		return;
	}

	name = gctl_get_asciiparam(req, "arg0");
	if (name == NULL) {
		gctl_error(req, "No 'arg0' argument");
		return;
	}
	sc = g_cache_find_device(mp, name);
	if (sc == NULL) {
		G_CACHE_DEBUG(1, "Device %s is invalid.", name);
		gctl_error(req, "Device %s is invalid.", name);
		return;
	}

	size = gctl_get_paraml(req, "size", sizeof(*size));
	if (size == NULL) {
		gctl_error(req, "No '%s' argument", "size");
		return;
	}
	if ((u_int)*size != 0 && (u_int)*size < 100) {
		gctl_error(req, "Invalid '%s' argument", "size");
		return;
	}
	if ((u_int)*size != 0)
		sc->sc_maxent = (u_int)*size;

	bsize = gctl_get_paraml(req, "blocksize", sizeof(*bsize));
	if (bsize == NULL) {
		gctl_error(req, "No '%s' argument", "blocksize");
		return;
	}
	if (*bsize < 0) {
		gctl_error(req, "Invalid '%s' argument", "blocksize");
		return;
	}

	if (sc->sc_type != G_CACHE_TYPE_AUTOMATIC)
		return;

	strlcpy(md.md_name, name, sizeof(md.md_name));
	strlcpy(md.md_magic, G_CACHE_MAGIC, sizeof(md.md_magic));
	md.md_version = G_CACHE_VERSION;
	if ((u_int)*size != 0)
		md.md_size = (u_int)*size;
	else
		md.md_size = sc->sc_maxent;
	if ((u_int)*bsize != 0)
		md.md_bsize = (u_int)*bsize;
	else
		md.md_bsize = sc->sc_bsize;
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	md.md_provsize = cp->provider->mediasize;
	error = g_cache_write_metadata(cp, &md);
	if (error == 0)
		G_CACHE_DEBUG(2, "Metadata on %s updated.", cp->provider->name);
	else
		G_CACHE_DEBUG(0, "Cannot update metadata on %s (error=%d).",
		    cp->provider->name, error);
}
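
/*
 * "destroy"/"stop" verb handler: tear down each named device; the "force"
 * flag allows removal of devices that are still open.
 */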
static void
g_cache_ctl_destroy(struct gctl_req *req, struct g_class *mp)
{
	int *nargs, *force, error, i;
	struct g_cache_softc *sc;
	const char *name;
	char param[16];

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}
	force = gctl_get_paraml(req, "force", sizeof(*force));
	if (force == NULL) {
		gctl_error(req, "No 'force' argument");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument", i);
			return;
		}
		sc = g_cache_find_device(mp, name);
		if (sc == NULL) {
			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
			gctl_error(req, "Device %s is invalid.", name);
			return;
		}
		error = g_cache_destroy(sc, *force);
		if (error != 0) {
			gctl_error(req, "Cannot destroy device %s (error=%d).",
			    sc->sc_name, error);
			return;
		}
	}
}

static void
g_cache_ctl_reset(struct gctl_req *req, struct g_class *mp)
{
	struct g_cache_softc *sc;
	const char *name;
	char param[16];
	int i, *nargs;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument", i);
			return;
		}
		sc = g_cache_find_device(mp, name);
		if (sc == NULL) {
			G_CACHE_DEBUG(1, "Device %s is invalid.", name);
			gctl_error(req, "Device %s is invalid.", name);
			return;
		}
		sc->sc_reads = 0;
		sc->sc_readbytes = 0;
		sc->sc_cachereads = 0;
		sc->sc_cachereadbytes = 0;
		sc->sc_cachehits = 0;
		sc->sc_cachemisses = 0;
		sc->sc_cachefull = 0;
		sc->sc_writes = 0;
		sc->sc_wrotebytes = 0;
	}
}

static void
g_cache_config(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	uint32_t *version;

	g_topology_assert();

	version = gctl_get_paraml(req, "version", sizeof(*version));
	if (version == NULL) {
		gctl_error(req, "No '%s' argument.", "version");
		return;
	}
	if (*version != G_CACHE_VERSION) {
		gctl_error(req, "Userland and kernel parts are out of sync.");
		return;
	}

	if (strcmp(verb, "create") == 0) {
		g_cache_ctl_create(req, mp);
		return;
	} else if (strcmp(verb, "configure") == 0) {
		g_cache_ctl_configure(req, mp);
		return;
	} else if (strcmp(verb, "destroy") == 0 ||
	    strcmp(verb, "stop") == 0) {
		g_cache_ctl_destroy(req, mp);
		return;
	} else if (strcmp(verb, "reset") == 0) {
		g_cache_ctl_reset(req, mp);
		return;
	}

	gctl_error(req, "Unknown verb.");
}
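
/*
 * Export per-device configuration and statistics as XML for geom(8).
 */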
"%s<ReadBytes>%ju</ReadBytes>\n", indent, 1023 sc->sc_readbytes); 1024 sbuf_printf(sb, "%s<CacheReads>%ju</CacheReads>\n", indent, 1025 sc->sc_cachereads); 1026 sbuf_printf(sb, "%s<CacheReadBytes>%ju</CacheReadBytes>\n", indent, 1027 sc->sc_cachereadbytes); 1028 sbuf_printf(sb, "%s<CacheHits>%ju</CacheHits>\n", indent, 1029 sc->sc_cachehits); 1030 sbuf_printf(sb, "%s<CacheMisses>%ju</CacheMisses>\n", indent, 1031 sc->sc_cachemisses); 1032 sbuf_printf(sb, "%s<CacheFull>%ju</CacheFull>\n", indent, 1033 sc->sc_cachefull); 1034 sbuf_printf(sb, "%s<Writes>%ju</Writes>\n", indent, sc->sc_writes); 1035 sbuf_printf(sb, "%s<WroteBytes>%ju</WroteBytes>\n", indent, 1036 sc->sc_wrotebytes); 1037 } 1038 1039 DECLARE_GEOM_CLASS(g_cache_class, g_cache); 1040