1 /*- 2 * Copyright (c) 2004 Max Khon 3 * Copyright (c) 2014 Juniper Networks, Inc. 4 * Copyright (c) 2006-2016 Maxim Sobolev <sobomax@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/bio.h> 34 #include <sys/endian.h> 35 #include <sys/errno.h> 36 #include <sys/kernel.h> 37 #include <sys/lock.h> 38 #include <sys/mutex.h> 39 #include <sys/malloc.h> 40 #include <sys/sysctl.h> 41 #include <sys/systm.h> 42 #include <sys/kthread.h> 43 44 #include <geom/geom.h> 45 46 #include <geom/uzip/g_uzip.h> 47 #include <geom/uzip/g_uzip_cloop.h> 48 #include <geom/uzip/g_uzip_softc.h> 49 #include <geom/uzip/g_uzip_dapi.h> 50 #include <geom/uzip/g_uzip_zlib.h> 51 #include <geom/uzip/g_uzip_lzma.h> 52 #include <geom/uzip/g_uzip_wrkthr.h> 53 54 #include "opt_geom.h" 55 56 MALLOC_DEFINE(M_GEOM_UZIP, "geom_uzip", "GEOM UZIP data structures"); 57 58 FEATURE(geom_uzip, "GEOM read-only compressed disks support"); 59 60 struct g_uzip_blk { 61 uint64_t offset; 62 uint32_t blen; 63 unsigned char last:1; 64 unsigned char padded:1; 65 #define BLEN_UNDEF UINT32_MAX 66 }; 67 68 #ifndef ABS 69 #define ABS(a) ((a) < 0 ? -(a) : (a)) 70 #endif 71 72 #define BLK_IN_RANGE(mcn, bcn, ilen) \ 73 (((bcn) != BLEN_UNDEF) && ( \ 74 ((ilen) >= 0 && (mcn >= bcn) && (mcn <= ((intmax_t)(bcn) + (ilen)))) || \ 75 ((ilen) < 0 && (mcn <= bcn) && (mcn >= ((intmax_t)(bcn) + (ilen)))) \ 76 )) 77 78 #ifdef GEOM_UZIP_DEBUG 79 # define GEOM_UZIP_DBG_DEFAULT 3 80 #else 81 # define GEOM_UZIP_DBG_DEFAULT 0 82 #endif 83 84 #define GUZ_DBG_ERR 1 85 #define GUZ_DBG_INFO 2 86 #define GUZ_DBG_IO 3 87 #define GUZ_DBG_TOC 4 88 89 #define GUZ_DEV_SUFX ".uzip" 90 #define GUZ_DEV_NAME(p) (p GUZ_DEV_SUFX) 91 92 static char g_uzip_attach_to[MAXPATHLEN] = {"*"}; 93 static char g_uzip_noattach_to[MAXPATHLEN] = {GUZ_DEV_NAME("*")}; 94 TUNABLE_STR("kern.geom.uzip.attach_to", g_uzip_attach_to, 95 sizeof(g_uzip_attach_to)); 96 TUNABLE_STR("kern.geom.uzip.noattach_to", g_uzip_noattach_to, 97 sizeof(g_uzip_noattach_to)); 98 99 SYSCTL_DECL(_kern_geom); 100 SYSCTL_NODE(_kern_geom, OID_AUTO, uzip, CTLFLAG_RW, 0, "GEOM_UZIP stuff"); 101 static u_int g_uzip_debug = GEOM_UZIP_DBG_DEFAULT; 102 SYSCTL_UINT(_kern_geom_uzip, OID_AUTO, debug, CTLFLAG_RWTUN, &g_uzip_debug, 0, 103 "Debug level (0-4)"); 104 static u_int g_uzip_debug_block = BLEN_UNDEF; 105 SYSCTL_UINT(_kern_geom_uzip, OID_AUTO, debug_block, CTLFLAG_RWTUN, 106 &g_uzip_debug_block, 0, "Debug operations around specific cluster#"); 107 108 #define DPRINTF(lvl, a) \ 109 if ((lvl) <= g_uzip_debug) { \ 110 printf a; \ 111 } 112 #define DPRINTF_BLK(lvl, cn, a) \ 113 if ((lvl) <= g_uzip_debug || \ 114 BLK_IN_RANGE(cn, g_uzip_debug_block, 8) || \ 115 BLK_IN_RANGE(cn, g_uzip_debug_block, -8)) { \ 116 printf a; \ 117 } 118 #define DPRINTF_BRNG(lvl, bcn, ecn, a) \ 119 KASSERT(bcn < ecn, ("DPRINTF_BRNG: invalid range (%ju, %ju)", \ 120 (uintmax_t)bcn, (uintmax_t)ecn)); \ 121 if (((lvl) <= g_uzip_debug) || \ 122 BLK_IN_RANGE(g_uzip_debug_block, bcn, \ 123 (intmax_t)ecn - (intmax_t)bcn)) { \ 124 printf a; \ 125 } 126 127 #define UZIP_CLASS_NAME "UZIP" 128 129 /* 130 * Maximum allowed valid block size (to prevent foot-shooting) 131 */ 132 #define MAX_BLKSZ (MAXPHYS) 133 134 static char CLOOP_MAGIC_START[] = "#!/bin/sh\n"; 135 136 static void g_uzip_read_done(struct bio *bp); 137 static void g_uzip_do(struct g_uzip_softc *, struct bio *bp); 138 139 static void 140 g_uzip_softc_free(struct g_uzip_softc *sc, struct g_geom *gp) 141 { 142 143 if (gp != NULL) { 144 DPRINTF(GUZ_DBG_INFO, ("%s: %d requests, %d cached\n", 145 gp->name, sc->req_total, sc->req_cached)); 146 } 147 148 mtx_lock(&sc->queue_mtx); 149 sc->wrkthr_flags |= GUZ_SHUTDOWN; 150 wakeup(sc); 151 while (!(sc->wrkthr_flags & GUZ_EXITING)) { 152 msleep(sc->procp, &sc->queue_mtx, PRIBIO, "guzfree", 153 hz / 10); 154 } 155 mtx_unlock(&sc->queue_mtx); 156 157 sc->dcp->free(sc->dcp); 158 free(sc->toc, M_GEOM_UZIP); 159 mtx_destroy(&sc->queue_mtx); 160 mtx_destroy(&sc->last_mtx); 161 free(sc->last_buf, M_GEOM_UZIP); 162 free(sc, M_GEOM_UZIP); 163 } 164 165 static int 166 g_uzip_cached(struct g_geom *gp, struct bio *bp) 167 { 168 struct g_uzip_softc *sc; 169 off_t ofs; 170 size_t blk, blkofs, usz; 171 172 sc = gp->softc; 173 ofs = bp->bio_offset + bp->bio_completed; 174 blk = ofs / sc->blksz; 175 mtx_lock(&sc->last_mtx); 176 if (blk == sc->last_blk) { 177 blkofs = ofs % sc->blksz; 178 usz = sc->blksz - blkofs; 179 if (bp->bio_resid < usz) 180 usz = bp->bio_resid; 181 memcpy(bp->bio_data + bp->bio_completed, sc->last_buf + blkofs, 182 usz); 183 sc->req_cached++; 184 mtx_unlock(&sc->last_mtx); 185 186 DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: offset=%jd: got %jd bytes " 187 "from cache\n", __func__, gp->name, bp, (intmax_t)ofs, 188 (intmax_t)usz)); 189 190 bp->bio_completed += usz; 191 bp->bio_resid -= usz; 192 193 if (bp->bio_resid == 0) { 194 g_io_deliver(bp, 0); 195 return (1); 196 } 197 } else 198 mtx_unlock(&sc->last_mtx); 199 200 return (0); 201 } 202 203 #define BLK_ENDS(sc, bi) ((sc)->toc[(bi)].offset + \ 204 (sc)->toc[(bi)].blen) 205 206 #define BLK_IS_CONT(sc, bi) (BLK_ENDS((sc), (bi) - 1) == \ 207 (sc)->toc[(bi)].offset) 208 #define BLK_IS_NIL(sc, bi) ((sc)->toc[(bi)].blen == 0) 209 210 #define TOFF_2_BOFF(sc, pp, bi) ((sc)->toc[(bi)].offset - \ 211 (sc)->toc[(bi)].offset % (pp)->sectorsize) 212 #define TLEN_2_BLEN(sc, pp, bp, ei) roundup(BLK_ENDS((sc), (ei)) - \ 213 (bp)->bio_offset, (pp)->sectorsize) 214 215 static int 216 g_uzip_request(struct g_geom *gp, struct bio *bp) 217 { 218 struct g_uzip_softc *sc; 219 struct bio *bp2; 220 struct g_consumer *cp; 221 struct g_provider *pp; 222 off_t ofs, start_blk_ofs; 223 size_t i, start_blk, end_blk, zsize; 224 225 if (g_uzip_cached(gp, bp) != 0) 226 return (1); 227 228 sc = gp->softc; 229 230 cp = LIST_FIRST(&gp->consumer); 231 pp = cp->provider; 232 233 ofs = bp->bio_offset + bp->bio_completed; 234 start_blk = ofs / sc->blksz; 235 KASSERT(start_blk < sc->nblocks, ("start_blk out of range")); 236 end_blk = howmany(ofs + bp->bio_resid, sc->blksz); 237 KASSERT(end_blk <= sc->nblocks, ("end_blk out of range")); 238 239 for (; BLK_IS_NIL(sc, start_blk) && start_blk < end_blk; start_blk++) { 240 /* Fill in any leading Nil blocks */ 241 start_blk_ofs = ofs % sc->blksz; 242 zsize = MIN(sc->blksz - start_blk_ofs, bp->bio_resid); 243 DPRINTF_BLK(GUZ_DBG_IO, start_blk, ("%s/%s: %p/%ju: " 244 "filling %ju zero bytes\n", __func__, gp->name, gp, 245 (uintmax_t)bp->bio_completed, (uintmax_t)zsize)); 246 bzero(bp->bio_data + bp->bio_completed, zsize); 247 bp->bio_completed += zsize; 248 bp->bio_resid -= zsize; 249 ofs += zsize; 250 } 251 252 if (start_blk == end_blk) { 253 KASSERT(bp->bio_resid == 0, ("bp->bio_resid is invalid")); 254 /* 255 * No non-Nil data is left, complete request immediately. 256 */ 257 DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: all done returning %ju " 258 "bytes\n", __func__, gp->name, gp, 259 (uintmax_t)bp->bio_completed)); 260 g_io_deliver(bp, 0); 261 return (1); 262 } 263 264 for (i = start_blk + 1; i < end_blk; i++) { 265 /* Trim discontinuous areas if any */ 266 if (!BLK_IS_CONT(sc, i)) { 267 end_blk = i; 268 break; 269 } 270 } 271 272 DPRINTF_BRNG(GUZ_DBG_IO, start_blk, end_blk, ("%s/%s: %p: " 273 "start=%u (%ju[%jd]), end=%u (%ju)\n", __func__, gp->name, bp, 274 (u_int)start_blk, (uintmax_t)sc->toc[start_blk].offset, 275 (intmax_t)sc->toc[start_blk].blen, 276 (u_int)end_blk, (uintmax_t)BLK_ENDS(sc, end_blk - 1))); 277 278 bp2 = g_clone_bio(bp); 279 if (bp2 == NULL) { 280 g_io_deliver(bp, ENOMEM); 281 return (1); 282 } 283 bp2->bio_done = g_uzip_read_done; 284 285 bp2->bio_offset = TOFF_2_BOFF(sc, pp, start_blk); 286 while (1) { 287 bp2->bio_length = TLEN_2_BLEN(sc, pp, bp2, end_blk - 1); 288 if (bp2->bio_length <= MAXPHYS) { 289 break; 290 } 291 if (end_blk == (start_blk + 1)) { 292 break; 293 } 294 end_blk--; 295 } 296 297 DPRINTF(GUZ_DBG_IO, ("%s/%s: bp2->bio_length = %jd, " 298 "bp2->bio_offset = %jd\n", __func__, gp->name, 299 (intmax_t)bp2->bio_length, (intmax_t)bp2->bio_offset)); 300 301 bp2->bio_data = malloc(bp2->bio_length, M_GEOM_UZIP, M_NOWAIT); 302 if (bp2->bio_data == NULL) { 303 g_destroy_bio(bp2); 304 g_io_deliver(bp, ENOMEM); 305 return (1); 306 } 307 308 DPRINTF_BRNG(GUZ_DBG_IO, start_blk, end_blk, ("%s/%s: %p: " 309 "reading %jd bytes from offset %jd\n", __func__, gp->name, bp, 310 (intmax_t)bp2->bio_length, (intmax_t)bp2->bio_offset)); 311 312 g_io_request(bp2, cp); 313 return (0); 314 } 315 316 static void 317 g_uzip_read_done(struct bio *bp) 318 { 319 struct bio *bp2; 320 struct g_geom *gp; 321 struct g_uzip_softc *sc; 322 323 bp2 = bp->bio_parent; 324 gp = bp2->bio_to->geom; 325 sc = gp->softc; 326 327 mtx_lock(&sc->queue_mtx); 328 bioq_disksort(&sc->bio_queue, bp); 329 mtx_unlock(&sc->queue_mtx); 330 wakeup(sc); 331 } 332 333 static int 334 g_uzip_memvcmp(const void *memory, unsigned char val, size_t size) 335 { 336 const u_char *mm; 337 338 mm = (const u_char *)memory; 339 return (*mm == val) && memcmp(mm, mm + 1, size - 1) == 0; 340 } 341 342 static void 343 g_uzip_do(struct g_uzip_softc *sc, struct bio *bp) 344 { 345 struct bio *bp2; 346 struct g_provider *pp; 347 struct g_consumer *cp; 348 struct g_geom *gp; 349 char *data, *data2; 350 off_t ofs; 351 size_t blk, blkofs, len, ulen, firstblk; 352 int err; 353 354 bp2 = bp->bio_parent; 355 gp = bp2->bio_to->geom; 356 357 cp = LIST_FIRST(&gp->consumer); 358 pp = cp->provider; 359 360 bp2->bio_error = bp->bio_error; 361 if (bp2->bio_error != 0) 362 goto done; 363 364 /* Make sure there's forward progress. */ 365 if (bp->bio_completed == 0) { 366 bp2->bio_error = ECANCELED; 367 goto done; 368 } 369 370 ofs = bp2->bio_offset + bp2->bio_completed; 371 firstblk = blk = ofs / sc->blksz; 372 blkofs = ofs % sc->blksz; 373 data = bp->bio_data + sc->toc[blk].offset % pp->sectorsize; 374 data2 = bp2->bio_data + bp2->bio_completed; 375 while (bp->bio_completed && bp2->bio_resid) { 376 if (blk > firstblk && !BLK_IS_CONT(sc, blk)) { 377 DPRINTF_BLK(GUZ_DBG_IO, blk, ("%s/%s: %p: backref'ed " 378 "cluster #%u requested, looping around\n", 379 __func__, gp->name, bp2, (u_int)blk)); 380 goto done; 381 } 382 ulen = MIN(sc->blksz - blkofs, bp2->bio_resid); 383 len = sc->toc[blk].blen; 384 DPRINTF(GUZ_DBG_IO, ("%s/%s: %p/%ju: data2=%p, ulen=%u, " 385 "data=%p, len=%u\n", __func__, gp->name, gp, 386 bp->bio_completed, data2, (u_int)ulen, data, (u_int)len)); 387 if (len == 0) { 388 /* All zero block: no cache update */ 389 zero_block: 390 bzero(data2, ulen); 391 } else if (len <= bp->bio_completed) { 392 mtx_lock(&sc->last_mtx); 393 err = sc->dcp->decompress(sc->dcp, gp->name, data, 394 len, sc->last_buf); 395 if (err != 0 && sc->toc[blk].last != 0) { 396 /* 397 * Last block decompression has failed, check 398 * if it's just zero padding. 399 */ 400 if (g_uzip_memvcmp(data, '\0', len) == 0) { 401 sc->toc[blk].blen = 0; 402 sc->last_blk = -1; 403 mtx_unlock(&sc->last_mtx); 404 len = 0; 405 goto zero_block; 406 } 407 } 408 if (err != 0) { 409 sc->last_blk = -1; 410 mtx_unlock(&sc->last_mtx); 411 bp2->bio_error = EILSEQ; 412 DPRINTF(GUZ_DBG_ERR, ("%s/%s: decompress" 413 "(%p, %ju, %ju) failed\n", __func__, 414 gp->name, sc->dcp, (uintmax_t)blk, 415 (uintmax_t)len)); 416 goto done; 417 } 418 sc->last_blk = blk; 419 memcpy(data2, sc->last_buf + blkofs, ulen); 420 mtx_unlock(&sc->last_mtx); 421 err = sc->dcp->rewind(sc->dcp, gp->name); 422 if (err != 0) { 423 bp2->bio_error = EILSEQ; 424 DPRINTF(GUZ_DBG_ERR, ("%s/%s: rewind(%p) " 425 "failed\n", __func__, gp->name, sc->dcp)); 426 goto done; 427 } 428 data += len; 429 } else 430 break; 431 432 data2 += ulen; 433 bp2->bio_completed += ulen; 434 bp2->bio_resid -= ulen; 435 bp->bio_completed -= len; 436 blkofs = 0; 437 blk++; 438 } 439 440 done: 441 /* Finish processing the request. */ 442 free(bp->bio_data, M_GEOM_UZIP); 443 g_destroy_bio(bp); 444 if (bp2->bio_error != 0 || bp2->bio_resid == 0) 445 g_io_deliver(bp2, bp2->bio_error); 446 else 447 g_uzip_request(gp, bp2); 448 } 449 450 static void 451 g_uzip_start(struct bio *bp) 452 { 453 struct g_provider *pp; 454 struct g_geom *gp; 455 struct g_uzip_softc *sc; 456 457 pp = bp->bio_to; 458 gp = pp->geom; 459 460 DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: cmd=%d, offset=%jd, length=%jd, " 461 "buffer=%p\n", __func__, gp->name, bp, bp->bio_cmd, 462 (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length, bp->bio_data)); 463 464 sc = gp->softc; 465 sc->req_total++; 466 467 if (bp->bio_cmd == BIO_GETATTR) { 468 struct bio *bp2; 469 struct g_consumer *cp; 470 struct g_geom *gp; 471 struct g_provider *pp; 472 473 /* pass on MNT:* requests and ignore others */ 474 if (strncmp(bp->bio_attribute, "MNT:", 4) == 0) { 475 bp2 = g_clone_bio(bp); 476 if (bp2 == NULL) { 477 g_io_deliver(bp, ENOMEM); 478 return; 479 } 480 bp2->bio_done = g_std_done; 481 pp = bp->bio_to; 482 gp = pp->geom; 483 cp = LIST_FIRST(&gp->consumer); 484 g_io_request(bp2, cp); 485 return; 486 } 487 } 488 if (bp->bio_cmd != BIO_READ) { 489 g_io_deliver(bp, EOPNOTSUPP); 490 return; 491 } 492 493 bp->bio_resid = bp->bio_length; 494 bp->bio_completed = 0; 495 496 g_uzip_request(gp, bp); 497 } 498 499 static void 500 g_uzip_orphan(struct g_consumer *cp) 501 { 502 struct g_geom *gp; 503 504 g_trace(G_T_TOPOLOGY, "%s(%p/%s)", __func__, cp, cp->provider->name); 505 g_topology_assert(); 506 507 gp = cp->geom; 508 g_uzip_softc_free(gp->softc, gp); 509 gp->softc = NULL; 510 g_wither_geom(gp, ENXIO); 511 } 512 513 static int 514 g_uzip_access(struct g_provider *pp, int dr, int dw, int de) 515 { 516 struct g_geom *gp; 517 struct g_consumer *cp; 518 519 gp = pp->geom; 520 cp = LIST_FIRST(&gp->consumer); 521 KASSERT (cp != NULL, ("g_uzip_access but no consumer")); 522 523 if (cp->acw + dw > 0) 524 return (EROFS); 525 526 return (g_access(cp, dr, dw, de)); 527 } 528 529 static void 530 g_uzip_spoiled(struct g_consumer *cp) 531 { 532 struct g_geom *gp; 533 534 G_VALID_CONSUMER(cp); 535 gp = cp->geom; 536 g_trace(G_T_TOPOLOGY, "%s(%p/%s)", __func__, cp, gp->name); 537 g_topology_assert(); 538 539 g_uzip_softc_free(gp->softc, gp); 540 gp->softc = NULL; 541 g_wither_geom(gp, ENXIO); 542 } 543 544 static int 545 g_uzip_parse_toc(struct g_uzip_softc *sc, struct g_provider *pp, 546 struct g_geom *gp) 547 { 548 uint32_t i, j, backref_to; 549 uint64_t max_offset, min_offset; 550 struct g_uzip_blk *last_blk; 551 552 min_offset = sizeof(struct cloop_header) + 553 (sc->nblocks + 1) * sizeof(uint64_t); 554 max_offset = sc->toc[0].offset - 1; 555 last_blk = &sc->toc[0]; 556 for (i = 0; i < sc->nblocks; i++) { 557 /* First do some bounds checking */ 558 if ((sc->toc[i].offset < min_offset) || 559 (sc->toc[i].offset > pp->mediasize)) { 560 goto error_offset; 561 } 562 DPRINTF_BLK(GUZ_DBG_IO, i, ("%s: cluster #%u " 563 "offset=%ju max_offset=%ju\n", gp->name, 564 (u_int)i, (uintmax_t)sc->toc[i].offset, 565 (uintmax_t)max_offset)); 566 backref_to = BLEN_UNDEF; 567 if (sc->toc[i].offset < max_offset) { 568 /* 569 * For the backref'ed blocks search already parsed 570 * TOC entries for the matching offset and copy the 571 * size from matched entry. 572 */ 573 for (j = 0; j <= i; j++) { 574 if (sc->toc[j].offset == sc->toc[i].offset && 575 !BLK_IS_NIL(sc, j)) { 576 break; 577 } 578 if (j != i) { 579 continue; 580 } 581 DPRINTF(GUZ_DBG_ERR, ("%s: cannot match " 582 "backref'ed offset at cluster #%u\n", 583 gp->name, i)); 584 return (-1); 585 } 586 sc->toc[i].blen = sc->toc[j].blen; 587 backref_to = j; 588 } else { 589 last_blk = &sc->toc[i]; 590 /* 591 * For the "normal blocks" seek forward until we hit 592 * block whose offset is larger than ours and assume 593 * it's going to be the next one. 594 */ 595 for (j = i + 1; j < sc->nblocks; j++) { 596 if (sc->toc[j].offset > max_offset) { 597 break; 598 } 599 } 600 sc->toc[i].blen = sc->toc[j].offset - 601 sc->toc[i].offset; 602 if (BLK_ENDS(sc, i) > pp->mediasize) { 603 DPRINTF(GUZ_DBG_ERR, ("%s: cluster #%u " 604 "extends past media boundary (%ju > %ju)\n", 605 gp->name, (u_int)i, 606 (uintmax_t)BLK_ENDS(sc, i), 607 (intmax_t)pp->mediasize)); 608 return (-1); 609 } 610 KASSERT(max_offset <= sc->toc[i].offset, ( 611 "%s: max_offset is incorrect: %ju", 612 gp->name, (uintmax_t)max_offset)); 613 max_offset = BLK_ENDS(sc, i) - 1; 614 } 615 DPRINTF_BLK(GUZ_DBG_TOC, i, ("%s: cluster #%u, original %u " 616 "bytes, in %u bytes", gp->name, i, sc->blksz, 617 sc->toc[i].blen)); 618 if (backref_to != BLEN_UNDEF) { 619 DPRINTF_BLK(GUZ_DBG_TOC, i, (" (->#%u)", 620 (u_int)backref_to)); 621 } 622 DPRINTF_BLK(GUZ_DBG_TOC, i, ("\n")); 623 } 624 last_blk->last = 1; 625 /* Do a second pass to validate block lengths */ 626 for (i = 0; i < sc->nblocks; i++) { 627 if (sc->toc[i].blen > sc->dcp->max_blen) { 628 if (sc->toc[i].last == 0) { 629 DPRINTF(GUZ_DBG_ERR, ("%s: cluster #%u " 630 "length (%ju) exceeds " 631 "max_blen (%ju)\n", gp->name, i, 632 (uintmax_t)sc->toc[i].blen, 633 (uintmax_t)sc->dcp->max_blen)); 634 return (-1); 635 } 636 DPRINTF(GUZ_DBG_INFO, ("%s: cluster #%u extra " 637 "padding is detected, trimmed to %ju\n", 638 gp->name, i, (uintmax_t)sc->dcp->max_blen)); 639 sc->toc[i].blen = sc->dcp->max_blen; 640 sc->toc[i].padded = 1; 641 } 642 } 643 return (0); 644 645 error_offset: 646 DPRINTF(GUZ_DBG_ERR, ("%s: cluster #%u: invalid offset %ju, " 647 "min_offset=%ju mediasize=%jd\n", gp->name, (u_int)i, 648 sc->toc[i].offset, min_offset, pp->mediasize)); 649 return (-1); 650 } 651 652 static struct g_geom * 653 g_uzip_taste(struct g_class *mp, struct g_provider *pp, int flags) 654 { 655 int error; 656 uint32_t i, total_offsets, offsets_read, blk; 657 void *buf; 658 struct cloop_header *header; 659 struct g_consumer *cp; 660 struct g_geom *gp; 661 struct g_provider *pp2; 662 struct g_uzip_softc *sc; 663 enum { 664 G_UZIP = 1, 665 G_ULZMA 666 } type; 667 668 g_trace(G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name); 669 g_topology_assert(); 670 671 /* Skip providers that are already open for writing. */ 672 if (pp->acw > 0) 673 return (NULL); 674 675 if ((fnmatch(g_uzip_attach_to, pp->name, 0) != 0) || 676 (fnmatch(g_uzip_noattach_to, pp->name, 0) == 0)) { 677 DPRINTF(GUZ_DBG_INFO, ("%s(%s,%s), ignoring\n", __func__, 678 mp->name, pp->name)); 679 return (NULL); 680 } 681 682 buf = NULL; 683 684 /* 685 * Create geom instance. 686 */ 687 gp = g_new_geomf(mp, GUZ_DEV_NAME("%s"), pp->name); 688 cp = g_new_consumer(gp); 689 error = g_attach(cp, pp); 690 if (error == 0) 691 error = g_access(cp, 1, 0, 0); 692 if (error) { 693 goto e1; 694 } 695 g_topology_unlock(); 696 697 /* 698 * Read cloop header, look for CLOOP magic, perform 699 * other validity checks. 700 */ 701 DPRINTF(GUZ_DBG_INFO, ("%s: media sectorsize %u, mediasize %jd\n", 702 gp->name, pp->sectorsize, (intmax_t)pp->mediasize)); 703 buf = g_read_data(cp, 0, pp->sectorsize, NULL); 704 if (buf == NULL) 705 goto e2; 706 header = (struct cloop_header *) buf; 707 if (strncmp(header->magic, CLOOP_MAGIC_START, 708 sizeof(CLOOP_MAGIC_START) - 1) != 0) { 709 DPRINTF(GUZ_DBG_ERR, ("%s: no CLOOP magic\n", gp->name)); 710 goto e3; 711 } 712 713 switch (header->magic[CLOOP_OFS_COMPR]) { 714 case CLOOP_COMP_LZMA: 715 case CLOOP_COMP_LZMA_DDP: 716 type = G_ULZMA; 717 if (header->magic[CLOOP_OFS_VERSN] < CLOOP_MINVER_LZMA) { 718 DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n", 719 gp->name)); 720 goto e3; 721 } 722 DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_LZMA image found\n", 723 gp->name)); 724 break; 725 case CLOOP_COMP_LIBZ: 726 case CLOOP_COMP_LIBZ_DDP: 727 type = G_UZIP; 728 if (header->magic[CLOOP_OFS_VERSN] < CLOOP_MINVER_ZLIB) { 729 DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n", 730 gp->name)); 731 goto e3; 732 } 733 DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_ZLIB image found\n", 734 gp->name)); 735 break; 736 default: 737 DPRINTF(GUZ_DBG_ERR, ("%s: unsupported image type\n", 738 gp->name)); 739 goto e3; 740 } 741 742 /* 743 * Initialize softc and read offsets. 744 */ 745 sc = malloc(sizeof(*sc), M_GEOM_UZIP, M_WAITOK | M_ZERO); 746 gp->softc = sc; 747 sc->blksz = ntohl(header->blksz); 748 sc->nblocks = ntohl(header->nblocks); 749 if (sc->blksz % 512 != 0) { 750 printf("%s: block size (%u) should be multiple of 512.\n", 751 gp->name, sc->blksz); 752 goto e4; 753 } 754 if (sc->blksz > MAX_BLKSZ) { 755 printf("%s: block size (%u) should not be larger than %d.\n", 756 gp->name, sc->blksz, MAX_BLKSZ); 757 } 758 total_offsets = sc->nblocks + 1; 759 if (sizeof(struct cloop_header) + 760 total_offsets * sizeof(uint64_t) > pp->mediasize) { 761 printf("%s: media too small for %u blocks\n", 762 gp->name, sc->nblocks); 763 goto e4; 764 } 765 sc->toc = malloc(total_offsets * sizeof(struct g_uzip_blk), 766 M_GEOM_UZIP, M_WAITOK | M_ZERO); 767 offsets_read = MIN(total_offsets, 768 (pp->sectorsize - sizeof(*header)) / sizeof(uint64_t)); 769 for (i = 0; i < offsets_read; i++) { 770 sc->toc[i].offset = be64toh(((uint64_t *) (header + 1))[i]); 771 sc->toc[i].blen = BLEN_UNDEF; 772 } 773 DPRINTF(GUZ_DBG_INFO, ("%s: %u offsets in the first sector\n", 774 gp->name, offsets_read)); 775 for (blk = 1; offsets_read < total_offsets; blk++) { 776 uint32_t nread; 777 778 free(buf, M_GEOM); 779 buf = g_read_data( 780 cp, blk * pp->sectorsize, pp->sectorsize, NULL); 781 if (buf == NULL) 782 goto e5; 783 nread = MIN(total_offsets - offsets_read, 784 pp->sectorsize / sizeof(uint64_t)); 785 DPRINTF(GUZ_DBG_TOC, ("%s: %u offsets read from sector %d\n", 786 gp->name, nread, blk)); 787 for (i = 0; i < nread; i++) { 788 sc->toc[offsets_read + i].offset = 789 be64toh(((uint64_t *) buf)[i]); 790 sc->toc[offsets_read + i].blen = BLEN_UNDEF; 791 } 792 offsets_read += nread; 793 } 794 free(buf, M_GEOM); 795 buf = NULL; 796 offsets_read -= 1; 797 DPRINTF(GUZ_DBG_INFO, ("%s: done reading %u block offsets from %u " 798 "sectors\n", gp->name, offsets_read, blk)); 799 if (sc->nblocks != offsets_read) { 800 DPRINTF(GUZ_DBG_ERR, ("%s: read %s offsets than expected " 801 "blocks\n", gp->name, 802 sc->nblocks < offsets_read ? "more" : "less")); 803 goto e5; 804 } 805 806 if (type == G_UZIP) { 807 sc->dcp = g_uzip_zlib_ctor(sc->blksz); 808 } else { 809 sc->dcp = g_uzip_lzma_ctor(sc->blksz); 810 } 811 if (sc->dcp == NULL) { 812 goto e5; 813 } 814 815 /* 816 * "Fake" last+1 block, to make it easier for the TOC parser to 817 * iterate without making the last element a special case. 818 */ 819 sc->toc[sc->nblocks].offset = pp->mediasize; 820 /* Massage TOC (table of contents), make sure it is sound */ 821 if (g_uzip_parse_toc(sc, pp, gp) != 0) { 822 DPRINTF(GUZ_DBG_ERR, ("%s: TOC error\n", gp->name)); 823 goto e6; 824 } 825 mtx_init(&sc->last_mtx, "geom_uzip cache", NULL, MTX_DEF); 826 mtx_init(&sc->queue_mtx, "geom_uzip wrkthread", NULL, MTX_DEF); 827 bioq_init(&sc->bio_queue); 828 sc->last_blk = -1; 829 sc->last_buf = malloc(sc->blksz, M_GEOM_UZIP, M_WAITOK); 830 sc->req_total = 0; 831 sc->req_cached = 0; 832 833 sc->uzip_do = &g_uzip_do; 834 835 error = kproc_create(g_uzip_wrkthr, sc, &sc->procp, 0, 0, "%s", 836 gp->name); 837 if (error != 0) { 838 goto e7; 839 } 840 841 g_topology_lock(); 842 pp2 = g_new_providerf(gp, "%s", gp->name); 843 pp2->sectorsize = 512; 844 pp2->mediasize = (off_t)sc->nblocks * sc->blksz; 845 pp2->stripesize = pp->stripesize; 846 pp2->stripeoffset = pp->stripeoffset; 847 g_error_provider(pp2, 0); 848 g_access(cp, -1, 0, 0); 849 850 DPRINTF(GUZ_DBG_INFO, ("%s: taste ok (%d, %jd), (%d, %d), %x\n", 851 gp->name, pp2->sectorsize, (intmax_t)pp2->mediasize, 852 pp2->stripeoffset, pp2->stripesize, pp2->flags)); 853 DPRINTF(GUZ_DBG_INFO, ("%s: %u x %u blocks\n", gp->name, sc->nblocks, 854 sc->blksz)); 855 return (gp); 856 857 e7: 858 free(sc->last_buf, M_GEOM); 859 mtx_destroy(&sc->queue_mtx); 860 mtx_destroy(&sc->last_mtx); 861 e6: 862 sc->dcp->free(sc->dcp); 863 e5: 864 free(sc->toc, M_GEOM); 865 e4: 866 free(gp->softc, M_GEOM_UZIP); 867 e3: 868 if (buf != NULL) { 869 free(buf, M_GEOM); 870 } 871 e2: 872 g_topology_lock(); 873 g_access(cp, -1, 0, 0); 874 e1: 875 g_detach(cp); 876 g_destroy_consumer(cp); 877 g_destroy_geom(gp); 878 879 return (NULL); 880 } 881 882 static int 883 g_uzip_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp) 884 { 885 struct g_provider *pp; 886 887 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, gp->name); 888 g_topology_assert(); 889 890 if (gp->softc == NULL) { 891 DPRINTF(GUZ_DBG_ERR, ("%s(%s): gp->softc == NULL\n", __func__, 892 gp->name)); 893 return (ENXIO); 894 } 895 896 KASSERT(gp != NULL, ("NULL geom")); 897 pp = LIST_FIRST(&gp->provider); 898 KASSERT(pp != NULL, ("NULL provider")); 899 if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0) 900 return (EBUSY); 901 902 g_uzip_softc_free(gp->softc, gp); 903 gp->softc = NULL; 904 g_wither_geom(gp, ENXIO); 905 906 return (0); 907 } 908 909 static struct g_class g_uzip_class = { 910 .name = UZIP_CLASS_NAME, 911 .version = G_VERSION, 912 .taste = g_uzip_taste, 913 .destroy_geom = g_uzip_destroy_geom, 914 915 .start = g_uzip_start, 916 .orphan = g_uzip_orphan, 917 .access = g_uzip_access, 918 .spoiled = g_uzip_spoiled, 919 }; 920 921 DECLARE_GEOM_CLASS(g_uzip_class, g_uzip); 922 MODULE_DEPEND(g_uzip, zlib, 1, 1, 1); 923