xref: /freebsd/sys/geom/uzip/g_uzip.c (revision ec65e4f8d0654361df5e97d4de3518edebf76b46)
1 /*-
2  * Copyright (c) 2004 Max Khon
3  * Copyright (c) 2014 Juniper Networks, Inc.
4  * Copyright (c) 2006-2016 Maxim Sobolev <sobomax@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/bio.h>
34 #include <sys/endian.h>
35 #include <sys/errno.h>
36 #include <sys/kernel.h>
37 #include <sys/lock.h>
38 #include <sys/mutex.h>
39 #include <sys/malloc.h>
40 #include <sys/sysctl.h>
41 #include <sys/systm.h>
42 #include <sys/kthread.h>
43 
44 #include <geom/geom.h>
45 
46 #include <geom/uzip/g_uzip.h>
47 #include <geom/uzip/g_uzip_cloop.h>
48 #include <geom/uzip/g_uzip_softc.h>
49 #include <geom/uzip/g_uzip_dapi.h>
50 #include <geom/uzip/g_uzip_zlib.h>
51 #include <geom/uzip/g_uzip_lzma.h>
52 #include <geom/uzip/g_uzip_wrkthr.h>
53 
MALLOC_DEFINE(M_GEOM_UZIP, "geom_uzip", "GEOM UZIP data structures");

FEATURE(geom_uzip, "GEOM read-only compressed disks support");

/* Per-cluster TOC entry: where the compressed cluster lives on media. */
struct g_uzip_blk {
        uint64_t offset;	/* Byte offset of the compressed data. */
        uint32_t blen;		/* Compressed length; 0 = all-zero ("Nil") block. */
#define BLEN_UNDEF      UINT32_MAX	/* Length not yet computed by the TOC parser. */
};

#ifndef ABS
#define	ABS(a)			((a) < 0 ? -(a) : (a))
#endif

/*
 * True when cluster "mcn" lies within "ilen" clusters of "bcn" (forwards
 * for ilen >= 0, backwards for ilen < 0).  Used to scope debug output
 * around the cluster selected via kern.geom.uzip.debug_block.
 */
#define BLK_IN_RANGE(mcn, bcn, ilen)	\
    (((bcn) != BLEN_UNDEF) && ( \
	((ilen) >= 0 && (mcn >= bcn) && (mcn <= ((intmax_t)(bcn) + (ilen)))) || \
	((ilen) < 0 && (mcn <= bcn) && (mcn >= ((intmax_t)(bcn) + (ilen)))) \
    ))

#ifdef GEOM_UZIP_DEBUG
# define GEOM_UZIP_DBG_DEFAULT	3
#else
# define GEOM_UZIP_DBG_DEFAULT	0
#endif

/* Debug verbosity levels, lowest to highest. */
#define	GUZ_DBG_ERR	1
#define	GUZ_DBG_INFO	2
#define	GUZ_DBG_IO	3
#define	GUZ_DBG_TOC	4

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, uzip, CTLFLAG_RW, 0, "GEOM_UZIP stuff");
static u_int g_uzip_debug = GEOM_UZIP_DBG_DEFAULT;
SYSCTL_UINT(_kern_geom_uzip, OID_AUTO, debug, CTLFLAG_RWTUN, &g_uzip_debug, 0,
    "Debug level (0-4)");
static u_int g_uzip_debug_block = BLEN_UNDEF;
SYSCTL_UINT(_kern_geom_uzip, OID_AUTO, debug_block, CTLFLAG_RWTUN,
    &g_uzip_debug_block, 0, "Debug operations around specific cluster#");

/* Print when the message level is within the configured verbosity. */
#define	DPRINTF(lvl, a)		\
	if ((lvl) <= g_uzip_debug) { \
		printf a; \
	}
/* As DPRINTF, but also fires for clusters within +/- 8 of debug_block. */
#define	DPRINTF_BLK(lvl, cn, a)	\
	if ((lvl) <= g_uzip_debug || \
	    BLK_IN_RANGE(cn, g_uzip_debug_block, 8) || \
	    BLK_IN_RANGE(cn, g_uzip_debug_block, -8)) { \
		printf a; \
	}
/* As DPRINTF, but also fires when debug_block falls inside [bcn, ecn). */
#define	DPRINTF_BRNG(lvl, bcn, ecn, a) \
	KASSERT(bcn < ecn, ("DPRINTF_BRNG: invalid range (%ju, %ju)", \
	    (uintmax_t)bcn, (uintmax_t)ecn)); \
	if (((lvl) <= g_uzip_debug) || \
	    BLK_IN_RANGE(g_uzip_debug_block, bcn, \
	     (intmax_t)ecn - (intmax_t)bcn)) { \
		printf a; \
	}

#define	UZIP_CLASS_NAME	"UZIP"

/*
 * Maximum allowed valid block size (to prevent foot-shooting)
 */
#define	MAX_BLKSZ	(MAXPHYS)

/* cloop images begin with a shell-script shebang; see g_uzip_cloop.h. */
static char CLOOP_MAGIC_START[] = "#!/bin/sh\n";

static void g_uzip_read_done(struct bio *bp);
static void g_uzip_do(struct g_uzip_softc *, struct bio *bp);
124 
/*
 * Tear down a softc: ask the worker thread to shut down, wait for it to
 * announce its exit, then release the decompressor, TOC, cache buffer,
 * and locks.  "gp" may be passed for logging; only used when non-NULL.
 */
static void
g_uzip_softc_free(struct g_uzip_softc *sc, struct g_geom *gp)
{

	if (gp != NULL) {
		DPRINTF(GUZ_DBG_INFO, ("%s: %d requests, %d cached\n",
		    gp->name, sc->req_total, sc->req_cached));
	}

	/* Hand-shake with the worker: raise SHUTDOWN, poll for EXITING. */
	mtx_lock(&sc->queue_mtx);
	sc->wrkthr_flags |= GUZ_SHUTDOWN;
	wakeup(sc);
	while (!(sc->wrkthr_flags & GUZ_EXITING)) {
		/* Time-bounded sleep in case the wakeup races the worker. */
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "guzfree",
		    hz / 10);
	}
	mtx_unlock(&sc->queue_mtx);

	sc->dcp->free(sc->dcp);
	free(sc->toc, M_GEOM_UZIP);
	mtx_destroy(&sc->queue_mtx);
	mtx_destroy(&sc->last_mtx);
	free(sc->last_buf, M_GEOM_UZIP);
	free(sc, M_GEOM_UZIP);
}
150 
151 static int
152 g_uzip_cached(struct g_geom *gp, struct bio *bp)
153 {
154 	struct g_uzip_softc *sc;
155 	off_t ofs;
156 	size_t blk, blkofs, usz;
157 
158 	sc = gp->softc;
159 	ofs = bp->bio_offset + bp->bio_completed;
160 	blk = ofs / sc->blksz;
161 	mtx_lock(&sc->last_mtx);
162 	if (blk == sc->last_blk) {
163 		blkofs = ofs % sc->blksz;
164 		usz = sc->blksz - blkofs;
165 		if (bp->bio_resid < usz)
166 			usz = bp->bio_resid;
167 		memcpy(bp->bio_data + bp->bio_completed, sc->last_buf + blkofs,
168 		    usz);
169 		sc->req_cached++;
170 		mtx_unlock(&sc->last_mtx);
171 
172 		DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: offset=%jd: got %jd bytes "
173 		    "from cache\n", __func__, gp->name, bp, (intmax_t)ofs,
174 		    (intmax_t)usz));
175 
176 		bp->bio_completed += usz;
177 		bp->bio_resid -= usz;
178 
179 		if (bp->bio_resid == 0) {
180 			g_io_deliver(bp, 0);
181 			return (1);
182 		}
183 	} else
184 		mtx_unlock(&sc->last_mtx);
185 
186 	return (0);
187 }
188 
/* Byte offset of the first byte past compressed cluster "bi". */
#define BLK_ENDS(sc, bi)	((sc)->toc[(bi)].offset + \
    (sc)->toc[(bi)].blen)

/* Cluster "bi" immediately follows cluster "bi" - 1 on the media. */
#define BLK_IS_CONT(sc, bi)	(BLK_ENDS((sc), (bi) - 1) == \
    (sc)->toc[(bi)].offset)
/* Cluster "bi" is an all-zero ("Nil") block with no on-media data. */
#define	BLK_IS_NIL(sc, bi)	((sc)->toc[(bi)].blen == 0)

/* Round cluster "bi"'s TOC offset down to the start of its media sector. */
#define TOFF_2_BOFF(sc, pp, bi)	    ((sc)->toc[(bi)].offset - \
    (sc)->toc[(bi)].offset % (pp)->sectorsize)
/* Sector-aligned transfer length covering clusters up to and incl. "ei". */
#define	TLEN_2_BLEN(sc, pp, bp, ei) roundup(BLK_ENDS((sc), (ei)) - \
    (bp)->bio_offset, (pp)->sectorsize)
200 
/*
 * Service (the remainder of) a read request: satisfy leading all-zero
 * clusters directly with bzero(), then clone the bio and issue one
 * sector-aligned read covering a contiguous run of compressed clusters
 * to the underlying provider.  Returns 1 when the request was fully
 * completed (or failed) here, 0 when a child read was issued and
 * completion will continue in g_uzip_read_done()/g_uzip_do().
 */
static int
g_uzip_request(struct g_geom *gp, struct bio *bp)
{
	struct g_uzip_softc *sc;
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t ofs, start_blk_ofs;
	size_t i, start_blk, end_blk, zsize;

	/* Fast path: the last-decompressed-block cache may finish this. */
	if (g_uzip_cached(gp, bp) != 0)
		return (1);

	sc = gp->softc;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	/* Cluster range [start_blk, end_blk) still outstanding. */
	ofs = bp->bio_offset + bp->bio_completed;
	start_blk = ofs / sc->blksz;
	KASSERT(start_blk < sc->nblocks, ("start_blk out of range"));
	end_blk = howmany(ofs + bp->bio_resid, sc->blksz);
	KASSERT(end_blk <= sc->nblocks, ("end_blk out of range"));

	for (; BLK_IS_NIL(sc, start_blk) && start_blk < end_blk; start_blk++) {
		/* Fill in any leading Nil blocks */
		start_blk_ofs = ofs % sc->blksz;
		zsize = MIN(sc->blksz - start_blk_ofs, bp->bio_resid);
		DPRINTF_BLK(GUZ_DBG_IO, start_blk, ("%s/%s: %p/%ju: "
		    "filling %ju zero bytes\n", __func__, gp->name, gp,
		    (uintmax_t)bp->bio_completed, (uintmax_t)zsize));
		bzero(bp->bio_data + bp->bio_completed, zsize);
		bp->bio_completed += zsize;
		bp->bio_resid -= zsize;
		ofs += zsize;
	}

	if (start_blk == end_blk) {
		KASSERT(bp->bio_resid == 0, ("bp->bio_resid is invalid"));
		/*
		 * No non-Nil data is left, complete request immediately.
		 */
		DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: all done returning %ju "
		    "bytes\n", __func__, gp->name, gp,
		    (uintmax_t)bp->bio_completed));
		g_io_deliver(bp, 0);
		return (1);
	}

	for (i = start_blk + 1; i < end_blk; i++) {
		/* Trim discontinuous areas if any */
		if (!BLK_IS_CONT(sc, i)) {
			end_blk = i;
			break;
		}
	}

	DPRINTF_BRNG(GUZ_DBG_IO, start_blk, end_blk, ("%s/%s: %p: "
	    "start=%u (%ju), end=%u (%ju)\n", __func__, gp->name, bp,
	    (u_int)start_blk, (uintmax_t)sc->toc[start_blk].offset,
	    (u_int)end_blk, (uintmax_t)BLK_ENDS(sc, end_blk - 1)));

	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return (1);
	}
	bp2->bio_done = g_uzip_read_done;

	/*
	 * The child read is sector-aligned; shrink the cluster range until
	 * the resulting transfer fits within MAXPHYS.  A single cluster is
	 * read regardless, as it cannot be shrunk any further.
	 */
	bp2->bio_offset = TOFF_2_BOFF(sc, pp, start_blk);
	while (1) {
		bp2->bio_length = TLEN_2_BLEN(sc, pp, bp2, end_blk - 1);
		if (bp2->bio_length <= MAXPHYS)
			break;
		if (end_blk == (start_blk + 1)) {
			break;
		}
		end_blk--;
	}

	DPRINTF(GUZ_DBG_IO, ("%s/%s: bp2->bio_length = %jd\n",
	    __func__, gp->name, (intmax_t)bp2->bio_length));

	/* Freed by g_uzip_do() once the data has been decompressed. */
	bp2->bio_data = malloc(bp2->bio_length, M_GEOM_UZIP, M_NOWAIT);
	if (bp2->bio_data == NULL) {
		g_destroy_bio(bp2);
		g_io_deliver(bp, ENOMEM);
		return (1);
	}

	DPRINTF_BRNG(GUZ_DBG_IO, start_blk, end_blk, ("%s/%s: %p: "
	    "reading %jd bytes from offset %jd\n", __func__, gp->name, bp,
	    (intmax_t)bp2->bio_length, (intmax_t)bp2->bio_offset));

	g_io_request(bp2, cp);
	return (0);
}
298 
299 static void
300 g_uzip_read_done(struct bio *bp)
301 {
302 	struct bio *bp2;
303 	struct g_geom *gp;
304 	struct g_uzip_softc *sc;
305 
306 	bp2 = bp->bio_parent;
307 	gp = bp2->bio_to->geom;
308 	sc = gp->softc;
309 
310 	mtx_lock(&sc->queue_mtx);
311 	bioq_disksort(&sc->bio_queue, bp);
312 	mtx_unlock(&sc->queue_mtx);
313 	wakeup(sc);
314 }
315 
/*
 * Decompression stage, executed by the worker thread: consume the raw
 * compressed data in child bio "bp" cluster by cluster, decompressing
 * each into sc->last_buf (which doubles as the one-block cache) and
 * copying the requested span into the parent bio "bp2".  On completion
 * either deliver the parent or re-issue it for the remaining clusters.
 */
static void
g_uzip_do(struct g_uzip_softc *sc, struct bio *bp)
{
	struct bio *bp2;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_geom *gp;
	char *data, *data2;
	off_t ofs;
	size_t blk, blkofs, len, ulen, firstblk;
	int err;

	bp2 = bp->bio_parent;
	gp = bp2->bio_to->geom;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	/* Propagate any I/O error from the child read. */
	bp2->bio_error = bp->bio_error;
	if (bp2->bio_error != 0)
		goto done;

	/* Make sure there's forward progress. */
	if (bp->bio_completed == 0) {
		bp2->bio_error = ECANCELED;
		goto done;
	}

	ofs = bp2->bio_offset + bp2->bio_completed;
	firstblk = blk = ofs / sc->blksz;
	blkofs = ofs % sc->blksz;
	/* Skip the sector-alignment padding ahead of the first cluster. */
	data = bp->bio_data + sc->toc[blk].offset % pp->sectorsize;
	data2 = bp2->bio_data + bp2->bio_completed;
	while (bp->bio_completed && bp2->bio_resid) {
		if (blk > firstblk && !BLK_IS_CONT(sc, blk)) {
			/* Discontinuity: restart via g_uzip_request(). */
			DPRINTF_BLK(GUZ_DBG_IO, blk, ("%s/%s: %p: backref'ed "
			    "cluster #%u requested, looping around\n",
			    __func__, gp->name, bp2, (u_int)blk));
			goto done;
		}
		ulen = MIN(sc->blksz - blkofs, bp2->bio_resid);
		len = sc->toc[blk].blen;
		DPRINTF(GUZ_DBG_IO, ("%s/%s: %p/%ju: data2=%p, ulen=%u, "
		    "data=%p, len=%u\n", __func__, gp->name, gp,
		    bp->bio_completed, data2, (u_int)ulen, data, (u_int)len));
		if (len == 0) {
			/* All zero block: no cache update */
			bzero(data2, ulen);
		} else if (len <= bp->bio_completed) {
			/* Decompress straight into the last-block cache. */
			mtx_lock(&sc->last_mtx);
			err = sc->dcp->decompress(sc->dcp, gp->name, data,
			    len, sc->last_buf);
			if (err != 0) {
				/* Invalidate the cache on a corrupt block. */
				sc->last_blk = -1;
				mtx_unlock(&sc->last_mtx);
				bp2->bio_error = EILSEQ;
				DPRINTF(GUZ_DBG_ERR, ("%s/%s: decompress"
				    "(%p) failed\n", __func__, gp->name,
				    sc->dcp));
				goto done;
			}
			sc->last_blk = blk;
			memcpy(data2, sc->last_buf + blkofs, ulen);
			mtx_unlock(&sc->last_mtx);
			err = sc->dcp->rewind(sc->dcp, gp->name);
			if (err != 0) {
				bp2->bio_error = EILSEQ;
				DPRINTF(GUZ_DBG_ERR, ("%s/%s: rewind(%p) "
				    "failed\n", __func__, gp->name, sc->dcp));
				goto done;
			}
			data += len;
		} else
			/* Partial cluster in the buffer: need another read. */
			break;

		data2 += ulen;
		bp2->bio_completed += ulen;
		bp2->bio_resid -= ulen;
		bp->bio_completed -= len;
		/* Only the first cluster may start mid-block. */
		blkofs = 0;
		blk++;
	}

done:
	/* Finish processing the request. */
	free(bp->bio_data, M_GEOM_UZIP);
	g_destroy_bio(bp);
	if (bp2->bio_error != 0 || bp2->bio_resid == 0)
		g_io_deliver(bp2, bp2->bio_error);
	else
		g_uzip_request(gp, bp2);
}
408 
409 static void
410 g_uzip_start(struct bio *bp)
411 {
412 	struct g_provider *pp;
413 	struct g_geom *gp;
414 	struct g_uzip_softc *sc;
415 
416 	pp = bp->bio_to;
417 	gp = pp->geom;
418 
419 	DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: cmd=%d, offset=%jd, length=%jd, "
420 	    "buffer=%p\n", __func__, gp->name, bp, bp->bio_cmd,
421 	    (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length, bp->bio_data));
422 
423 	sc = gp->softc;
424 	sc->req_total++;
425 
426 	if (bp->bio_cmd != BIO_READ) {
427 		g_io_deliver(bp, EOPNOTSUPP);
428 		return;
429 	}
430 
431 	bp->bio_resid = bp->bio_length;
432 	bp->bio_completed = 0;
433 
434 	g_uzip_request(gp, bp);
435 }
436 
437 static void
438 g_uzip_orphan(struct g_consumer *cp)
439 {
440 	struct g_geom *gp;
441 
442 	g_trace(G_T_TOPOLOGY, "%s(%p/%s)", __func__, cp, cp->provider->name);
443 	g_topology_assert();
444 
445 	gp = cp->geom;
446 	g_uzip_softc_free(gp->softc, gp);
447 	gp->softc = NULL;
448 	g_wither_geom(gp, ENXIO);
449 }
450 
451 static int
452 g_uzip_access(struct g_provider *pp, int dr, int dw, int de)
453 {
454 	struct g_geom *gp;
455 	struct g_consumer *cp;
456 
457 	gp = pp->geom;
458 	cp = LIST_FIRST(&gp->consumer);
459 	KASSERT (cp != NULL, ("g_uzip_access but no consumer"));
460 
461 	if (cp->acw + dw > 0)
462 		return (EROFS);
463 
464 	return (g_access(cp, dr, dw, de));
465 }
466 
467 static void
468 g_uzip_spoiled(struct g_consumer *cp)
469 {
470 	struct g_geom *gp;
471 
472 	gp = cp->geom;
473 	g_trace(G_T_TOPOLOGY, "%s(%p/%s)", __func__, cp, gp->name);
474 	g_topology_assert();
475 
476 	g_uzip_softc_free(gp->softc, gp);
477 	gp->softc = NULL;
478 	g_wither_geom(gp, ENXIO);
479 }
480 
/*
 * Walk the raw TOC, validating every cluster offset and computing each
 * cluster's compressed length (blen).  A cluster is either "backref'ed"
 * (its offset points back at an earlier identical cluster, whose length
 * is copied) or "normal" (its length is the distance to the next higher
 * offset).  Returns 0 on success, -1 on a malformed TOC.
 */
static int
g_uzip_parse_toc(struct g_uzip_softc *sc, struct g_provider *pp,
    struct g_geom *gp)
{
	uint32_t i, j, backref_to;
	uint64_t max_offset, min_offset;

	/* Compressed data can only start after the header and the TOC. */
	min_offset = sizeof(struct cloop_header) +
	    (sc->nblocks + 1) * sizeof(uint64_t);
	max_offset = sc->toc[0].offset - 1;
	for (i = 0; i < sc->nblocks; i++) {
		/* First do some bounds checking */
		if ((sc->toc[i].offset < min_offset) ||
		    (sc->toc[i].offset > pp->mediasize)) {
			goto error_offset;
		}
		DPRINTF_BLK(GUZ_DBG_IO, i, ("%s: cluster #%u "
		    "sc->toc[i].offset=%ju max_offset=%ju\n", gp->name,
		    (u_int)i, (uintmax_t)sc->toc[i].offset,
		    (uintmax_t)max_offset));
		backref_to = BLEN_UNDEF;
		if (sc->toc[i].offset < max_offset) {
			/*
			 * For the backref'ed blocks search already parsed
			 * TOC entries for the matching offset and copy the
			 * size from matched entry.
			 */
			for (j = 0; j <= i; j++) {
				if (sc->toc[j].offset == sc->toc[i].offset &&
				    !BLK_IS_NIL(sc, j)) {
					break;
				}
				if (j != i) {
					continue;
				}
				/* Reached self without a match: corrupt TOC. */
				DPRINTF(GUZ_DBG_ERR, ("%s: cannot match "
				    "backref'ed offset at cluster #%u\n",
				    gp->name, i));
				return (-1);
			}
			sc->toc[i].blen = sc->toc[j].blen;
			backref_to = j;
		} else {
			/*
			 * For the "normal blocks" seek forward until we hit
			 * block whose offset is larger than ours and assume
			 * it's going to be the next one.
			 */
			for (j = i + 1; j < sc->nblocks; j++) {
				if (sc->toc[j].offset > max_offset) {
					break;
				}
			}
			/* j may be nblocks here: the "fake" last+1 entry. */
			sc->toc[i].blen = sc->toc[j].offset -
			    sc->toc[i].offset;
			if (BLK_ENDS(sc, i) > pp->mediasize) {
				DPRINTF(GUZ_DBG_ERR, ("%s: cluster #%u "
				    "extends past media boundary (%ju > %ju)\n",
				    gp->name, (u_int)i,
				    (uintmax_t)BLK_ENDS(sc, i),
				    (intmax_t)pp->mediasize));
				return (-1);
			}
			KASSERT(max_offset <= sc->toc[i].offset, (
			    "%s: max_offset is incorrect: %ju",
			    gp->name, (uintmax_t)max_offset));
			max_offset = BLK_ENDS(sc, i) - 1;
		}
		DPRINTF_BLK(GUZ_DBG_TOC, i, ("%s: cluster #%u, original %u "
		    "bytes, in %u bytes", gp->name, i, sc->blksz,
		    sc->toc[i].blen));
		if (backref_to != BLEN_UNDEF) {
			DPRINTF_BLK(GUZ_DBG_TOC, i, (" (->#%u)",
			    (u_int)backref_to));
		}
		DPRINTF_BLK(GUZ_DBG_TOC, i, ("\n"));
	}
	return (0);

error_offset:
	DPRINTF(GUZ_DBG_ERR, ("%s: cluster #%u: invalid offset %ju, "
	    "min_offset=%ju mediasize=%jd\n", gp->name, (u_int)i,
	    sc->toc[i].offset, min_offset, pp->mediasize));
	return (-1);
}
566 
567 static struct g_geom *
568 g_uzip_taste(struct g_class *mp, struct g_provider *pp, int flags)
569 {
570 	int error;
571 	uint32_t i, total_offsets, offsets_read, blk;
572 	void *buf;
573 	struct cloop_header *header;
574 	struct g_consumer *cp;
575 	struct g_geom *gp;
576 	struct g_provider *pp2;
577 	struct g_uzip_softc *sc;
578 	enum {
579 		GEOM_UZIP = 1,
580 		GEOM_ULZMA
581 	} type;
582 
583 	g_trace(G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name);
584 	g_topology_assert();
585 
586 	/* Skip providers that are already open for writing. */
587 	if (pp->acw > 0)
588 		return (NULL);
589 
590 	buf = NULL;
591 
592 	/*
593 	 * Create geom instance.
594 	 */
595 	gp = g_new_geomf(mp, "%s.uzip", pp->name);
596 	cp = g_new_consumer(gp);
597 	error = g_attach(cp, pp);
598 	if (error == 0)
599 		error = g_access(cp, 1, 0, 0);
600 	if (error) {
601 		goto e1;
602 	}
603 	g_topology_unlock();
604 
605 	/*
606 	 * Read cloop header, look for CLOOP magic, perform
607 	 * other validity checks.
608 	 */
609 	DPRINTF(GUZ_DBG_INFO, ("%s: media sectorsize %u, mediasize %jd\n",
610 	    gp->name, pp->sectorsize, (intmax_t)pp->mediasize));
611 	buf = g_read_data(cp, 0, pp->sectorsize, NULL);
612 	if (buf == NULL)
613 		goto e2;
614 	header = (struct cloop_header *) buf;
615 	if (strncmp(header->magic, CLOOP_MAGIC_START,
616 	    sizeof(CLOOP_MAGIC_START) - 1) != 0) {
617 		DPRINTF(GUZ_DBG_ERR, ("%s: no CLOOP magic\n", gp->name));
618 		goto e3;
619 	}
620 
621 	switch (header->magic[CLOOP_OFS_COMPR]) {
622 	case CLOOP_COMP_LZMA:
623 	case CLOOP_COMP_LZMA_DDP:
624 		type = GEOM_ULZMA;
625 		if (header->magic[CLOOP_OFS_VERSN] < CLOOP_MINVER_LZMA) {
626 			DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n",
627 			    gp->name));
628 			goto e3;
629 		}
630 		DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_LZMA image found\n",
631 		    gp->name));
632 		break;
633 	case CLOOP_COMP_LIBZ:
634 	case CLOOP_COMP_LIBZ_DDP:
635 		type = GEOM_UZIP;
636 		if (header->magic[CLOOP_OFS_VERSN] < CLOOP_MINVER_ZLIB) {
637 			DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n",
638 			    gp->name));
639 			goto e3;
640 		}
641 		DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_ZLIB image found\n",
642 		    gp->name));
643 		break;
644 	default:
645 		DPRINTF(GUZ_DBG_ERR, ("%s: unsupported image type\n",
646 		    gp->name));
647                 goto e3;
648         }
649 
650 	/*
651 	 * Initialize softc and read offsets.
652 	 */
653 	sc = malloc(sizeof(*sc), M_GEOM_UZIP, M_WAITOK | M_ZERO);
654 	gp->softc = sc;
655 	sc->blksz = ntohl(header->blksz);
656 	sc->nblocks = ntohl(header->nblocks);
657 	if (sc->blksz % 512 != 0) {
658 		printf("%s: block size (%u) should be multiple of 512.\n",
659 		    gp->name, sc->blksz);
660 		goto e4;
661 	}
662 	if (sc->blksz > MAX_BLKSZ) {
663 		printf("%s: block size (%u) should not be larger than %d.\n",
664 		    gp->name, sc->blksz, MAX_BLKSZ);
665 	}
666 	total_offsets = sc->nblocks + 1;
667 	if (sizeof(struct cloop_header) +
668 	    total_offsets * sizeof(uint64_t) > pp->mediasize) {
669 		printf("%s: media too small for %u blocks\n",
670 		    gp->name, sc->nblocks);
671 		goto e4;
672 	}
673 	sc->toc = malloc(total_offsets * sizeof(struct g_uzip_blk),
674 	    M_GEOM_UZIP, M_WAITOK | M_ZERO);
675 	offsets_read = MIN(total_offsets,
676 	    (pp->sectorsize - sizeof(*header)) / sizeof(uint64_t));
677 	for (i = 0; i < offsets_read; i++) {
678 		sc->toc[i].offset = be64toh(((uint64_t *) (header + 1))[i]);
679 		sc->toc[i].blen = BLEN_UNDEF;
680 	}
681 	DPRINTF(GUZ_DBG_INFO, ("%s: %u offsets in the first sector\n",
682 	       gp->name, offsets_read));
683 	for (blk = 1; offsets_read < total_offsets; blk++) {
684 		uint32_t nread;
685 
686 		free(buf, M_GEOM);
687 		buf = g_read_data(
688 		    cp, blk * pp->sectorsize, pp->sectorsize, NULL);
689 		if (buf == NULL)
690 			goto e5;
691 		nread = MIN(total_offsets - offsets_read,
692 		     pp->sectorsize / sizeof(uint64_t));
693 		DPRINTF(GUZ_DBG_TOC, ("%s: %u offsets read from sector %d\n",
694 		    gp->name, nread, blk));
695 		for (i = 0; i < nread; i++) {
696 			sc->toc[offsets_read + i].offset =
697 			    be64toh(((uint64_t *) buf)[i]);
698 			sc->toc[offsets_read + i].blen = BLEN_UNDEF;
699 		}
700 		offsets_read += nread;
701 	}
702 	free(buf, M_GEOM);
703 	buf = NULL;
704 	offsets_read -= 1;
705 	DPRINTF(GUZ_DBG_INFO, ("%s: done reading %u block offsets from %u "
706 	    "sectors\n", gp->name, offsets_read, blk));
707 	if (sc->nblocks != offsets_read) {
708 		DPRINTF(GUZ_DBG_ERR, ("%s: read %s offsets than expected "
709 		    "blocks\n", gp->name,
710 		    sc->nblocks < offsets_read ? "more" : "less"));
711 		goto e5;
712 	}
713 	/*
714 	 * "Fake" last+1 block, to make it easier for the TOC parser to
715 	 * iterate without making the last element a special case.
716 	 */
717 	sc->toc[sc->nblocks].offset = pp->mediasize;
718 	/* Massage TOC (table of contents), make sure it is sound */
719 	if (g_uzip_parse_toc(sc, pp, gp) != 0) {
720 		DPRINTF(GUZ_DBG_ERR, ("%s: TOC error\n", gp->name));
721 		goto e5;
722 	}
723 	mtx_init(&sc->last_mtx, "geom_uzip cache", NULL, MTX_DEF);
724 	mtx_init(&sc->queue_mtx, "geom_uzip wrkthread", NULL, MTX_DEF);
725 	bioq_init(&sc->bio_queue);
726 	sc->last_blk = -1;
727 	sc->last_buf = malloc(sc->blksz, M_GEOM_UZIP, M_WAITOK);
728 	sc->req_total = 0;
729 	sc->req_cached = 0;
730 
731 	if (type == GEOM_UZIP) {
732 		sc->dcp = g_uzip_zlib_ctor(sc->blksz);
733 	} else {
734 		sc->dcp = g_uzip_lzma_ctor(sc->blksz);
735 	}
736 	if (sc->dcp == NULL) {
737 		goto e6;
738 	}
739 
740 	sc->uzip_do = &g_uzip_do;
741 
742 	error = kproc_create(g_uzip_wrkthr, sc, &sc->procp, 0, 0, "%s",
743 	    gp->name);
744 	if (error != 0) {
745 		goto e7;
746 	}
747 
748 	g_topology_lock();
749 	pp2 = g_new_providerf(gp, "%s", gp->name);
750 	pp2->sectorsize = 512;
751 	pp2->mediasize = (off_t)sc->nblocks * sc->blksz;
752 	pp2->stripesize = pp->stripesize;
753 	pp2->stripeoffset = pp->stripeoffset;
754 	g_error_provider(pp2, 0);
755 	g_access(cp, -1, 0, 0);
756 
757 	DPRINTF(GUZ_DBG_INFO, ("%s: taste ok (%d, %jd), (%d, %d), %x\n",
758 	    gp->name, pp2->sectorsize, (intmax_t)pp2->mediasize,
759 	    pp2->stripeoffset, pp2->stripesize, pp2->flags));
760 	DPRINTF(GUZ_DBG_INFO, ("%s: %u x %u blocks\n", gp->name, sc->nblocks,
761 	    sc->blksz));
762 	return (gp);
763 
764 e7:
765 	sc->dcp->free(sc->dcp);
766 e6:
767 	free(sc->last_buf, M_GEOM);
768 	mtx_destroy(&sc->queue_mtx);
769 	mtx_destroy(&sc->last_mtx);
770 e5:
771 	free(sc->toc, M_GEOM);
772 e4:
773 	free(gp->softc, M_GEOM_UZIP);
774 e3:
775 	if (buf != NULL) {
776 		free(buf, M_GEOM);
777 	}
778 e2:
779 	g_topology_lock();
780 	g_access(cp, -1, 0, 0);
781 e1:
782 	g_detach(cp);
783 	g_destroy_consumer(cp);
784 	g_destroy_geom(gp);
785 
786 	return (NULL);
787 }
788 
789 static int
790 g_uzip_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp)
791 {
792 	struct g_provider *pp;
793 
794 	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, gp->name);
795 	g_topology_assert();
796 
797 	if (gp->softc == NULL) {
798 		DPRINTF(GUZ_DBG_ERR, ("%s(%s): gp->softc == NULL\n", __func__,
799 		    gp->name));
800 		return (ENXIO);
801 	}
802 
803 	KASSERT(gp != NULL, ("NULL geom"));
804 	pp = LIST_FIRST(&gp->provider);
805 	KASSERT(pp != NULL, ("NULL provider"));
806 	if (pp->acr > 0 || pp->acw > 0 || pp->ace > 0)
807 		return (EBUSY);
808 
809 	g_uzip_softc_free(gp->softc, gp);
810 	gp->softc = NULL;
811 	g_wither_geom(gp, ENXIO);
812 
813 	return (0);
814 }
815 
/* GEOM class method table for UZIP providers. */
static struct g_class g_uzip_class = {
	.name = UZIP_CLASS_NAME,
	.version = G_VERSION,
	.taste = g_uzip_taste,
	.destroy_geom = g_uzip_destroy_geom,

	.start = g_uzip_start,
	.orphan = g_uzip_orphan,
	.access = g_uzip_access,
	.spoiled = g_uzip_spoiled,
};

DECLARE_GEOM_CLASS(g_uzip_class, g_uzip);
/* The zlib decompressor backend depends on the kernel zlib module. */
MODULE_DEPEND(g_uzip, zlib, 1, 1, 1);
830