/* Lzma decompressor for Linux kernel. Shamelessly snarfed
 * from busybox 1.1.1
 *
 * Linux kernel adaptation
 * Copyright (C) 2006  Alain < alain@knaff.lu >
 *
 * Based on small lzma deflate implementation/Small range coder
 * implementation for lzma.
 * Copyright (C) 2006  Aurelien Jacobs < aurel@gnuage.org >
 *
 * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
 * Copyright (C) 1999-2005  Igor Pavlov
 *
 * Copyrights of the parts, see headers below.
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#ifndef STATIC
#include <linux/decompress/unlzma.h>
#endif /* STATIC */

#include <linux/decompress/mm.h>

#define	MIN(a, b) (((a) < (b)) ? (a) : (b))

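/* Read a 'size'-byte little-endian integer from 'ptr' */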
static long long INIT read_int(unsigned char *ptr, int size)
{
	int i;
	long long ret = 0;

	for (i = 0; i < size; i++)
		ret = (ret << 8) | ptr[size-i-1];
	return ret;
}

#define ENDIAN_CONVERT(x) \
  x = (typeof(x))read_int((unsigned char *)&x, sizeof(x))


/* Small range coder implementation for lzma.
 * Copyright (C) 2006  Aurelien Jacobs < aurel@gnuage.org >
 *
 * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
 * Copyright (c) 1999-2005  Igor Pavlov
 */

#include <linux/compiler.h>

#define LZMA_IOBUF_SIZE	0x10000

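/*
 * Range decoder state: fill() refills the input buffer, ptr/buffer/
 * buffer_end track the current input window, and code/range/bound are
 * the arithmetic decoder registers.
 */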
struct rc {
	int (*fill)(void*, unsigned int);
	uint8_t *ptr;
	uint8_t *buffer;
	uint8_t *buffer_end;
	int buffer_size;
	uint32_t code;
	uint32_t range;
	uint32_t bound;
};


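/*
 * The range is renormalized whenever it drops below 2^RC_TOP_BITS.
 * Bit probabilities are RC_MODEL_TOTAL_BITS-bit fixed-point values,
 * adapted by a shift of RC_MOVE_BITS after every decoded bit.
 */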
#define RC_TOP_BITS 24
#define RC_MOVE_BITS 5
#define RC_MODEL_TOTAL_BITS 11


/* Called twice: once at startup and once in rc_normalize() */
static void INIT rc_read(struct rc *rc)
{
	rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE);
	if (rc->buffer_size <= 0)
		error("unexpected EOF");
	rc->ptr = rc->buffer;
	rc->buffer_end = rc->buffer + rc->buffer_size;
}

/* Called once */
static inline void INIT rc_init(struct rc *rc,
				       int (*fill)(void*, unsigned int),
				       char *buffer, int buffer_size)
{
	rc->fill = fill;
	rc->buffer = (uint8_t *)buffer;
	rc->buffer_size = buffer_size;
	rc->buffer_end = rc->buffer + rc->buffer_size;
	rc->ptr = rc->buffer;

	rc->code = 0;
	rc->range = 0xFFFFFFFF;
}

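/* Called once: prime rc->code with the first 5 input bytes */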
static inline void INIT rc_init_code(struct rc *rc)
{
	int i;

	for (i = 0; i < 5; i++) {
		if (rc->ptr >= rc->buffer_end)
			rc_read(rc);
		rc->code = (rc->code << 8) | *rc->ptr++;
	}
}


/* Called once. TODO: bb_maybe_free() */
static inline void INIT rc_free(struct rc *rc)
{
	free(rc->buffer);
}

/* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */
static void INIT rc_do_normalize(struct rc *rc)
{
	if (rc->ptr >= rc->buffer_end)
		rc_read(rc);
	rc->range <<= 8;
	rc->code = (rc->code << 8) | *rc->ptr++;
}
static inline void INIT rc_normalize(struct rc *rc)
{
	if (rc->range < (1 << RC_TOP_BITS))
		rc_do_normalize(rc);
}

/* Called 9 times */
/* Why does rc_is_bit_0_helper() exist?
 * Because we want to always expose (rc->code < rc->bound) to the optimizer.
 */
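/*
 * The decoded bit is 0 when rc->code < rc->bound, where
 * bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS).
 */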
static inline uint32_t INIT rc_is_bit_0_helper(struct rc *rc, uint16_t *p)
{
	rc_normalize(rc);
	rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
	return rc->bound;
}
static inline int INIT rc_is_bit_0(struct rc *rc, uint16_t *p)
{
	uint32_t t = rc_is_bit_0_helper(rc, p);
	return rc->code < t;
}

/* Called ~10 times, but very small, thus inlined */
static inline void INIT rc_update_bit_0(struct rc *rc, uint16_t *p)
{
	rc->range = rc->bound;
	*p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
}
static inline void INIT rc_update_bit_1(struct rc *rc, uint16_t *p)
{
	rc->range -= rc->bound;
	rc->code -= rc->bound;
	*p -= *p >> RC_MOVE_BITS;
}

/* Called 4 times in unlzma loop */
static int INIT rc_get_bit(struct rc *rc, uint16_t *p, int *symbol)
{
	if (rc_is_bit_0(rc, p)) {
		rc_update_bit_0(rc, p);
		*symbol *= 2;
		return 0;
	} else {
		rc_update_bit_1(rc, p);
		*symbol = *symbol * 2 + 1;
		return 1;
	}
}

/* Called once */
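/* Decode one bit with a fixed probability of 1/2 (used for the upper,
 * unmodelled bits of long match distances) */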
static inline int INIT rc_direct_bit(struct rc *rc)
{
	rc_normalize(rc);
	rc->range >>= 1;
	if (rc->code >= rc->range) {
		rc->code -= rc->range;
		return 1;
	}
	return 0;
}

/* Called twice */
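/* Decode 'num_levels' bits MSB-first by walking a binary tree of
 * probabilities rooted at 'p'; the decoded value is left in *symbol */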
static inline void INIT
rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol)
{
	int i = num_levels;

	*symbol = 1;
	while (i--)
		rc_get_bit(rc, p + *symbol, symbol);
	*symbol -= 1 << num_levels;
}


/*
 * Small lzma deflate implementation.
 * Copyright (C) 2006  Aurelien Jacobs < aurel@gnuage.org >
 *
 * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
 * Copyright (C) 1999-2005  Igor Pavlov
 */


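/*
 * Raw 13-byte .lzma stream header: a properties byte
 * (pos == (pb * 5 + lp) * 9 + lc), a 32-bit little-endian dictionary
 * size and a 64-bit little-endian uncompressed size.
 */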
struct lzma_header {
	uint8_t pos;
	uint32_t dict_size;
	uint64_t dst_size;
} __attribute__ ((packed));


#define LZMA_BASE_SIZE 1846
#define LZMA_LIT_SIZE 768

#define LZMA_NUM_POS_BITS_MAX 4

#define LZMA_LEN_NUM_LOW_BITS 3
#define LZMA_LEN_NUM_MID_BITS 3
#define LZMA_LEN_NUM_HIGH_BITS 8

#define LZMA_LEN_CHOICE 0
#define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1)
#define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1)
#define LZMA_LEN_MID (LZMA_LEN_LOW \
		      + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS)))
#define LZMA_LEN_HIGH (LZMA_LEN_MID \
		       + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS)))
#define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS))

#define LZMA_NUM_STATES 12
#define LZMA_NUM_LIT_STATES 7

#define LZMA_START_POS_MODEL_INDEX 4
#define LZMA_END_POS_MODEL_INDEX 14
#define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1))

#define LZMA_NUM_POS_SLOT_BITS 6
#define LZMA_NUM_LEN_TO_POS_STATES 4

#define LZMA_NUM_ALIGN_BITS 4

#define LZMA_MATCH_MIN_LEN 2

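/*
 * Offsets of the individual probability groups within the single
 * uint16_t probability array allocated in unlzma().
 */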
#define LZMA_IS_MATCH 0
#define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
#define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES)
#define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES)
#define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES)
#define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES)
#define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \
		       + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
#define LZMA_SPEC_POS (LZMA_POS_SLOT \
		       + (LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS))
#define LZMA_ALIGN (LZMA_SPEC_POS \
		    + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX)
#define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS))
#define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS)
#define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS)


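/*
 * Output state: either the whole uncompressed image or, when a flush()
 * callback is used, a dictionary-sized window that is flushed and
 * reused; global_pos + buffer_pos is the running output position.
 */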
struct writer {
	uint8_t *buffer;
	uint8_t previous_byte;
	size_t buffer_pos;
	int bufsize;
	size_t global_pos;
	int (*flush)(void*, unsigned int);
	struct lzma_header *header;
};

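/* LZMA decoder state: state-machine index and the last four match distances */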
struct cstate {
	int state;
	uint32_t rep0, rep1, rep2, rep3;
};

static inline size_t INIT get_pos(struct writer *wr)
{
	return wr->global_pos + wr->buffer_pos;
}

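/* Fetch the byte written 'offs' positions back in the output; with a
 * flush() callback the position wraps modulo the dictionary size */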
static inline uint8_t INIT peek_old_byte(struct writer *wr,
						uint32_t offs)
{
	if (!wr->flush) {
		int32_t pos;
		while (offs > wr->header->dict_size)
			offs -= wr->header->dict_size;
		pos = wr->buffer_pos - offs;
		return wr->buffer[pos];
	} else {
		uint32_t pos = wr->buffer_pos - offs;
		while (pos >= wr->header->dict_size)
			pos += wr->header->dict_size;
		return wr->buffer[pos];
	}
}

static inline void INIT write_byte(struct writer *wr, uint8_t byte)
{
	wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte;
	if (wr->flush && wr->buffer_pos == wr->header->dict_size) {
		wr->buffer_pos = 0;
		wr->global_pos += wr->header->dict_size;
		wr->flush((char *)wr->buffer, wr->header->dict_size);
	}
}


static inline void INIT copy_byte(struct writer *wr, uint32_t offs)
{
	write_byte(wr, peek_old_byte(wr, offs));
}

static inline void INIT copy_bytes(struct writer *wr,
					 uint32_t rep0, int len)
{
	do {
		copy_byte(wr, rep0);
		len--;
	} while (len != 0 && wr->buffer_pos < wr->header->dst_size);
}

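/* "Is match" bit was 0: decode one literal byte.  While in a match state
 * the "matched literal" probabilities, guided by the byte rep0 positions
 * back, are used first; then update the state machine. */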
static inline void INIT process_bit0(struct writer *wr, struct rc *rc,
				     struct cstate *cst, uint16_t *p,
				     int pos_state, uint16_t *prob,
				     int lc, uint32_t literal_pos_mask)
{
	int mi = 1;

	rc_update_bit_0(rc, prob);
	prob = (p + LZMA_LITERAL +
		(LZMA_LIT_SIZE
		 * (((get_pos(wr) & literal_pos_mask) << lc)
		    + (wr->previous_byte >> (8 - lc))))
		);

	if (cst->state >= LZMA_NUM_LIT_STATES) {
		int match_byte = peek_old_byte(wr, cst->rep0);
		do {
			int bit;
			uint16_t *prob_lit;

			match_byte <<= 1;
			bit = match_byte & 0x100;
			prob_lit = prob + 0x100 + bit + mi;
			if (rc_get_bit(rc, prob_lit, &mi)) {
				if (!bit)
					break;
			} else {
				if (bit)
					break;
			}
		} while (mi < 0x100);
	}
	while (mi < 0x100) {
		uint16_t *prob_lit = prob + mi;
		rc_get_bit(rc, prob_lit, &mi);
	}
	write_byte(wr, mi);
	if (cst->state < 4)
		cst->state = 0;
	else if (cst->state < 10)
		cst->state -= 3;
	else
		cst->state -= 6;
}

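/* "Is match" bit was 1: decode a match.  First decide whether one of the
 * four most recent distances (rep0..rep3) is reused or a new distance is
 * coded, then decode the length and copy len bytes from rep0 back. */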
static inline void INIT process_bit1(struct writer *wr, struct rc *rc,
					    struct cstate *cst, uint16_t *p,
					    int pos_state, uint16_t *prob)
{
	int offset;
	uint16_t *prob_len;
	int num_bits;
	int len;

	rc_update_bit_1(rc, prob);
	prob = p + LZMA_IS_REP + cst->state;
	if (rc_is_bit_0(rc, prob)) {
		rc_update_bit_0(rc, prob);
		cst->rep3 = cst->rep2;
		cst->rep2 = cst->rep1;
		cst->rep1 = cst->rep0;
		cst->state = cst->state < LZMA_NUM_LIT_STATES ? 0 : 3;
		prob = p + LZMA_LEN_CODER;
	} else {
		rc_update_bit_1(rc, prob);
		prob = p + LZMA_IS_REP_G0 + cst->state;
		if (rc_is_bit_0(rc, prob)) {
			rc_update_bit_0(rc, prob);
			prob = (p + LZMA_IS_REP_0_LONG
				+ (cst->state <<
				   LZMA_NUM_POS_BITS_MAX) +
				pos_state);
			if (rc_is_bit_0(rc, prob)) {
				rc_update_bit_0(rc, prob);

				cst->state = cst->state < LZMA_NUM_LIT_STATES ?
					9 : 11;
				copy_byte(wr, cst->rep0);
				return;
			} else {
				rc_update_bit_1(rc, prob);
			}
		} else {
			uint32_t distance;

			rc_update_bit_1(rc, prob);
			prob = p + LZMA_IS_REP_G1 + cst->state;
			if (rc_is_bit_0(rc, prob)) {
				rc_update_bit_0(rc, prob);
				distance = cst->rep1;
			} else {
				rc_update_bit_1(rc, prob);
				prob = p + LZMA_IS_REP_G2 + cst->state;
				if (rc_is_bit_0(rc, prob)) {
					rc_update_bit_0(rc, prob);
					distance = cst->rep2;
				} else {
					rc_update_bit_1(rc, prob);
					distance = cst->rep3;
					cst->rep3 = cst->rep2;
				}
				cst->rep2 = cst->rep1;
			}
			cst->rep1 = cst->rep0;
			cst->rep0 = distance;
		}
		cst->state = cst->state < LZMA_NUM_LIT_STATES ? 8 : 11;
		prob = p + LZMA_REP_LEN_CODER;
	}

	prob_len = prob + LZMA_LEN_CHOICE;
	if (rc_is_bit_0(rc, prob_len)) {
		rc_update_bit_0(rc, prob_len);
		prob_len = (prob + LZMA_LEN_LOW
			    + (pos_state <<
			       LZMA_LEN_NUM_LOW_BITS));
		offset = 0;
		num_bits = LZMA_LEN_NUM_LOW_BITS;
	} else {
		rc_update_bit_1(rc, prob_len);
		prob_len = prob + LZMA_LEN_CHOICE_2;
		if (rc_is_bit_0(rc, prob_len)) {
			rc_update_bit_0(rc, prob_len);
			prob_len = (prob + LZMA_LEN_MID
				    + (pos_state <<
				       LZMA_LEN_NUM_MID_BITS));
			offset = 1 << LZMA_LEN_NUM_LOW_BITS;
			num_bits = LZMA_LEN_NUM_MID_BITS;
		} else {
			rc_update_bit_1(rc, prob_len);
			prob_len = prob + LZMA_LEN_HIGH;
			offset = ((1 << LZMA_LEN_NUM_LOW_BITS)
				  + (1 << LZMA_LEN_NUM_MID_BITS));
			num_bits = LZMA_LEN_NUM_HIGH_BITS;
		}
	}

	rc_bit_tree_decode(rc, prob_len, num_bits, &len);
	len += offset;

	if (cst->state < 4) {
		int pos_slot;

		cst->state += LZMA_NUM_LIT_STATES;
		prob =
			p + LZMA_POS_SLOT +
			((len <
			  LZMA_NUM_LEN_TO_POS_STATES ? len :
			  LZMA_NUM_LEN_TO_POS_STATES - 1)
			 << LZMA_NUM_POS_SLOT_BITS);
		rc_bit_tree_decode(rc, prob,
				   LZMA_NUM_POS_SLOT_BITS,
				   &pos_slot);
		if (pos_slot >= LZMA_START_POS_MODEL_INDEX) {
			int i, mi;
			num_bits = (pos_slot >> 1) - 1;
			cst->rep0 = 2 | (pos_slot & 1);
			if (pos_slot < LZMA_END_POS_MODEL_INDEX) {
				cst->rep0 <<= num_bits;
				prob = p + LZMA_SPEC_POS +
					cst->rep0 - pos_slot - 1;
			} else {
				num_bits -= LZMA_NUM_ALIGN_BITS;
				while (num_bits--)
					cst->rep0 = (cst->rep0 << 1) |
						rc_direct_bit(rc);
				prob = p + LZMA_ALIGN;
				cst->rep0 <<= LZMA_NUM_ALIGN_BITS;
				num_bits = LZMA_NUM_ALIGN_BITS;
			}
			i = 1;
			mi = 1;
			while (num_bits--) {
				if (rc_get_bit(rc, prob + mi, &mi))
					cst->rep0 |= i;
				i <<= 1;
			}
		} else
			cst->rep0 = pos_slot;
		/* A distance of 0xFFFFFFFF (rep0 == 0 after the increment
		 * below) is the end-of-stream marker. */
		if (++(cst->rep0) == 0)
			return;
	}

	len += LZMA_MATCH_MIN_LEN;

	copy_bytes(wr, cst->rep0, len);
}



STATIC inline int INIT unlzma(unsigned char *buf, int in_len,
			      int (*fill)(void*, unsigned int),
			      int (*flush)(void*, unsigned int),
			      unsigned char *output,
			      int *posp,
			      void (*error_fn)(char *x)
	)
{
	struct lzma_header header;
	int lc, pb, lp;
	uint32_t pos_state_mask;
	uint32_t literal_pos_mask;
	uint16_t *p;
	int num_probs;
	struct rc rc;
	int i, mi;
	struct writer wr;
	struct cstate cst;
	unsigned char *inbuf;
	int ret = -1;

	set_error_fn(error_fn);
	if (!flush)
		in_len -= 4; /* Uncompressed size hack active in pre-boot
				environment */
	if (buf)
		inbuf = buf;
	else
		inbuf = malloc(LZMA_IOBUF_SIZE);
	if (!inbuf) {
		error("Could not allocate input buffer");
		goto exit_0;
	}

	cst.state = 0;
	cst.rep0 = cst.rep1 = cst.rep2 = cst.rep3 = 1;

	wr.header = &header;
	wr.flush = flush;
	wr.global_pos = 0;
	wr.previous_byte = 0;
	wr.buffer_pos = 0;

	rc_init(&rc, fill, inbuf, in_len);

	for (i = 0; i < sizeof(header); i++) {
		if (rc.ptr >= rc.buffer_end)
			rc_read(&rc);
		((unsigned char *)&header)[i] = *rc.ptr++;
	}

	if (header.pos >= (9 * 5 * 5))
		error("bad header");

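	/* Split the properties byte: pos == (pb * 5 + lp) * 9 + lc, where
	 * lc = literal context bits, lp = literal position bits and
	 * pb = position bits. */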
	mi = 0;
	lc = header.pos;
	while (lc >= 9) {
		mi++;
		lc -= 9;
	}
	pb = 0;
	lp = mi;
	while (lp >= 5) {
		pb++;
		lp -= 5;
	}
	pos_state_mask = (1 << pb) - 1;
	literal_pos_mask = (1 << lp) - 1;

	ENDIAN_CONVERT(header.dict_size);
	ENDIAN_CONVERT(header.dst_size);

	if (header.dict_size == 0)
		header.dict_size = 1;

	if (output)
		wr.buffer = output;
	else {
		wr.bufsize = MIN(header.dst_size, header.dict_size);
		wr.buffer = large_malloc(wr.bufsize);
	}
	if (wr.buffer == NULL)
		goto exit_1;

	num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
	p = (uint16_t *) large_malloc(num_probs * sizeof(*p));
	if (p == 0)
		goto exit_2;
	num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
	for (i = 0; i < num_probs; i++)
		p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;

	rc_init_code(&rc);

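	/* Main decode loop: one "is match" bit per iteration.  A 0 bit
	 * yields a literal, a 1 bit a match; rep0 == 0 after a match is
	 * the end-of-stream marker. */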
	while (get_pos(&wr) < header.dst_size) {
		int pos_state = get_pos(&wr) & pos_state_mask;
		uint16_t *prob = p + LZMA_IS_MATCH +
			(cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state;
		if (rc_is_bit_0(&rc, prob))
			process_bit0(&wr, &rc, &cst, p, pos_state, prob,
				     lc, literal_pos_mask);
		else {
			process_bit1(&wr, &rc, &cst, p, pos_state, prob);
			if (cst.rep0 == 0)
				break;
		}
	}

	if (posp)
		*posp = rc.ptr - rc.buffer;
	if (wr.flush)
		wr.flush(wr.buffer, wr.buffer_pos);
	ret = 0;
	large_free(p);
exit_2:
	if (!output)
		large_free(wr.buffer);
exit_1:
	if (!buf)
		free(inbuf);
exit_0:
	return ret;
}

#define decompress unlzma