/*
 * LZ4 - Fast LZ compression algorithm
 * Copyright (C) 2011 - 2016, Yann Collet.
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *	* Redistributions of source code must retain the above copyright
 *	  notice, this list of conditions and the following disclaimer.
 *	* Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * You can contact the author at:
 *	- LZ4 homepage: http://www.lz4.org
 *	- LZ4 source repository: https://github.com/lz4/lz4
 *
 *	Changed for kernel usage by:
 *	Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
 */

/*-************************************
 *	Dependencies
 **************************************/
#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>

static const int LZ4_minLength = (MFLIMIT + 1);
static const int LZ4_64Klimit = ((64 * KB) + (MFLIMIT - 1));

/*-******************************
 *	Compression functions
 ********************************/
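/*
 * LZ4_hash4() is a multiplicative (Knuth) hash: 2654435761U is close to
 * 2^32 divided by the golden ratio, and only the top LZ4_HASHLOG bits of
 * the product are kept.  byU16 tables hold 16-bit entries, so they can be
 * twice as large and receive one extra hash bit.  LZ4_hash5() is the
 * 64-bit variant, folding five input bytes into the hash to reduce
 * collisions for byU32 tables.
 */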
static FORCE_INLINE U32 LZ4_hash4(
	U32 sequence,
	tableType_t const tableType)
{
	if (tableType == byU16)
		return ((sequence * 2654435761U)
			>> ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
	else
		return ((sequence * 2654435761U)
			>> ((MINMATCH * 8) - LZ4_HASHLOG));
}

static FORCE_INLINE U32 LZ4_hash5(
	U64 sequence,
	tableType_t const tableType)
{
	const U32 hashLog = (tableType == byU16)
		? LZ4_HASHLOG + 1
		: LZ4_HASHLOG;

#if LZ4_LITTLE_ENDIAN
	static const U64 prime5bytes = 889523592379ULL;

	return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
#else
	static const U64 prime8bytes = 11400714785074694791ULL;

	return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
#endif
}

static FORCE_INLINE U32 LZ4_hashPosition(
	const void *p,
	tableType_t const tableType)
{
#if LZ4_ARCH64
	if (tableType == byU32)
		return LZ4_hash5(LZ4_read_ARCH(p), tableType);
#endif

	return LZ4_hash4(LZ4_read32(p), tableType);
}

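/*
 * The hash table records the most recent position seen for each hash in
 * one of three layouts, selected through tableType_t:
 *   - byPtr:  raw pointers (32-bit builds),
 *   - byU32:  32-bit offsets from srcBase (64-bit builds),
 *   - byU16:  16-bit offsets, valid only while the input stays below the
 *             64K limit, which doubles the number of table entries.
 */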
static void LZ4_putPositionOnHash(
	const BYTE *p,
	U32 h,
	void *tableBase,
	tableType_t const tableType,
	const BYTE *srcBase)
{
	switch (tableType) {
	case byPtr:
	{
		const BYTE **hashTable = (const BYTE **)tableBase;

		hashTable[h] = p;
		return;
	}
	case byU32:
	{
		U32 *hashTable = (U32 *) tableBase;

		hashTable[h] = (U32)(p - srcBase);
		return;
	}
	case byU16:
	{
		U16 *hashTable = (U16 *) tableBase;

		hashTable[h] = (U16)(p - srcBase);
		return;
	}
	}
}

static FORCE_INLINE void LZ4_putPosition(
	const BYTE *p,
	void *tableBase,
	tableType_t tableType,
	const BYTE *srcBase)
{
	U32 const h = LZ4_hashPosition(p, tableType);

	LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
}

static const BYTE *LZ4_getPositionOnHash(
	U32 h,
	void *tableBase,
	tableType_t tableType,
	const BYTE *srcBase)
{
	if (tableType == byPtr) {
		const BYTE **hashTable = (const BYTE **) tableBase;

		return hashTable[h];
	}

	if (tableType == byU32) {
		const U32 * const hashTable = (U32 *) tableBase;

		return hashTable[h] + srcBase;
	}

	{
		/* default, to ensure a return */
		const U16 * const hashTable = (U16 *) tableBase;

		return hashTable[h] + srcBase;
	}
}

static FORCE_INLINE const BYTE *LZ4_getPosition(
	const BYTE *p,
	void *tableBase,
	tableType_t tableType,
	const BYTE *srcBase)
{
	U32 const h = LZ4_hashPosition(p, tableType);

	return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}


/*
 * LZ4_compress_generic() :
 * inlined, to ensure branches are decided at compilation time
 */
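/*
 * Output format reminder: each sequence is a token byte (upper 4 bits =
 * literal count, lower 4 bits = match length - MINMATCH), optional extra
 * literal-count bytes of 255 when the field saturates at 15, the literals
 * themselves, a 2-byte little-endian match offset, and optional extra
 * match-length bytes.  MFLIMIT and LASTLITERALS enforce the rule that a
 * block must end with a run of literals.
 */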
static FORCE_INLINE int LZ4_compress_generic(
	LZ4_stream_t_internal * const dictPtr,
	const char * const source,
	char * const dest,
	const int inputSize,
	const int maxOutputSize,
	const limitedOutput_directive outputLimited,
	const tableType_t tableType,
	const dict_directive dict,
	const dictIssue_directive dictIssue,
	const U32 acceleration)
{
	const BYTE *ip = (const BYTE *) source;
	const BYTE *base;
	const BYTE *lowLimit;
	const BYTE * const lowRefLimit = ip - dictPtr->dictSize;
	const BYTE * const dictionary = dictPtr->dictionary;
	const BYTE * const dictEnd = dictionary + dictPtr->dictSize;
	const size_t dictDelta = dictEnd - (const BYTE *)source;
	const BYTE *anchor = (const BYTE *) source;
	const BYTE * const iend = ip + inputSize;
	const BYTE * const mflimit = iend - MFLIMIT;
	const BYTE * const matchlimit = iend - LASTLITERALS;

	BYTE *op = (BYTE *) dest;
	BYTE * const olimit = op + maxOutputSize;

	U32 forwardH;
	size_t refDelta = 0;

	/* Init conditions */
	if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) {
		/* Unsupported inputSize, too large (or negative) */
		return 0;
	}

	switch (dict) {
	case noDict:
	default:
		base = (const BYTE *)source;
		lowLimit = (const BYTE *)source;
		break;
	case withPrefix64k:
		base = (const BYTE *)source - dictPtr->currentOffset;
		lowLimit = (const BYTE *)source - dictPtr->dictSize;
		break;
	case usingExtDict:
		base = (const BYTE *)source - dictPtr->currentOffset;
		lowLimit = (const BYTE *)source;
		break;
	}

	if ((tableType == byU16)
		&& (inputSize >= LZ4_64Klimit)) {
		/* Size too large (not within 64K limit) */
		return 0;
	}

	if (inputSize < LZ4_minLength) {
		/* Input too small, no compression (all literals) */
		goto _last_literals;
	}

	/* First Byte */
	LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
	ip++;
	forwardH = LZ4_hashPosition(ip, tableType);

	/* Main Loop */
	for ( ; ; ) {
		const BYTE *match;
		BYTE *token;

		/* Find a match */
		{
			const BYTE *forwardIp = ip;
			unsigned int step = 1;
			unsigned int searchMatchNb = acceleration << LZ4_SKIPTRIGGER;
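			/*
			 * Skip-ahead heuristic: step is recomputed as
			 * searchMatchNb++ >> LZ4_SKIPTRIGGER, so it starts at
			 * 'acceleration' and grows by one every
			 * 2^LZ4_SKIPTRIGGER failed attempts.  Incompressible
			 * regions are therefore scanned with increasing
			 * strides, trading ratio for speed.
			 */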

			do {
				U32 const h = forwardH;

				ip = forwardIp;
				forwardIp += step;
				step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);

				if (unlikely(forwardIp > mflimit))
					goto _last_literals;

				match = LZ4_getPositionOnHash(h,
					dictPtr->hashTable,
					tableType, base);

				if (dict == usingExtDict) {
					if (match < (const BYTE *)source) {
						refDelta = dictDelta;
						lowLimit = dictionary;
					} else {
						refDelta = 0;
						lowLimit = (const BYTE *)source;
					}
				}

				forwardH = LZ4_hashPosition(forwardIp,
					tableType);

				LZ4_putPositionOnHash(ip, h, dictPtr->hashTable,
					tableType, base);
			} while (((dictIssue == dictSmall)
					? (match < lowRefLimit)
					: 0)
				|| ((tableType == byU16)
					? 0
					: (match + MAX_DISTANCE < ip))
				|| (LZ4_read32(match + refDelta)
					!= LZ4_read32(ip)));
		}

		/* Catch up */
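		/*
		 * The match found above may extend backwards over bytes that
		 * were about to be emitted as literals; walk ip and match back
		 * while the preceding bytes agree, shrinking the literal run.
		 */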
		while (((ip > anchor) & (match + refDelta > lowLimit))
				&& (unlikely(ip[-1] == match[refDelta - 1]))) {
			ip--;
			match--;
		}

		/* Encode Literals */
		{
			unsigned const int litLength = (unsigned int)(ip - anchor);

			token = op++;

			if ((outputLimited) &&
				/* Check output buffer overflow */
				(unlikely(op + litLength +
					(2 + 1 + LASTLITERALS) +
					(litLength / 255) > olimit)))
				return 0;

			if (litLength >= RUN_MASK) {
				int len = (int)litLength - RUN_MASK;

				*token = (RUN_MASK << ML_BITS);

				for (; len >= 255; len -= 255)
					*op++ = 255;
				*op++ = (BYTE)len;
			} else
				*token = (BYTE)(litLength << ML_BITS);

			/* Copy Literals */
			LZ4_wildCopy(op, anchor, op + litLength);
			op += litLength;
		}

_next_match:
		/* Encode Offset */
		LZ4_writeLE16(op, (U16)(ip - match));
		op += 2;

		/* Encode MatchLength */
		{
			unsigned int matchCode;

			if ((dict == usingExtDict)
				&& (lowLimit == dictionary)) {
				const BYTE *limit;

				match += refDelta;
				limit = ip + (dictEnd - match);

				if (limit > matchlimit)
					limit = matchlimit;

				matchCode = LZ4_count(ip + MINMATCH,
					match + MINMATCH, limit);

				ip += MINMATCH + matchCode;

				if (ip == limit) {
					unsigned const int more = LZ4_count(ip,
						(const BYTE *)source,
						matchlimit);

					matchCode += more;
					ip += more;
				}
			} else {
				matchCode = LZ4_count(ip + MINMATCH,
					match + MINMATCH, matchlimit);
				ip += MINMATCH + matchCode;
			}

			if (outputLimited &&
				/* Check output buffer overflow */
				(unlikely(op +
					(1 + LASTLITERALS) +
					(matchCode >> 8) > olimit)))
				return 0;

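			/*
			 * matchCode counts bytes beyond MINMATCH.  Values of
			 * ML_MASK (15) and above saturate the token and are
			 * continued as a run of 255-valued bytes plus a final
			 * remainder byte; the 0xFFFFFFFF stores below emit
			 * those 255s four at a time.
			 */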
			if (matchCode >= ML_MASK) {
				*token += ML_MASK;
				matchCode -= ML_MASK;
				LZ4_write32(op, 0xFFFFFFFF);

				while (matchCode >= 4 * 255) {
					op += 4;
					LZ4_write32(op, 0xFFFFFFFF);
					matchCode -= 4 * 255;
				}

				op += matchCode / 255;
				*op++ = (BYTE)(matchCode % 255);
			} else
				*token += (BYTE)(matchCode);
		}

		anchor = ip;

		/* Test end of chunk */
		if (ip > mflimit)
			break;

		/* Fill table */
		LZ4_putPosition(ip - 2, dictPtr->hashTable, tableType, base);

		/* Test next position */
		match = LZ4_getPosition(ip, dictPtr->hashTable,
			tableType, base);

		if (dict == usingExtDict) {
			if (match < (const BYTE *)source) {
				refDelta = dictDelta;
				lowLimit = dictionary;
			} else {
				refDelta = 0;
				lowLimit = (const BYTE *)source;
			}
		}

		LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);

		if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1)
			&& (match + MAX_DISTANCE >= ip)
			&& (LZ4_read32(match + refDelta) == LZ4_read32(ip))) {
			token = op++;
			*token = 0;
			goto _next_match;
		}

		/* Prepare next loop */
		forwardH = LZ4_hashPosition(++ip, tableType);
	}

_last_literals:
	/* Encode Last Literals */
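	/*
	 * The LZ4 format requires each block to end with literals: the last
	 * LASTLITERALS bytes may never be part of a match and the main loop
	 * stops once ip passes mflimit, so everything left between anchor
	 * and iend is flushed here as a single literal run.
	 */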
	{
		size_t const lastRun = (size_t)(iend - anchor);

		if ((outputLimited) &&
			/* Check output buffer overflow */
			((op - (BYTE *)dest) + lastRun + 1 +
			((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize))
			return 0;

		if (lastRun >= RUN_MASK) {
			size_t accumulator = lastRun - RUN_MASK;
			*op++ = RUN_MASK << ML_BITS;
			for (; accumulator >= 255; accumulator -= 255)
				*op++ = 255;
			*op++ = (BYTE) accumulator;
		} else {
			*op++ = (BYTE)(lastRun << ML_BITS);
		}

		memcpy(op, anchor, lastRun);

		op += lastRun;
	}

	/* End */
	return (int) (((char *)op) - dest);
}

static int LZ4_compress_fast_extState(
	void *state,
	const char *source,
	char *dest,
	int inputSize,
	int maxOutputSize,
	int acceleration)
{
	LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse;
#if LZ4_ARCH64
	const tableType_t tableType = byU32;
#else
	const tableType_t tableType = byPtr;
#endif

	LZ4_resetStream((LZ4_stream_t *)state);

	if (acceleration < 1)
		acceleration = LZ4_ACCELERATION_DEFAULT;

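	/*
	 * When the destination holds at least LZ4_COMPRESSBOUND(inputSize)
	 * bytes, even fully incompressible input fits, so the noLimit paths
	 * can omit the per-sequence output bound checks.  Inputs below the
	 * 64K limit use the denser byU16 hash table in either case.
	 */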
	if (maxOutputSize >= LZ4_COMPRESSBOUND(inputSize)) {
		if (inputSize < LZ4_64Klimit)
			return LZ4_compress_generic(ctx, source,
				dest, inputSize, 0,
				noLimit, byU16, noDict,
				noDictIssue, acceleration);
		else
			return LZ4_compress_generic(ctx, source,
				dest, inputSize, 0,
				noLimit, tableType, noDict,
				noDictIssue, acceleration);
	} else {
		if (inputSize < LZ4_64Klimit)
			return LZ4_compress_generic(ctx, source,
				dest, inputSize,
				maxOutputSize, limitedOutput, byU16, noDict,
				noDictIssue, acceleration);
		else
			return LZ4_compress_generic(ctx, source,
				dest, inputSize,
				maxOutputSize, limitedOutput, tableType, noDict,
				noDictIssue, acceleration);
	}
}

int LZ4_compress_fast(const char *source, char *dest, int inputSize,
	int maxOutputSize, int acceleration, void *wrkmem)
{
	return LZ4_compress_fast_extState(wrkmem, source, dest, inputSize,
		maxOutputSize, acceleration);
}
EXPORT_SYMBOL(LZ4_compress_fast);

int LZ4_compress_default(const char *source, char *dest, int inputSize,
	int maxOutputSize, void *wrkmem)
{
	return LZ4_compress_fast(source, dest, inputSize,
		maxOutputSize, LZ4_ACCELERATION_DEFAULT, wrkmem);
}
EXPORT_SYMBOL(LZ4_compress_default);
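
/*
 * One-shot usage sketch (caller side, not part of this file; assumes a
 * workspace of LZ4_MEM_COMPRESS bytes from <linux/lz4.h>, e.g. vmalloc'd,
 * and a destination sized with LZ4_COMPRESSBOUND()):
 *
 *	void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);
 *	char *dst = vmalloc(LZ4_COMPRESSBOUND(src_len));
 *	int out_len;
 *
 *	out_len = LZ4_compress_default(src, dst, src_len,
 *				       LZ4_COMPRESSBOUND(src_len), wrkmem);
 *
 * A return value of 0 signals failure; with a LZ4_COMPRESSBOUND()-sized
 * destination this only happens for invalid input sizes.
 */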

/*-******************************
 *	*_destSize() variant
 ********************************/
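/*
 * LZ4_compress_destSize_generic() inverts the usual contract: rather than
 * compressing all of *srcSizePtr bytes and failing when 'dst' is too small,
 * it compresses as many source bytes as fit into targetDstSize and reports
 * the amount of input actually consumed back through *srcSizePtr.  The
 * oMaxLit/oMaxMatch/oMaxSeq pointers are precomputed output watermarks that
 * reserve room for the closing literal run required by the format.
 */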
static int LZ4_compress_destSize_generic(
	LZ4_stream_t_internal * const ctx,
	const char * const src,
	char * const dst,
	int * const srcSizePtr,
	const int targetDstSize,
	const tableType_t tableType)
{
	const BYTE *ip = (const BYTE *) src;
	const BYTE *base = (const BYTE *) src;
	const BYTE *lowLimit = (const BYTE *) src;
	const BYTE *anchor = ip;
	const BYTE * const iend = ip + *srcSizePtr;
	const BYTE * const mflimit = iend - MFLIMIT;
	const BYTE * const matchlimit = iend - LASTLITERALS;

	BYTE *op = (BYTE *) dst;
	BYTE * const oend = op + targetDstSize;
	BYTE * const oMaxLit = op + targetDstSize - 2 /* offset */
		- 8 /* because 8 + MINMATCH == MFLIMIT */ - 1 /* token */;
	BYTE * const oMaxMatch = op + targetDstSize
		- (LASTLITERALS + 1 /* token */);
	BYTE * const oMaxSeq = oMaxLit - 1 /* token */;

	U32 forwardH;

	/* Init conditions */
	/* Impossible to store anything */
	if (targetDstSize < 1)
		return 0;
	/* Unsupported input size, too large (or negative) */
	if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE)
		return 0;
	/* Size too large (not within 64K limit) */
	if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit))
		return 0;
	/* Input too small, no compression (all literals) */
	if (*srcSizePtr < LZ4_minLength)
		goto _last_literals;

	/* First Byte */
	*srcSizePtr = 0;
	LZ4_putPosition(ip, ctx->hashTable, tableType, base);
	ip++; forwardH = LZ4_hashPosition(ip, tableType);

	/* Main Loop */
	for ( ; ; ) {
		const BYTE *match;
		BYTE *token;

		/* Find a match */
		{
			const BYTE *forwardIp = ip;
			unsigned int step = 1;
			unsigned int searchMatchNb = 1 << LZ4_SKIPTRIGGER;

			do {
				U32 h = forwardH;

				ip = forwardIp;
				forwardIp += step;
				step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);

				if (unlikely(forwardIp > mflimit))
					goto _last_literals;

				match = LZ4_getPositionOnHash(h, ctx->hashTable,
					tableType, base);
				forwardH = LZ4_hashPosition(forwardIp,
					tableType);
				LZ4_putPositionOnHash(ip, h,
					ctx->hashTable, tableType,
					base);

			} while (((tableType == byU16)
				? 0
				: (match + MAX_DISTANCE < ip))
				|| (LZ4_read32(match) != LZ4_read32(ip)));
		}

		/* Catch up */
		while ((ip > anchor)
			&& (match > lowLimit)
			&& (unlikely(ip[-1] == match[-1]))) {
			ip--;
			match--;
		}

		/* Encode Literal length */
		{
			unsigned int litLength = (unsigned int)(ip - anchor);

			token = op++;
			if (op + ((litLength + 240) / 255)
				+ litLength > oMaxLit) {
				/* Not enough space for a last match */
				op--;
				goto _last_literals;
			}
			if (litLength >= RUN_MASK) {
				unsigned int len = litLength - RUN_MASK;
				*token = (RUN_MASK<<ML_BITS);
				for (; len >= 255; len -= 255)
					*op++ = 255;
				*op++ = (BYTE)len;
			} else
				*token = (BYTE)(litLength << ML_BITS);

			/* Copy Literals */
			LZ4_wildCopy(op, anchor, op + litLength);
			op += litLength;
		}

_next_match:
		/* Encode Offset */
		LZ4_writeLE16(op, (U16)(ip - match)); op += 2;

		/* Encode MatchLength */
		{
			size_t matchLength = LZ4_count(ip + MINMATCH,
			match + MINMATCH, matchlimit);

			if (op + ((matchLength + 240)/255) > oMaxMatch) {
				/* Match description too long: reduce it */
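				/*
				 * Clamp to the longest length that still
				 * fits: (15 - 1) via the token plus one
				 * 255-valued extension byte for every output
				 * byte remaining before oMaxMatch.
				 */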
				matchLength = (15 - 1) + (oMaxMatch - op) * 255;
			}
			ip += MINMATCH + matchLength;

			if (matchLength >= ML_MASK) {
				*token += ML_MASK;
				matchLength -= ML_MASK;
				while (matchLength >= 255) {
					matchLength -= 255;
					*op++ = 255;
				}
				*op++ = (BYTE)matchLength;
			} else
				*token += (BYTE)(matchLength);
		}

		anchor = ip;

		/* Test end of block */
		if (ip > mflimit)
			break;
		if (op > oMaxSeq)
			break;

		/* Fill table */
		LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base);

		/* Test next position */
		match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
		LZ4_putPosition(ip, ctx->hashTable, tableType, base);

		if ((match + MAX_DISTANCE >= ip)
			&& (LZ4_read32(match) == LZ4_read32(ip))) {
			token = op++; *token = 0;
			goto _next_match;
		}

		/* Prepare next loop */
		forwardH = LZ4_hashPosition(++ip, tableType);
	}

_last_literals:
	/* Encode Last Literals */
	{
		size_t lastRunSize = (size_t)(iend - anchor);

		if (op + 1 /* token */
			+ ((lastRunSize + 240) / 255) /* litLength */
			+ lastRunSize /* literals */ > oend) {
			/* adapt lastRunSize to fill 'dst' */
			lastRunSize = (oend - op) - 1;
			lastRunSize -= (lastRunSize + 240) / 255;
		}
		ip = anchor + lastRunSize;

		if (lastRunSize >= RUN_MASK) {
			size_t accumulator = lastRunSize - RUN_MASK;

			*op++ = RUN_MASK << ML_BITS;
			for (; accumulator >= 255; accumulator -= 255)
				*op++ = 255;
			*op++ = (BYTE) accumulator;
		} else {
			*op++ = (BYTE)(lastRunSize<<ML_BITS);
		}
		memcpy(op, anchor, lastRunSize);
		op += lastRunSize;
	}

	/* End */
	*srcSizePtr = (int) (((const char *)ip) - src);
	return (int) (((char *)op) - dst);
}

static int LZ4_compress_destSize_extState(
	LZ4_stream_t *state,
	const char *src,
	char *dst,
	int *srcSizePtr,
	int targetDstSize)
{
#if LZ4_ARCH64
	const tableType_t tableType = byU32;
#else
	const tableType_t tableType = byPtr;
#endif

	LZ4_resetStream(state);

	if (targetDstSize >= LZ4_COMPRESSBOUND(*srcSizePtr)) {
		/* compression success is guaranteed */
		return LZ4_compress_fast_extState(
			state, src, dst, *srcSizePtr,
			targetDstSize, 1);
	} else {
		if (*srcSizePtr < LZ4_64Klimit)
			return LZ4_compress_destSize_generic(
				&state->internal_donotuse,
				src, dst, srcSizePtr,
				targetDstSize, byU16);
		else
			return LZ4_compress_destSize_generic(
				&state->internal_donotuse,
				src, dst, srcSizePtr,
				targetDstSize, tableType);
	}
}


int LZ4_compress_destSize(
	const char *src,
	char *dst,
	int *srcSizePtr,
	int targetDstSize,
	void *wrkmem)
{
	return LZ4_compress_destSize_extState(wrkmem, src, dst, srcSizePtr,
		targetDstSize);
}
EXPORT_SYMBOL(LZ4_compress_destSize);

/*-******************************
 *	Streaming functions
 ********************************/
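/*
 * Streaming usage sketch (caller side, not part of this file): allocate an
 * LZ4_stream_t, reset it, optionally seed it with a dictionary, then feed
 * blocks in order, e.g.
 *
 *	LZ4_stream_t *state = kvmalloc(sizeof(*state), GFP_KERNEL);
 *
 *	LZ4_resetStream(state);
 *	LZ4_loadDict(state, dict, dict_len);
 *	n = LZ4_compress_fast_continue(state, in, out, in_len, out_max, 1);
 *	LZ4_saveDict(state, save_buf, sizeof(save_buf));
 *
 * LZ4_saveDict() is only needed when the previous input buffer will be
 * reused before the next block.  Blocks produced this way may reference up
 * to 64 KB of earlier history and must be decoded by a decompressor that
 * is given the same history.
 */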
void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
{
	memset(LZ4_stream, 0, sizeof(LZ4_stream_t));
}

int LZ4_loadDict(LZ4_stream_t *LZ4_dict,
	const char *dictionary, int dictSize)
{
	LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse;
	const BYTE *p = (const BYTE *)dictionary;
	const BYTE * const dictEnd = p + dictSize;
	const BYTE *base;

	if ((dict->initCheck)
		|| (dict->currentOffset > 1 * GB)) {
		/* Uninitialized structure, or reuse overflow */
		LZ4_resetStream(LZ4_dict);
	}

	if (dictSize < (int)HASH_UNIT) {
		dict->dictionary = NULL;
		dict->dictSize = 0;
		return 0;
	}

	if ((dictEnd - p) > 64 * KB)
		p = dictEnd - 64 * KB;
	dict->currentOffset += 64 * KB;
	base = p - dict->currentOffset;
	dict->dictionary = p;
	dict->dictSize = (U32)(dictEnd - p);
	dict->currentOffset += dict->dictSize;

	while (p <= dictEnd - HASH_UNIT) {
		LZ4_putPosition(p, dict->hashTable, byU32, base);
		p += 3;
	}

	return dict->dictSize;
}
EXPORT_SYMBOL(LZ4_loadDict);

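/*
 * The byU32 hash table stores offsets relative to a moving base.  Once
 * currentOffset grows past 2 GB, or past the numeric address of the next
 * input buffer (which would make 'base' underflow), LZ4_renormDictT()
 * shifts every table entry down by 'delta' so a long-lived stream never
 * overflows the 32-bit offsets.
 */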
static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict,
	const BYTE *src)
{
	if ((LZ4_dict->currentOffset > 0x80000000) ||
		((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {
		/* address space overflow */
		/* rescale hash table */
		U32 const delta = LZ4_dict->currentOffset - 64 * KB;
		const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
		int i;

		for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
			if (LZ4_dict->hashTable[i] < delta)
				LZ4_dict->hashTable[i] = 0;
			else
				LZ4_dict->hashTable[i] -= delta;
		}
		LZ4_dict->currentOffset = 64 * KB;
		if (LZ4_dict->dictSize > 64 * KB)
			LZ4_dict->dictSize = 64 * KB;
		LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
	}
}

int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
{
	LZ4_stream_t_internal * const dict = &LZ4_dict->internal_donotuse;
	const BYTE * const previousDictEnd = dict->dictionary + dict->dictSize;

	if ((U32)dictSize > 64 * KB) {
		/* useless to define a dictionary > 64 * KB */
		dictSize = 64 * KB;
	}
	if ((U32)dictSize > dict->dictSize)
		dictSize = dict->dictSize;

	memmove(safeBuffer, previousDictEnd - dictSize, dictSize);

	dict->dictionary = (const BYTE *)safeBuffer;
	dict->dictSize = (U32)dictSize;

	return dictSize;
}
EXPORT_SYMBOL(LZ4_saveDict);

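/*
 * Two paths below: when the new block starts exactly where the previous one
 * ended (dictEnd == source), the history is a contiguous prefix and the
 * withPrefix64k variant is used; otherwise the dictionary lives elsewhere in
 * memory and usingExtDict remaps out-of-block references through refDelta.
 * The dictSmall variants add a lower-bound check so matches never reach
 * below the start of a dictionary smaller than the 64 KB window.
 */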
int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
	char *dest, int inputSize, int maxOutputSize, int acceleration)
{
	LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse;
	const BYTE * const dictEnd = streamPtr->dictionary
		+ streamPtr->dictSize;

	const BYTE *smallest = (const BYTE *) source;

	if (streamPtr->initCheck) {
		/* Uninitialized structure detected */
		return 0;
	}

	if ((streamPtr->dictSize > 0) && (smallest > dictEnd))
		smallest = dictEnd;

	LZ4_renormDictT(streamPtr, smallest);

	if (acceleration < 1)
		acceleration = LZ4_ACCELERATION_DEFAULT;

	/* Check overlapping input/dictionary space */
	{
		const BYTE *sourceEnd = (const BYTE *) source + inputSize;

		if ((sourceEnd > streamPtr->dictionary)
			&& (sourceEnd < dictEnd)) {
			streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
			if (streamPtr->dictSize > 64 * KB)
				streamPtr->dictSize = 64 * KB;
			if (streamPtr->dictSize < 4)
				streamPtr->dictSize = 0;
			streamPtr->dictionary = dictEnd - streamPtr->dictSize;
		}
	}

	/* prefix mode: source data follows dictionary */
	if (dictEnd == (const BYTE *)source) {
		int result;

		if ((streamPtr->dictSize < 64 * KB) &&
			(streamPtr->dictSize < streamPtr->currentOffset)) {
			result = LZ4_compress_generic(
				streamPtr, source, dest, inputSize,
				maxOutputSize, limitedOutput, byU32,
				withPrefix64k, dictSmall, acceleration);
		} else {
			result = LZ4_compress_generic(
				streamPtr, source, dest, inputSize,
				maxOutputSize, limitedOutput, byU32,
				withPrefix64k, noDictIssue, acceleration);
		}
		streamPtr->dictSize += (U32)inputSize;
		streamPtr->currentOffset += (U32)inputSize;
		return result;
	}

	/* external dictionary mode */
	{
		int result;

		if ((streamPtr->dictSize < 64 * KB) &&
			(streamPtr->dictSize < streamPtr->currentOffset)) {
			result = LZ4_compress_generic(
				streamPtr, source, dest, inputSize,
				maxOutputSize, limitedOutput, byU32,
				usingExtDict, dictSmall, acceleration);
		} else {
			result = LZ4_compress_generic(
				streamPtr, source, dest, inputSize,
				maxOutputSize, limitedOutput, byU32,
				usingExtDict, noDictIssue, acceleration);
		}
		streamPtr->dictionary = (const BYTE *)source;
		streamPtr->dictSize = (U32)inputSize;
		streamPtr->currentOffset += (U32)inputSize;
		return result;
	}
}
EXPORT_SYMBOL(LZ4_compress_fast_continue);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 compressor");