/*
 * LZ4 HC - High Compression Mode of LZ4
 * Copyright (C) 2011-2015, Yann Collet.
 *
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *	* Redistributions of source code must retain the above copyright
 *	  notice, this list of conditions and the following disclaimer.
 *	* Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * You can contact the author at :
 *	- LZ4 homepage : http://www.lz4.org
 *	- LZ4 source repository : https://github.com/lz4/lz4
 *
 *	Changed for kernel usage by:
 *	Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
 */

/*-************************************
 *	Dependencies
 **************************************/
#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h> /* memset */

/* *************************************
 *	Local Constants and types
 ***************************************/

#define OPTIMAL_ML (int)((ML_MASK - 1) + MINMATCH)

#define HASH_FUNCTION(i)	(((i) * 2654435761U) \
	>> ((MINMATCH*8) - LZ4HC_HASH_LOG))
#define DELTANEXTU16(p)	chainTable[(U16)(p)] /* faster */

static U32 LZ4HC_hashPtr(const void *ptr)
{
	return HASH_FUNCTION(LZ4_read32(ptr));
}
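
/*
 * Note: HASH_FUNCTION() is a multiplicative (Knuth-style) hash of the four
 * bytes read at 'ptr': the 32-bit product with 2654435761U is reduced to its
 * top LZ4HC_HASH_LOG bits, spreading nearby byte patterns across the hash
 * table without branches.
 */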

/**************************************
 *	HC Compression
 **************************************/
static void LZ4HC_init(LZ4HC_CCtx_internal *hc4, const BYTE *start)
{
	memset((void *)hc4->hashTable, 0, sizeof(hc4->hashTable));
	memset(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
	hc4->nextToUpdate = 64 * KB;
	hc4->base = start - 64 * KB;
	hc4->end = start;
	hc4->dictBase = start - 64 * KB;
	hc4->dictLimit = 64 * KB;
	hc4->lowLimit = 64 * KB;
}
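
/*
 * Note: positions are tracked as 32-bit indices relative to 'base'. Setting
 * base = start - 64 KB makes the first input byte index 64 KB, so indices
 * below dictLimit (64 KB) denote the (initially empty) external dictionary
 * and index arithmetic never underflows.
 */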

/* Update chains up to ip (excluded) */
static FORCE_INLINE void LZ4HC_Insert(LZ4HC_CCtx_internal *hc4,
	const BYTE *ip)
{
	U16 * const chainTable = hc4->chainTable;
	U32 * const hashTable	= hc4->hashTable;
	const BYTE * const base = hc4->base;
	U32 const target = (U32)(ip - base);
	U32 idx = hc4->nextToUpdate;

	while (idx < target) {
		U32 const h = LZ4HC_hashPtr(base + idx);
		size_t delta = idx - hashTable[h];

		if (delta > MAX_DISTANCE)
			delta = MAX_DISTANCE;

		DELTANEXTU16(idx) = (U16)delta;

		hashTable[h] = idx;
		idx++;
	}

	hc4->nextToUpdate = target;
}
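
/*
 * Note: hashTable[] holds the most recent position for each hash value,
 * while chainTable[] (accessed via DELTANEXTU16) holds, for every position,
 * the backward distance to the previous position with the same hash,
 * saturated at MAX_DISTANCE. The match finders below simply walk this chain
 * of deltas backwards.
 */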

static FORCE_INLINE int LZ4HC_InsertAndFindBestMatch(
	LZ4HC_CCtx_internal *hc4, /* Index table will be updated */
	const BYTE *ip,
	const BYTE * const iLimit,
	const BYTE **matchpos,
	const int maxNbAttempts)
{
	U16 * const chainTable = hc4->chainTable;
	U32 * const HashTable = hc4->hashTable;
	const BYTE * const base = hc4->base;
	const BYTE * const dictBase = hc4->dictBase;
	const U32 dictLimit = hc4->dictLimit;
	const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
		? hc4->lowLimit
		: (U32)(ip - base) - (64 * KB - 1);
	U32 matchIndex;
	int nbAttempts = maxNbAttempts;
	size_t ml = 0;

	/* HC4 match finder */
	LZ4HC_Insert(hc4, ip);
	matchIndex = HashTable[LZ4HC_hashPtr(ip)];

	while ((matchIndex >= lowLimit)
		&& (nbAttempts)) {
		nbAttempts--;
		if (matchIndex >= dictLimit) {
			const BYTE * const match = base + matchIndex;

			if (*(match + ml) == *(ip + ml)
				&& (LZ4_read32(match) == LZ4_read32(ip))) {
				size_t const mlt = LZ4_count(ip + MINMATCH,
					match + MINMATCH, iLimit) + MINMATCH;

				if (mlt > ml) {
					ml = mlt;
					*matchpos = match;
				}
			}
		} else {
			const BYTE * const match = dictBase + matchIndex;

			if (LZ4_read32(match) == LZ4_read32(ip)) {
				size_t mlt;
				const BYTE *vLimit = ip
					+ (dictLimit - matchIndex);

				if (vLimit > iLimit)
					vLimit = iLimit;
				mlt = LZ4_count(ip + MINMATCH,
					match + MINMATCH, vLimit) + MINMATCH;
				if ((ip + mlt == vLimit)
					&& (vLimit < iLimit))
					mlt += LZ4_count(ip + mlt,
						base + dictLimit,
						iLimit);
				if (mlt > ml) {
					/* virtual matchpos */
					ml = mlt;
					*matchpos = base + matchIndex;
				}
			}
		}
		matchIndex -= DELTANEXTU16(matchIndex);
	}

	return (int)ml;
}
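
/*
 * Note: candidates with matchIndex >= dictLimit live in the current prefix
 * and are compared in place; lower indices live in the external dictionary
 * (dictBase), are length-limited at the dictionary end, and may then be
 * continued against the start of the prefix. Only matches within the last
 * 64 KB window are considered, and at most maxNbAttempts chain links are
 * followed per position.
 */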

static FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch(
	LZ4HC_CCtx_internal *hc4,
	const BYTE * const ip,
	const BYTE * const iLowLimit,
	const BYTE * const iHighLimit,
	int longest,
	const BYTE **matchpos,
	const BYTE **startpos,
	const int maxNbAttempts)
{
	U16 * const chainTable = hc4->chainTable;
	U32 * const HashTable = hc4->hashTable;
	const BYTE * const base = hc4->base;
	const U32 dictLimit = hc4->dictLimit;
	const BYTE * const lowPrefixPtr = base + dictLimit;
	const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
		? hc4->lowLimit
		: (U32)(ip - base) - (64 * KB - 1);
	const BYTE * const dictBase = hc4->dictBase;
	U32 matchIndex;
	int nbAttempts = maxNbAttempts;
	int delta = (int)(ip - iLowLimit);

	/* First Match */
	LZ4HC_Insert(hc4, ip);
	matchIndex = HashTable[LZ4HC_hashPtr(ip)];

	while ((matchIndex >= lowLimit)
		&& (nbAttempts)) {
		nbAttempts--;
		if (matchIndex >= dictLimit) {
			const BYTE *matchPtr = base + matchIndex;

			if (*(iLowLimit + longest)
				== *(matchPtr - delta + longest)) {
				if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
					int mlt = MINMATCH + LZ4_count(
						ip + MINMATCH,
						matchPtr + MINMATCH,
						iHighLimit);
					int back = 0;

					while ((ip + back > iLowLimit)
						&& (matchPtr + back > lowPrefixPtr)
						&& (ip[back - 1] == matchPtr[back - 1]))
						back--;

					mlt -= back;

					if (mlt > longest) {
						longest = (int)mlt;
						*matchpos = matchPtr + back;
						*startpos = ip + back;
					}
				}
			}
		} else {
			const BYTE * const matchPtr = dictBase + matchIndex;

			if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
				size_t mlt;
				int back = 0;
				const BYTE *vLimit = ip + (dictLimit - matchIndex);

				if (vLimit > iHighLimit)
					vLimit = iHighLimit;

				mlt = LZ4_count(ip + MINMATCH,
					matchPtr + MINMATCH, vLimit) + MINMATCH;

				if ((ip + mlt == vLimit) && (vLimit < iHighLimit))
					mlt += LZ4_count(ip + mlt, base + dictLimit,
						iHighLimit);
				while ((ip + back > iLowLimit)
					&& (matchIndex + back > lowLimit)
					&& (ip[back - 1] == matchPtr[back - 1]))
					back--;

				mlt -= back;

				if ((int)mlt > longest) {
					longest = (int)mlt;
					*matchpos = base + matchIndex + back;
					*startpos = ip + back;
				}
			}
		}

		matchIndex -= DELTANEXTU16(matchIndex);
	}

	return longest;
}
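
/*
 * Note: unlike LZ4HC_InsertAndFindBestMatch(), this search may also extend a
 * candidate match backwards (the negative 'back' offset) as long as it does
 * not cross iLowLimit or the start of the prefix; the widened starting point
 * is reported through *startpos. A candidate is only reported if it beats
 * the 'longest' length passed in by the caller.
 */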

static FORCE_INLINE int LZ4HC_encodeSequence(
	const BYTE **ip,
	BYTE **op,
	const BYTE **anchor,
	int matchLength,
	const BYTE * const match,
	limitedOutput_directive limitedOutputBuffer,
	BYTE *oend)
{
	int length;
	BYTE *token;

	/* Encode Literal length */
	length = (int)(*ip - *anchor);
	token = (*op)++;

	if ((limitedOutputBuffer)
		&& ((*op + (length>>8)
			+ length + (2 + 1 + LASTLITERALS)) > oend)) {
		/* Check output limit */
		return 1;
	}
	if (length >= (int)RUN_MASK) {
		int len;

		*token = (RUN_MASK<<ML_BITS);
		len = length - RUN_MASK;
		for (; len > 254 ; len -= 255)
			*(*op)++ = 255;
		*(*op)++ = (BYTE)len;
	} else
		*token = (BYTE)(length<<ML_BITS);

	/* Copy Literals */
	LZ4_wildCopy(*op, *anchor, (*op) + length);
	*op += length;

	/* Encode Offset */
	LZ4_writeLE16(*op, (U16)(*ip - match));
	*op += 2;

	/* Encode MatchLength */
	length = (int)(matchLength - MINMATCH);

	if ((limitedOutputBuffer)
		&& (*op + (length>>8)
			+ (1 + LASTLITERALS) > oend)) {
		/* Check output limit */
		return 1;
	}

	if (length >= (int)ML_MASK) {
		*token += ML_MASK;
		length -= ML_MASK;

		for (; length > 509 ; length -= 510) {
			*(*op)++ = 255;
			*(*op)++ = 255;
		}

		if (length > 254) {
			length -= 255;
			*(*op)++ = 255;
		}

		*(*op)++ = (BYTE)length;
	} else
		*token += (BYTE)(length);

	/* Prepare next loop */
	*ip += matchLength;
	*anchor = *ip;

	return 0;
}
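
/*
 * For reference, one sequence as emitted above is laid out as:
 *
 *	token: [literal length : 4 bits | match length - MINMATCH : 4 bits]
 *	optional extra literal-length bytes (255 each, last one < 255)
 *	the literal bytes themselves
 *	2-byte little-endian match offset
 *	optional extra match-length bytes (255 each, last one < 255)
 *
 * Illustrative example (not taken from real data): 5 literals followed by an
 * 8-byte match at offset 1024 encode as token 0x54, the 5 literal bytes,
 * then offset bytes 0x00 0x04, with no extension bytes needed.
 *
 * LZ4HC_compress_generic() below is the parser driving these primitives: at
 * each position it finds a best match, then speculatively searches for
 * longer, overlapping matches starting slightly further on (up to three
 * candidates at a time) before deciding how to split the input into
 * sequences. maxNbAttempts, derived from the compression level, bounds how
 * many hash-chain entries each search may visit.
 */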

static int LZ4HC_compress_generic(
	LZ4HC_CCtx_internal *const ctx,
	const char * const source,
	char * const dest,
	int const inputSize,
	int const maxOutputSize,
	int compressionLevel,
	limitedOutput_directive limit
	)
{
	const BYTE *ip = (const BYTE *) source;
	const BYTE *anchor = ip;
	const BYTE * const iend = ip + inputSize;
	const BYTE * const mflimit = iend - MFLIMIT;
	const BYTE * const matchlimit = (iend - LASTLITERALS);

	BYTE *op = (BYTE *) dest;
	BYTE * const oend = op + maxOutputSize;

	unsigned int maxNbAttempts;
	int ml, ml2, ml3, ml0;
	const BYTE *ref = NULL;
	const BYTE *start2 = NULL;
	const BYTE *ref2 = NULL;
	const BYTE *start3 = NULL;
	const BYTE *ref3 = NULL;
	const BYTE *start0;
	const BYTE *ref0;

	/* init */
	if (compressionLevel > LZ4HC_MAX_CLEVEL)
		compressionLevel = LZ4HC_MAX_CLEVEL;
	if (compressionLevel < 1)
		compressionLevel = LZ4HC_DEFAULT_CLEVEL;
	maxNbAttempts = 1 << (compressionLevel - 1);
	ctx->end += inputSize;

	ip++;

	/* Main Loop */
	while (ip < mflimit) {
		ml = LZ4HC_InsertAndFindBestMatch(ctx, ip,
			matchlimit, (&ref), maxNbAttempts);
		if (!ml) {
			ip++;
			continue;
		}

		/* saved, in case we would skip too much */
		start0 = ip;
		ref0 = ref;
		ml0 = ml;

_Search2:
		if (ip + ml < mflimit)
			ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
				ip + ml - 2, ip + 0,
				matchlimit, ml, &ref2,
				&start2, maxNbAttempts);
		else
			ml2 = ml;

		if (ml2 == ml) {
			/* No better match */
			if (LZ4HC_encodeSequence(&ip, &op,
				&anchor, ml, ref, limit, oend))
				return 0;
			continue;
		}

		if (start0 < ip) {
			if (start2 < ip + ml0) {
				/* empirical */
				ip = start0;
				ref = ref0;
				ml = ml0;
			}
		}

		/* Here, start0 == ip */
		if ((start2 - ip) < 3) {
			/* First Match too small : removed */
			ml = ml2;
			ip = start2;
			ref = ref2;
			goto _Search2;
		}

_Search3:
		/*
		* Currently we have :
		* ml2 > ml1, and
		* ip1 + 3 <= ip2 (usually < ip1 + ml1)
		*/
		if ((start2 - ip) < OPTIMAL_ML) {
			int correction;
			int new_ml = ml;

			if (new_ml > OPTIMAL_ML)
				new_ml = OPTIMAL_ML;
			if (ip + new_ml > start2 + ml2 - MINMATCH)
				new_ml = (int)(start2 - ip) + ml2 - MINMATCH;

			correction = new_ml - (int)(start2 - ip);

			if (correction > 0) {
				start2 += correction;
				ref2 += correction;
				ml2 -= correction;
			}
		}
		/*
		 * Now, we have start2 = ip + new_ml,
		 * with new_ml = min(ml, OPTIMAL_ML = 18)
		 */

		if (start2 + ml2 < mflimit)
			ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
				start2 + ml2 - 3, start2,
				matchlimit, ml2, &ref3, &start3,
				maxNbAttempts);
		else
			ml3 = ml2;

		if (ml3 == ml2) {
			/* No better match : 2 sequences to encode */
			/* ip & ref are known; Now for ml */
			if (start2 < ip + ml)
				ml = (int)(start2 - ip);
			/* Now, encode 2 sequences */
			if (LZ4HC_encodeSequence(&ip, &op, &anchor,
				ml, ref, limit, oend))
				return 0;
			ip = start2;
			if (LZ4HC_encodeSequence(&ip, &op, &anchor,
				ml2, ref2, limit, oend))
				return 0;
			continue;
		}

		if (start3 < ip + ml + 3) {
			/* Not enough space for match 2 : remove it */
			if (start3 >= (ip + ml)) {
				/* can write Seq1 immediately
				 * ==> Seq2 is removed,
				 * so Seq3 becomes Seq1
				 */
				if (start2 < ip + ml) {
					int correction = (int)(ip + ml - start2);

					start2 += correction;
					ref2 += correction;
					ml2 -= correction;
					if (ml2 < MINMATCH) {
						start2 = start3;
						ref2 = ref3;
						ml2 = ml3;
					}
				}

				if (LZ4HC_encodeSequence(&ip, &op, &anchor,
					ml, ref, limit, oend))
					return 0;
				ip = start3;
				ref = ref3;
				ml = ml3;

				start0 = start2;
				ref0 = ref2;
				ml0 = ml2;
				goto _Search2;
			}

			start2 = start3;
			ref2 = ref3;
			ml2 = ml3;
			goto _Search3;
		}

		/*
		* OK, now we have 3 ascending matches;
		* let's write at least the first one
		* ip & ref are known; Now for ml
		*/
		if (start2 < ip + ml) {
			if ((start2 - ip) < (int)ML_MASK) {
				int correction;

				if (ml > OPTIMAL_ML)
					ml = OPTIMAL_ML;
				if (ip + ml > start2 + ml2 - MINMATCH)
					ml = (int)(start2 - ip) + ml2 - MINMATCH;
				correction = ml - (int)(start2 - ip);
				if (correction > 0) {
					start2 += correction;
					ref2 += correction;
					ml2 -= correction;
				}
			} else
				ml = (int)(start2 - ip);
		}
		if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml,
			ref, limit, oend))
			return 0;

		ip = start2;
		ref = ref2;
		ml = ml2;

		start2 = start3;
		ref2 = ref3;
		ml2 = ml3;

		goto _Search3;
	}

	/* Encode Last Literals */
	{
		int lastRun = (int)(iend - anchor);

		if ((limit)
			&& (((char *)op - dest) + lastRun + 1
				+ ((lastRun + 255 - RUN_MASK)/255)
					> (U32)maxOutputSize)) {
			/* Check output limit */
			return 0;
		}
		if (lastRun >= (int)RUN_MASK) {
			*op++ = (RUN_MASK<<ML_BITS);
			lastRun -= RUN_MASK;
			for (; lastRun > 254 ; lastRun -= 255)
				*op++ = 255;
			*op++ = (BYTE) lastRun;
		} else
			*op++ = (BYTE)(lastRun<<ML_BITS);
		LZ4_memcpy(op, anchor, iend - anchor);
		op += iend - anchor;
	}

	/* End */
	return (int) (((char *)op) - dest);
}

static int LZ4_compress_HC_extStateHC(
	void *state,
	const char *src,
	char *dst,
	int srcSize,
	int maxDstSize,
	int compressionLevel)
{
	LZ4HC_CCtx_internal *ctx = &((LZ4_streamHC_t *)state)->internal_donotuse;

	if (((size_t)(state)&(sizeof(void *) - 1)) != 0) {
		/* Error : state is not aligned
		 * for pointers (32 or 64 bits)
		 */
		return 0;
	}

	LZ4HC_init(ctx, (const BYTE *)src);

	if (maxDstSize < LZ4_compressBound(srcSize))
		return LZ4HC_compress_generic(ctx, src, dst,
			srcSize, maxDstSize, compressionLevel, limitedOutput);
	else
		return LZ4HC_compress_generic(ctx, src, dst,
			srcSize, maxDstSize, compressionLevel, noLimit);
}

int LZ4_compress_HC(const char *src, char *dst, int srcSize,
	int maxDstSize, int compressionLevel, void *wrkmem)
{
	return LZ4_compress_HC_extStateHC(wrkmem, src, dst,
		srcSize, maxDstSize, compressionLevel);
}
EXPORT_SYMBOL(LZ4_compress_HC);
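
/*
 * Illustrative one-shot usage sketch. The buffer names, lengths, and the
 * kvmalloc-based allocation are assumptions made for the example; the only
 * hard requirements are that wrkmem provide LZ4HC_MEM_COMPRESS pointer-
 * aligned bytes and that dst be large enough for the result:
 *
 *	void *wrkmem = kvmalloc(LZ4HC_MEM_COMPRESS, GFP_KERNEL);
 *	char *dst = kvmalloc(LZ4_compressBound(src_len), GFP_KERNEL);
 *	int out_len = 0;
 *
 *	if (wrkmem && dst)
 *		out_len = LZ4_compress_HC(src, dst, src_len,
 *			LZ4_compressBound(src_len),
 *			LZ4HC_DEFAULT_CLEVEL, wrkmem);
 *	(out_len > 0 is the compressed size; 0 means compression failed)
 *	kvfree(dst);
 *	kvfree(wrkmem);
 *
 * Sizing dst with LZ4_compressBound(src_len) covers the incompressible worst
 * case, so the call above takes the noLimit path.
 */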

/**************************************
 *	Streaming Functions
 **************************************/
void LZ4_resetStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel)
{
	LZ4_streamHCPtr->internal_donotuse.base = NULL;
	LZ4_streamHCPtr->internal_donotuse.compressionLevel = (unsigned int)compressionLevel;
}
EXPORT_SYMBOL(LZ4_resetStreamHC);

int LZ4_loadDictHC(LZ4_streamHC_t *LZ4_streamHCPtr,
	const char *dictionary,
	int dictSize)
{
	LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;

	if (dictSize > 64 * KB) {
		dictionary += dictSize - 64 * KB;
		dictSize = 64 * KB;
	}
	LZ4HC_init(ctxPtr, (const BYTE *)dictionary);
	if (dictSize >= 4)
		LZ4HC_Insert(ctxPtr, (const BYTE *)dictionary + (dictSize - 3));
	ctxPtr->end = (const BYTE *)dictionary + dictSize;
	return dictSize;
}
EXPORT_SYMBOL(LZ4_loadDictHC);

/* compression */

static void LZ4HC_setExternalDict(
	LZ4HC_CCtx_internal *ctxPtr,
	const BYTE *newBlock)
{
	if (ctxPtr->end >= ctxPtr->base + 4) {
		/* Referencing remaining dictionary content */
		LZ4HC_Insert(ctxPtr, ctxPtr->end - 3);
	}

	/*
	 * Only one memory segment for extDict,
	 * so any previous extDict is lost at this stage
	 */
	ctxPtr->lowLimit	= ctxPtr->dictLimit;
	ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
	ctxPtr->dictBase	= ctxPtr->base;
	ctxPtr->base = newBlock - ctxPtr->dictLimit;
	ctxPtr->end	= newBlock;
	/* match referencing will resume from there */
	ctxPtr->nextToUpdate = ctxPtr->dictLimit;
}

static int LZ4_compressHC_continue_generic(
	LZ4_streamHC_t *LZ4_streamHCPtr,
	const char *source,
	char *dest,
	int inputSize,
	int maxOutputSize,
	limitedOutput_directive limit)
{
	LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;

	/* auto-init if forgotten */
	if (ctxPtr->base == NULL)
		LZ4HC_init(ctxPtr, (const BYTE *) source);

	/* Check overflow */
	if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 * GB) {
		size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base)
			- ctxPtr->dictLimit;
		if (dictSize > 64 * KB)
			dictSize = 64 * KB;
		LZ4_loadDictHC(LZ4_streamHCPtr,
			(const char *)(ctxPtr->end) - dictSize, (int)dictSize);
	}

	/* Check if blocks follow each other */
	if ((const BYTE *)source != ctxPtr->end)
		LZ4HC_setExternalDict(ctxPtr, (const BYTE *)source);

	/* Check overlapping input/dictionary space */
	{
		const BYTE *sourceEnd = (const BYTE *) source + inputSize;
		const BYTE * const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
		const BYTE * const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;

		if ((sourceEnd > dictBegin)
			&& ((const BYTE *)source < dictEnd)) {
			if (sourceEnd > dictEnd)
				sourceEnd = dictEnd;
			ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);

			if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4)
				ctxPtr->lowLimit = ctxPtr->dictLimit;
		}
	}

	return LZ4HC_compress_generic(ctxPtr, source, dest,
		inputSize, maxOutputSize, ctxPtr->compressionLevel, limit);
}

int LZ4_compress_HC_continue(
	LZ4_streamHC_t *LZ4_streamHCPtr,
	const char *source,
	char *dest,
	int inputSize,
	int maxOutputSize)
{
	if (maxOutputSize < LZ4_compressBound(inputSize))
		return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
			source, dest, inputSize, maxOutputSize, limitedOutput);
	else
		return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
			source, dest, inputSize, maxOutputSize, noLimit);
}
EXPORT_SYMBOL(LZ4_compress_HC_continue);
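
/*
 * Illustrative streaming usage sketch. The buffer names and the per-block
 * loop are assumptions for the example, not requirements of the API:
 *
 *	LZ4_streamHC_t *stream = kvmalloc(sizeof(*stream), GFP_KERNEL);
 *
 *	LZ4_resetStreamHC(stream, LZ4HC_DEFAULT_CLEVEL);
 *	LZ4_loadDictHC(stream, dict, dict_len);        (optional)
 *	for each input block:
 *		n = LZ4_compress_HC_continue(stream, block, out,
 *			block_len, out_capacity);
 *		(n == 0 means 'out' was too small for this block)
 *
 * Successive blocks may reference data from previously compressed blocks
 * (and the loaded dictionary) within the 64 KB window, so they must be
 * decompressed in the same order with the same history available.
 */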

/* dictionary saving */

int LZ4_saveDictHC(
	LZ4_streamHC_t *LZ4_streamHCPtr,
	char *safeBuffer,
	int dictSize)
{
	LZ4HC_CCtx_internal *const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
	int const prefixSize = (int)(streamPtr->end
		- (streamPtr->base + streamPtr->dictLimit));

	if (dictSize > 64 * KB)
		dictSize = 64 * KB;
	if (dictSize < 4)
		dictSize = 0;
	if (dictSize > prefixSize)
		dictSize = prefixSize;

	memmove(safeBuffer, streamPtr->end - dictSize, dictSize);

	{
		U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);

		streamPtr->end = (const BYTE *)safeBuffer + dictSize;
		streamPtr->base = streamPtr->end - endIndex;
		streamPtr->dictLimit = endIndex - dictSize;
		streamPtr->lowLimit = endIndex - dictSize;

		if (streamPtr->nextToUpdate < streamPtr->dictLimit)
			streamPtr->nextToUpdate = streamPtr->dictLimit;
	}
	return dictSize;
}
EXPORT_SYMBOL(LZ4_saveDictHC);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 HC compressor");