/*
 * LZ4 HC - High Compression Mode of LZ4
 * Copyright (C) 2011-2015, Yann Collet.
 *
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *	* Redistributions of source code must retain the above copyright
 *	  notice, this list of conditions and the following disclaimer.
 *	* Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following disclaimer
 *	  in the documentation and/or other materials provided with the
 *	  distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * You can contact the author at :
 *	- LZ4 homepage : http://www.lz4.org
 *	- LZ4 source repository : https://github.com/lz4/lz4
 *
 * Changed for kernel usage by:
 *	Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
 */

/*-************************************
 *	Dependencies
 **************************************/
#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h> /* memset */

/* *************************************
 *	Local Constants and types
 ***************************************/

#define OPTIMAL_ML (int)((ML_MASK - 1) + MINMATCH)

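/*
 * Knuth-style multiplicative hash: the first MINMATCH (4) bytes at a
 * position are multiplied by the golden-ratio constant 2654435761U and the
 * top LZ4HC_HASH_LOG bits of the product select the hash table slot.
 */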
#define HASH_FUNCTION(i)	(((i) * 2654435761U) \
	>> ((MINMATCH*8) - LZ4HC_HASH_LOG))
#define DELTANEXTU16(p)	chainTable[(U16)(p)] /* faster */

static U32 LZ4HC_hashPtr(const void *ptr)
{
	return HASH_FUNCTION(LZ4_read32(ptr));
}

/**************************************
 *	HC Compression
 **************************************/
static void LZ4HC_init(LZ4HC_CCtx_internal *hc4, const BYTE *start)
{
	memset((void *)hc4->hashTable, 0, sizeof(hc4->hashTable));
	memset(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
	hc4->nextToUpdate = 64 * KB;
	hc4->base = start - 64 * KB;
	hc4->end = start;
	hc4->dictBase = start - 64 * KB;
	hc4->dictLimit = 64 * KB;
	hc4->lowLimit = 64 * KB;
}

/* Update chains up to ip (excluded) */
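/*
 * hashTable[h] holds the most recent position whose first four bytes hash
 * to h; DELTANEXTU16(pos) stores the backward delta from pos to the previous
 * position with the same hash, capped at MAX_DISTANCE. Together they form
 * the per-hash chains that the match finders walk backwards.
 */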
static FORCE_INLINE void LZ4HC_Insert(LZ4HC_CCtx_internal *hc4,
	const BYTE *ip)
{
	U16 * const chainTable = hc4->chainTable;
	U32 * const hashTable = hc4->hashTable;
	const BYTE * const base = hc4->base;
	U32 const target = (U32)(ip - base);
	U32 idx = hc4->nextToUpdate;

	while (idx < target) {
		U32 const h = LZ4HC_hashPtr(base + idx);
		size_t delta = idx - hashTable[h];

		if (delta > MAX_DISTANCE)
			delta = MAX_DISTANCE;

		DELTANEXTU16(idx) = (U16)delta;

		hashTable[h] = idx;
		idx++;
	}

	hc4->nextToUpdate = target;
}

static FORCE_INLINE int LZ4HC_InsertAndFindBestMatch(
	LZ4HC_CCtx_internal *hc4,	/* Index table will be updated */
	const BYTE *ip,
	const BYTE * const iLimit,
	const BYTE **matchpos,
	const int maxNbAttempts)
{
	U16 * const chainTable = hc4->chainTable;
	U32 * const HashTable = hc4->hashTable;
	const BYTE * const base = hc4->base;
	const BYTE * const dictBase = hc4->dictBase;
	const U32 dictLimit = hc4->dictLimit;
	const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
		? hc4->lowLimit
		: (U32)(ip - base) - (64 * KB - 1);
	U32 matchIndex;
	int nbAttempts = maxNbAttempts;
	size_t ml = 0;

	/* HC4 match finder */
	LZ4HC_Insert(hc4, ip);
	matchIndex = HashTable[LZ4HC_hashPtr(ip)];

	while ((matchIndex >= lowLimit)
		&& (nbAttempts)) {
		nbAttempts--;
		if (matchIndex >= dictLimit) {
			const BYTE * const match = base + matchIndex;

			if (*(match + ml) == *(ip + ml)
				&& (LZ4_read32(match) == LZ4_read32(ip))) {
				size_t const mlt = LZ4_count(ip + MINMATCH,
					match + MINMATCH, iLimit) + MINMATCH;

				if (mlt > ml) {
					ml = mlt;
					*matchpos = match;
				}
			}
		} else {
			const BYTE * const match = dictBase + matchIndex;

			if (LZ4_read32(match) == LZ4_read32(ip)) {
				size_t mlt;
				const BYTE *vLimit = ip
					+ (dictLimit - matchIndex);

				if (vLimit > iLimit)
					vLimit = iLimit;
				mlt = LZ4_count(ip + MINMATCH,
					match + MINMATCH, vLimit) + MINMATCH;
				if ((ip + mlt == vLimit)
					&& (vLimit < iLimit))
					mlt += LZ4_count(ip + mlt,
						base + dictLimit,
						iLimit);
				if (mlt > ml) {
					/* virtual matchpos */
					ml = mlt;
					*matchpos = base + matchIndex;
				}
			}
		}
		matchIndex -= DELTANEXTU16(matchIndex);
	}

	return (int)ml;
}

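/*
 * Like LZ4HC_InsertAndFindBestMatch(), but a candidate may also be extended
 * backwards, down to iLowLimit: the function returns the widened length and
 * reports both the match position (*matchpos) and where the sequence should
 * now start (*startpos), which may lie before ip.
 */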
static FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch(
	LZ4HC_CCtx_internal *hc4,
	const BYTE * const ip,
	const BYTE * const iLowLimit,
	const BYTE * const iHighLimit,
	int longest,
	const BYTE **matchpos,
	const BYTE **startpos,
	const int maxNbAttempts)
{
	U16 * const chainTable = hc4->chainTable;
	U32 * const HashTable = hc4->hashTable;
	const BYTE * const base = hc4->base;
	const U32 dictLimit = hc4->dictLimit;
	const BYTE * const lowPrefixPtr = base + dictLimit;
	const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
		? hc4->lowLimit
		: (U32)(ip - base) - (64 * KB - 1);
	const BYTE * const dictBase = hc4->dictBase;
	U32 matchIndex;
	int nbAttempts = maxNbAttempts;
	int delta = (int)(ip - iLowLimit);

	/* First Match */
	LZ4HC_Insert(hc4, ip);
	matchIndex = HashTable[LZ4HC_hashPtr(ip)];

	while ((matchIndex >= lowLimit)
		&& (nbAttempts)) {
		nbAttempts--;
		if (matchIndex >= dictLimit) {
			const BYTE *matchPtr = base + matchIndex;

			if (*(iLowLimit + longest)
				== *(matchPtr - delta + longest)) {
				if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
					int mlt = MINMATCH + LZ4_count(
						ip + MINMATCH,
						matchPtr + MINMATCH,
						iHighLimit);
					int back = 0;

					while ((ip + back > iLowLimit)
						&& (matchPtr + back > lowPrefixPtr)
						&& (ip[back - 1] == matchPtr[back - 1]))
						back--;

					mlt -= back;

					if (mlt > longest) {
						longest = (int)mlt;
						*matchpos = matchPtr + back;
						*startpos = ip + back;
					}
				}
			}
		} else {
			const BYTE * const matchPtr = dictBase + matchIndex;

			if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
				size_t mlt;
				int back = 0;
				const BYTE *vLimit = ip + (dictLimit - matchIndex);

				if (vLimit > iHighLimit)
					vLimit = iHighLimit;

				mlt = LZ4_count(ip + MINMATCH,
					matchPtr + MINMATCH, vLimit) + MINMATCH;

				if ((ip + mlt == vLimit) && (vLimit < iHighLimit))
					mlt += LZ4_count(ip + mlt, base + dictLimit,
						iHighLimit);
				while ((ip + back > iLowLimit)
					&& (matchIndex + back > lowLimit)
					&& (ip[back - 1] == matchPtr[back - 1]))
					back--;

				mlt -= back;

				if ((int)mlt > longest) {
					longest = (int)mlt;
					*matchpos = base + matchIndex + back;
					*startpos = ip + back;
				}
			}
		}

		matchIndex -= DELTANEXTU16(matchIndex);
	}

	return longest;
}

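/*
 * One LZ4 sequence = a token byte (literal length in the high 4 bits, match
 * length minus MINMATCH in the low 4 bits), optional extra length bytes of
 * 255, the literals, a 2-byte little-endian match offset, then optional
 * extra match length bytes. LZ4HC_encodeSequence() below emits exactly that.
 */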
static FORCE_INLINE int LZ4HC_encodeSequence(
	const BYTE **ip,
	BYTE **op,
	const BYTE **anchor,
	int matchLength,
	const BYTE * const match,
	limitedOutput_directive limitedOutputBuffer,
	BYTE *oend)
{
	int length;
	BYTE *token;

	/* Encode Literal length */
	length = (int)(*ip - *anchor);
	token = (*op)++;

	if ((limitedOutputBuffer)
		&& ((*op + (length>>8)
			+ length + (2 + 1 + LASTLITERALS)) > oend)) {
		/* Check output limit */
		return 1;
	}
	if (length >= (int)RUN_MASK) {
		int len;

		*token = (RUN_MASK<<ML_BITS);
		len = length - RUN_MASK;
		for (; len > 254 ; len -= 255)
			*(*op)++ = 255;
		*(*op)++ = (BYTE)len;
	} else
		*token = (BYTE)(length<<ML_BITS);

	/* Copy Literals */
	LZ4_wildCopy(*op, *anchor, (*op) + length);
	*op += length;

	/* Encode Offset */
	LZ4_writeLE16(*op, (U16)(*ip - match));
	*op += 2;

	/* Encode MatchLength */
	length = (int)(matchLength - MINMATCH);

	if ((limitedOutputBuffer)
		&& (*op + (length>>8)
			+ (1 + LASTLITERALS) > oend)) {
		/* Check output limit */
		return 1;
	}

	if (length >= (int)ML_MASK) {
		*token += ML_MASK;
		length -= ML_MASK;

		for (; length > 509 ; length -= 510) {
			*(*op)++ = 255;
			*(*op)++ = 255;
		}

		if (length > 254) {
			length -= 255;
			*(*op)++ = 255;
		}

		*(*op)++ = (BYTE)length;
	} else
		*token += (BYTE)(length);

	/* Prepare next loop */
	*ip += matchLength;
	*anchor = *ip;

	return 0;
}

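/*
 * Greedy parser with limited lookahead: for each position the best match is
 * found, then _Search2/_Search3 probe for wider, overlapping matches before
 * a sequence is committed. Search effort grows with the compression level as
 * maxNbAttempts = 1 << (compressionLevel - 1) chain probes per position.
 */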
static int LZ4HC_compress_generic(
	LZ4HC_CCtx_internal *const ctx,
	const char * const source,
	char * const dest,
	int const inputSize,
	int const maxOutputSize,
	int compressionLevel,
	limitedOutput_directive limit
	)
{
	const BYTE *ip = (const BYTE *) source;
	const BYTE *anchor = ip;
	const BYTE * const iend = ip + inputSize;
	const BYTE * const mflimit = iend - MFLIMIT;
	const BYTE * const matchlimit = (iend - LASTLITERALS);

	BYTE *op = (BYTE *) dest;
	BYTE * const oend = op + maxOutputSize;

	unsigned int maxNbAttempts;
	int ml, ml2, ml3, ml0;
	const BYTE *ref = NULL;
	const BYTE *start2 = NULL;
	const BYTE *ref2 = NULL;
	const BYTE *start3 = NULL;
	const BYTE *ref3 = NULL;
	const BYTE *start0;
	const BYTE *ref0;

	/* init */
	if (compressionLevel > LZ4HC_MAX_CLEVEL)
		compressionLevel = LZ4HC_MAX_CLEVEL;
	if (compressionLevel < 1)
		compressionLevel = LZ4HC_DEFAULT_CLEVEL;
	maxNbAttempts = 1 << (compressionLevel - 1);
	ctx->end += inputSize;

	ip++;

	/* Main Loop */
	while (ip < mflimit) {
		ml = LZ4HC_InsertAndFindBestMatch(ctx, ip,
			matchlimit, (&ref), maxNbAttempts);
		if (!ml) {
			ip++;
			continue;
		}

		/* saved, in case we would skip too much */
		start0 = ip;
		ref0 = ref;
		ml0 = ml;

_Search2:
		if (ip + ml < mflimit)
			ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
				ip + ml - 2, ip + 0,
				matchlimit, ml, &ref2,
				&start2, maxNbAttempts);
		else
			ml2 = ml;

		if (ml2 == ml) {
			/* No better match */
			if (LZ4HC_encodeSequence(&ip, &op,
				&anchor, ml, ref, limit, oend))
				return 0;
			continue;
		}

		if (start0 < ip) {
			if (start2 < ip + ml0) {
				/* empirical */
				ip = start0;
				ref = ref0;
				ml = ml0;
			}
		}

		/* Here, start0 == ip */
		if ((start2 - ip) < 3) {
			/* First Match too small : removed */
			ml = ml2;
			ip = start2;
			ref = ref2;
			goto _Search2;
		}

_Search3:
		/*
		 * Currently we have :
		 * ml2 > ml1, and
		 * ip1 + 3 <= ip2 (usually < ip1 + ml1)
		 */
		if ((start2 - ip) < OPTIMAL_ML) {
			int correction;
			int new_ml = ml;

			if (new_ml > OPTIMAL_ML)
				new_ml = OPTIMAL_ML;
			if (ip + new_ml > start2 + ml2 - MINMATCH)
				new_ml = (int)(start2 - ip) + ml2 - MINMATCH;

			correction = new_ml - (int)(start2 - ip);

			if (correction > 0) {
				start2 += correction;
				ref2 += correction;
				ml2 -= correction;
			}
		}
		/*
		 * Now, we have start2 = ip + new_ml,
		 * with new_ml = min(ml, OPTIMAL_ML = 18)
		 */

		if (start2 + ml2 < mflimit)
			ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
				start2 + ml2 - 3, start2,
				matchlimit, ml2, &ref3, &start3,
				maxNbAttempts);
		else
			ml3 = ml2;

		if (ml3 == ml2) {
			/* No better match : 2 sequences to encode */
			/* ip & ref are known; Now for ml */
			if (start2 < ip + ml)
				ml = (int)(start2 - ip);
			/* Now, encode 2 sequences */
			if (LZ4HC_encodeSequence(&ip, &op, &anchor,
				ml, ref, limit, oend))
				return 0;
			ip = start2;
			if (LZ4HC_encodeSequence(&ip, &op, &anchor,
				ml2, ref2, limit, oend))
				return 0;
			continue;
		}

		if (start3 < ip + ml + 3) {
			/* Not enough space for match 2 : remove it */
			if (start3 >= (ip + ml)) {
				/* can write Seq1 immediately
				 * ==> Seq2 is removed,
				 * so Seq3 becomes Seq1
				 */
				if (start2 < ip + ml) {
					int correction = (int)(ip + ml - start2);

					start2 += correction;
					ref2 += correction;
					ml2 -= correction;
					if (ml2 < MINMATCH) {
						start2 = start3;
						ref2 = ref3;
						ml2 = ml3;
					}
				}

				if (LZ4HC_encodeSequence(&ip, &op, &anchor,
					ml, ref, limit, oend))
					return 0;
				ip = start3;
				ref = ref3;
				ml = ml3;

				start0 = start2;
				ref0 = ref2;
				ml0 = ml2;
				goto _Search2;
			}

			start2 = start3;
			ref2 = ref3;
			ml2 = ml3;
			goto _Search3;
		}

		/*
		 * OK, now we have 3 ascending matches;
		 * let's write at least the first one
		 * ip & ref are known; Now for ml
		 */
		if (start2 < ip + ml) {
			if ((start2 - ip) < (int)ML_MASK) {
				int correction;

				if (ml > OPTIMAL_ML)
					ml = OPTIMAL_ML;
				if (ip + ml > start2 + ml2 - MINMATCH)
					ml = (int)(start2 - ip) + ml2 - MINMATCH;
				correction = ml - (int)(start2 - ip);
				if (correction > 0) {
					start2 += correction;
					ref2 += correction;
					ml2 -= correction;
				}
			} else
				ml = (int)(start2 - ip);
		}
		if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml,
			ref, limit, oend))
			return 0;

		ip = start2;
		ref = ref2;
		ml = ml2;

		start2 = start3;
		ref2 = ref3;
		ml2 = ml3;

		goto _Search3;
	}

	/* Encode Last Literals */
	{
		int lastRun = (int)(iend - anchor);

		if ((limit)
			&& (((char *)op - dest) + lastRun + 1
				+ ((lastRun + 255 - RUN_MASK)/255)
					> (U32)maxOutputSize)) {
			/* Check output limit */
			return 0;
		}
		if (lastRun >= (int)RUN_MASK) {
			*op++ = (RUN_MASK<<ML_BITS);
			lastRun -= RUN_MASK;
			for (; lastRun > 254 ; lastRun -= 255)
				*op++ = 255;
			*op++ = (BYTE) lastRun;
		} else
			*op++ = (BYTE)(lastRun<<ML_BITS);
		LZ4_memcpy(op, anchor, iend - anchor);
		op += iend - anchor;
	}

	/* End */
	return (int) (((char *)op) - dest);
}

static int LZ4_compress_HC_extStateHC(
	void *state,
	const char *src,
	char *dst,
	int srcSize,
	int maxDstSize,
	int compressionLevel)
{
	LZ4HC_CCtx_internal *ctx = &((LZ4_streamHC_t *)state)->internal_donotuse;

	if (((size_t)(state)&(sizeof(void *) - 1)) != 0) {
		/* Error : state is not aligned
		 * for pointers (32 or 64 bits)
		 */
		return 0;
	}

	LZ4HC_init(ctx, (const BYTE *)src);

	if (maxDstSize < LZ4_compressBound(srcSize))
		return LZ4HC_compress_generic(ctx, src, dst,
			srcSize, maxDstSize, compressionLevel, limitedOutput);
	else
		return LZ4HC_compress_generic(ctx, src, dst,
			srcSize, maxDstSize, compressionLevel, noLimit);
}

int LZ4_compress_HC(const char *src, char *dst, int srcSize,
	int maxDstSize, int compressionLevel, void *wrkmem)
{
	return LZ4_compress_HC_extStateHC(wrkmem, src, dst,
		srcSize, maxDstSize, compressionLevel);
}
EXPORT_SYMBOL(LZ4_compress_HC);
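
/*
 * One-shot usage sketch (illustrative, not part of this file): the caller
 * supplies a work buffer of LZ4HC_MEM_COMPRESS bytes and a destination sized
 * with LZ4_compressBound(). Declarations come from <linux/lz4.h>; the
 * allocation strategy, variable names and error handling below are only an
 * example.
 *
 *	void *wrkmem = kvmalloc(LZ4HC_MEM_COMPRESS, GFP_KERNEL);
 *	char *dst = kvmalloc(LZ4_compressBound(src_len), GFP_KERNEL);
 *	int out_len = 0;
 *
 *	if (wrkmem && dst)
 *		out_len = LZ4_compress_HC(src, dst, src_len,
 *					  LZ4_compressBound(src_len),
 *					  LZ4HC_DEFAULT_CLEVEL, wrkmem);
 *	if (!out_len)
 *		pr_warn("lz4hc: compression failed or output did not fit\n");
 *	kvfree(dst);
 *	kvfree(wrkmem);
 */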

/**************************************
 *	Streaming Functions
 **************************************/
void LZ4_resetStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel)
{
	LZ4_streamHCPtr->internal_donotuse.base = NULL;
	LZ4_streamHCPtr->internal_donotuse.compressionLevel = (unsigned int)compressionLevel;
}
EXPORT_SYMBOL(LZ4_resetStreamHC);

int LZ4_loadDictHC(LZ4_streamHC_t *LZ4_streamHCPtr,
	const char *dictionary,
	int dictSize)
{
	LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;

	if (dictSize > 64 * KB) {
		dictionary += dictSize - 64 * KB;
		dictSize = 64 * KB;
	}
	LZ4HC_init(ctxPtr, (const BYTE *)dictionary);
	if (dictSize >= 4)
		LZ4HC_Insert(ctxPtr, (const BYTE *)dictionary + (dictSize - 3));
	ctxPtr->end = (const BYTE *)dictionary + dictSize;
	return dictSize;
}
EXPORT_SYMBOL(LZ4_loadDictHC);

/* compression */

static void LZ4HC_setExternalDict(
	LZ4HC_CCtx_internal *ctxPtr,
	const BYTE *newBlock)
{
	if (ctxPtr->end >= ctxPtr->base + 4) {
		/* Referencing remaining dictionary content */
		LZ4HC_Insert(ctxPtr, ctxPtr->end - 3);
	}

	/*
	 * Only one memory segment for extDict,
	 * so any previous extDict is lost at this stage
	 */
	ctxPtr->lowLimit = ctxPtr->dictLimit;
	ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
	ctxPtr->dictBase = ctxPtr->base;
	ctxPtr->base = newBlock - ctxPtr->dictLimit;
	ctxPtr->end = newBlock;
	/* match referencing will resume from there */
	ctxPtr->nextToUpdate = ctxPtr->dictLimit;
}

static int LZ4_compressHC_continue_generic(
	LZ4_streamHC_t *LZ4_streamHCPtr,
	const char *source,
	char *dest,
	int inputSize,
	int maxOutputSize,
	limitedOutput_directive limit)
{
	LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;

	/* auto-init if forgotten */
	if (ctxPtr->base == NULL)
		LZ4HC_init(ctxPtr, (const BYTE *) source);

	/* Check overflow */
	if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 * GB) {
		size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base)
			- ctxPtr->dictLimit;
		if (dictSize > 64 * KB)
			dictSize = 64 * KB;
		LZ4_loadDictHC(LZ4_streamHCPtr,
			(const char *)(ctxPtr->end) - dictSize, (int)dictSize);
	}

	/* Check if blocks follow each other */
	if ((const BYTE *)source != ctxPtr->end)
		LZ4HC_setExternalDict(ctxPtr, (const BYTE *)source);

	/* Check overlapping input/dictionary space */
	{
		const BYTE *sourceEnd = (const BYTE *) source + inputSize;
		const BYTE * const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
		const BYTE * const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;

		if ((sourceEnd > dictBegin)
			&& ((const BYTE *)source < dictEnd)) {
			if (sourceEnd > dictEnd)
				sourceEnd = dictEnd;
			ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);

			if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4)
				ctxPtr->lowLimit = ctxPtr->dictLimit;
		}
	}

	return LZ4HC_compress_generic(ctxPtr, source, dest,
		inputSize, maxOutputSize, ctxPtr->compressionLevel, limit);
}

int LZ4_compress_HC_continue(
	LZ4_streamHC_t *LZ4_streamHCPtr,
	const char *source,
	char *dest,
	int inputSize,
	int maxOutputSize)
{
	if (maxOutputSize < LZ4_compressBound(inputSize))
		return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
			source, dest, inputSize, maxOutputSize, limitedOutput);
	else
		return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
			source, dest, inputSize, maxOutputSize, noLimit);
}
EXPORT_SYMBOL(LZ4_compress_HC_continue);
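
/*
 * Streaming usage sketch (illustrative only): the API declarations come from
 * <linux/lz4.h>, while next_block(), in, out, in_len and out_capacity are
 * hypothetical caller-side names; error handling is omitted. The stream is
 * reset once, then the input is fed block by block so that later blocks may
 * reference up to 64 KB of earlier data; a return value of 0 means the
 * destination buffer was too small.
 *
 *	LZ4_streamHC_t *stream = kvmalloc(sizeof(*stream), GFP_KERNEL);
 *
 *	LZ4_resetStreamHC(stream, LZ4HC_DEFAULT_CLEVEL);
 *	while (next_block(&in, &in_len)) {
 *		int n = LZ4_compress_HC_continue(stream, in, out,
 *						 in_len, out_capacity);
 *		if (!n)
 *			break;
 *	}
 *	kvfree(stream);
 */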

/* dictionary saving */

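/*
 * Copies the last (up to 64 KB) of the current prefix into safeBuffer and
 * rebases the stream state onto it, so the next block can keep referencing
 * that window even after the original input buffer is reused.
 */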
int LZ4_saveDictHC(
	LZ4_streamHC_t *LZ4_streamHCPtr,
	char *safeBuffer,
	int dictSize)
{
	LZ4HC_CCtx_internal *const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
	int const prefixSize = (int)(streamPtr->end
		- (streamPtr->base + streamPtr->dictLimit));

	if (dictSize > 64 * KB)
		dictSize = 64 * KB;
	if (dictSize < 4)
		dictSize = 0;
	if (dictSize > prefixSize)
		dictSize = prefixSize;

	memmove(safeBuffer, streamPtr->end - dictSize, dictSize);

	{
		U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);

		streamPtr->end = (const BYTE *)safeBuffer + dictSize;
		streamPtr->base = streamPtr->end - endIndex;
		streamPtr->dictLimit = endIndex - dictSize;
		streamPtr->lowLimit = endIndex - dictSize;

		if (streamPtr->nextToUpdate < streamPtr->dictLimit)
			streamPtr->nextToUpdate = streamPtr->dictLimit;
	}
	return dictSize;
}
EXPORT_SYMBOL(LZ4_saveDictHC);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 HC compressor");