/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* This header contains definitions
 * that shall **only** be used by modules within lib/compress.
 */

#ifndef ZSTD_COMPRESS_H
#define ZSTD_COMPRESS_H

/*-*************************************
*  Dependencies
***************************************/
#include "../common/zstd_internal.h"
#include "zstd_cwksp.h"
#ifdef ZSTD_MULTITHREAD
#  include "zstdmt_compress.h"
#endif

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
*  Constants
***************************************/
#define kSearchStrength      8
#define HASH_READ_SIZE       8
#define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted".
                                       It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
                                       It's not a big deal though : candidate will just be sorted again.
                                       Additionally, candidate position 1 will be lost.
                                       But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
                                       This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */


/*-*************************************
*  Context memory management
***************************************/
typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;

typedef struct ZSTD_prefixDict_s {
    const void* dict;
    size_t dictSize;
    ZSTD_dictContentType_e dictContentType;
} ZSTD_prefixDict;

typedef struct {
    void* dictBuffer;
    void const* dict;
    size_t dictSize;
    ZSTD_dictContentType_e dictContentType;
    ZSTD_CDict* cdict;
} ZSTD_localDict;

typedef struct {
    HUF_CElt CTable[HUF_CTABLE_SIZE_U32(255)];
    HUF_repeat repeatMode;
} ZSTD_hufCTables_t;

typedef struct {
    FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
    FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
    FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
    FSE_repeat offcode_repeatMode;
    FSE_repeat matchlength_repeatMode;
    FSE_repeat litlength_repeatMode;
} ZSTD_fseCTables_t;

typedef struct {
    ZSTD_hufCTables_t huf;
    ZSTD_fseCTables_t fse;
} ZSTD_entropyCTables_t;

typedef struct {
    U32 off;            /* Offset code (offset + ZSTD_REP_MOVE) for the match */
    U32 len;            /* Raw length of match */
} ZSTD_match_t;

typedef struct {
    U32 offset;         /* Offset of sequence */
    U32 litLength;      /* Length of literals prior to match */
    U32 matchLength;    /* Raw length of match */
} rawSeq;

typedef struct {
  rawSeq* seq;          /* The start of the sequences */
  size_t pos;           /* The index in seq where reading stopped. pos <= size. */
  size_t posInSequence; /* The position within the sequence at seq[pos] where reading
                           stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */
  size_t size;          /* The number of sequences. <= capacity. */
  size_t capacity;      /* The capacity starting from `seq` pointer */
} rawSeqStore_t;

UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};

typedef struct {
    int price;
    U32 off;
    U32 mlen;
    U32 litlen;
    U32 rep[ZSTD_REP_NUM];
} ZSTD_optimal_t;

typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;

typedef struct {
    /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
    unsigned* litFreq;           /* table of literals statistics, of size 256 */
    unsigned* litLengthFreq;     /* table of litLength statistics, of size (MaxLL+1) */
    unsigned* matchLengthFreq;   /* table of matchLength statistics, of size (MaxML+1) */
    unsigned* offCodeFreq;       /* table of offCode statistics, of size (MaxOff+1) */
    ZSTD_match_t* matchTable;    /* list of found matches, of size ZSTD_OPT_NUM+1 */
    ZSTD_optimal_t* priceTable;  /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */

    U32  litSum;                 /* nb of literals */
    U32  litLengthSum;           /* nb of litLength codes */
    U32  matchLengthSum;         /* nb of matchLength codes */
    U32  offCodeSum;             /* nb of offset codes */
    U32  litSumBasePrice;        /* to compare to log2(litfreq) */
    U32  litLengthSumBasePrice;  /* to compare to log2(llfreq)  */
    U32  matchLengthSumBasePrice;/* to compare to log2(mlfreq)  */
    U32  offCodeSumBasePrice;    /* to compare to log2(offreq)  */
    ZSTD_OptPrice_e priceType;   /* prices can be determined dynamically, or follow a pre-defined cost structure */
    const ZSTD_entropyCTables_t* symbolCosts;  /* pre-calculated dictionary statistics */
    ZSTD_literalCompressionMode_e literalCompressionMode;
} optState_t;

typedef struct {
  ZSTD_entropyCTables_t entropy;
  U32 rep[ZSTD_REP_NUM];
} ZSTD_compressedBlockState_t;

typedef struct {
    BYTE const* nextSrc;    /* next block here to continue on current prefix */
    BYTE const* base;       /* All regular indexes relative to this position */
    BYTE const* dictBase;   /* extDict indexes relative to this position */
    U32 dictLimit;          /* below that point, need extDict */
    U32 lowLimit;           /* below that point, no more valid data */
} ZSTD_window_t;

typedef struct ZSTD_matchState_t ZSTD_matchState_t;
struct ZSTD_matchState_t {
    ZSTD_window_t window;   /* State for window round buffer management */
    U32 loadedDictEnd;      /* index of end of dictionary, within context's referential.
                             * When loadedDictEnd != 0, a dictionary is in use, and still valid.
                             * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance.
                             * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity().
                             * When dict referential is copied into active context (i.e. not attached),
                             * loadedDictEnd == dictSize, since referential starts from zero.
                             */
    U32 nextToUpdate;       /* index from which to continue table update */
    U32 hashLog3;           /* dispatch table for matches of len==3 : larger == faster, more memory */
    U32* hashTable;
    U32* hashTable3;
    U32* chainTable;
    int dedicatedDictSearch;  /* Indicates whether this matchState is using the
                               * dedicated dictionary search structure.
                               */
    optState_t opt;         /* optimal parser state */
    const ZSTD_matchState_t* dictMatchState;
    ZSTD_compressionParameters cParams;
    const rawSeqStore_t* ldmSeqStore;
};

typedef struct {
    ZSTD_compressedBlockState_t* prevCBlock;
    ZSTD_compressedBlockState_t* nextCBlock;
    ZSTD_matchState_t matchState;
} ZSTD_blockState_t;

typedef struct {
    U32 offset;
    U32 checksum;
} ldmEntry_t;

typedef struct {
    ZSTD_window_t window;   /* State for the window round buffer management */
    ldmEntry_t* hashTable;
    U32 loadedDictEnd;
    BYTE* bucketOffsets;    /* Next position in bucket to insert entry */
    U64 hashPower;          /* Used to compute the rolling hash.
                             * Depends on ldmParams.minMatchLength */
} ldmState_t;
typedef struct {
    U32 enableLdm;          /* 1 if long distance matching is enabled */
    U32 hashLog;            /* Log size of hashTable */
    U32 bucketSizeLog;      /* Log bucket size for collision resolution, at most 8 */
    U32 minMatchLength;     /* Minimum match length */
    U32 hashRateLog;        /* Log number of entries to skip */
    U32 windowLog;          /* Window log for the LDM */
} ldmParams_t;

typedef struct {
    int collectSequences;
    ZSTD_Sequence* seqStart;
    size_t seqIndex;
    size_t maxSequences;
} SeqCollector;

struct ZSTD_CCtx_params_s {
    ZSTD_format_e format;
    ZSTD_compressionParameters cParams;
    ZSTD_frameParameters fParams;

    int compressionLevel;
    int forceWindow;           /* force back-references to respect limit of
                                * 1<<wLog, even for dictionary */
    size_t targetCBlockSize;   /* Tries to fit compressed block size to be around targetCBlockSize.
                                * No target when targetCBlockSize == 0.
                                * There is no guarantee on compressed block size */
    int srcSizeHint;           /* User's best guess of source size.
                                * Hint is not valid when srcSizeHint == 0.
                                * There is no guarantee that hint is close to actual source size */

    ZSTD_dictAttachPref_e attachDictPref;
    ZSTD_literalCompressionMode_e literalCompressionMode;

    /* Multithreading: used to pass parameters to mtctx */
    int nbWorkers;
    size_t jobSize;
    int overlapLog;
    int rsyncable;

    /* Long distance matching parameters */
    ldmParams_t ldmParams;

    /* Dedicated dict search algorithm trigger */
    int enableDedicatedDictSearch;

    /* Input/output buffer modes */
    ZSTD_bufferMode_e inBufferMode;
    ZSTD_bufferMode_e outBufferMode;

    /* Sequence compression API */
    ZSTD_sequenceFormat_e blockDelimiters;
    int validateSequences;

    /* Internal use, for createCCtxParams() and freeCCtxParams() only */
    ZSTD_customMem customMem;
};  /* typedef'd to ZSTD_CCtx_params within "zstd.h" */

#define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2))
#define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE)

/**
 * Indicates whether this compression proceeds directly from user-provided
 * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or
 * whether the context needs to buffer the input/output (ZSTDb_buffered).
 */
typedef enum {
    ZSTDb_not_buffered,
    ZSTDb_buffered
} ZSTD_buffered_policy_e;

struct ZSTD_CCtx_s {
    ZSTD_compressionStage_e stage;
    int cParamsChanged;                  /* == 1 if cParams (except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
    int bmi2;                            /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
    ZSTD_CCtx_params requestedParams;
    ZSTD_CCtx_params appliedParams;
    U32   dictID;

    ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
    size_t blockSize;
    unsigned long long pledgedSrcSizePlusOne;  /* this way, 0 (default) == unknown */
    unsigned long long consumedSrcSize;
    unsigned long long producedCSize;
    XXH64_state_t xxhState;
    ZSTD_customMem customMem;
    ZSTD_threadPool* pool;
    size_t staticSize;
    SeqCollector seqCollector;
    int isFirstBlock;
    int initialized;

    seqStore_t seqStore;      /* sequences storage ptrs */
    ldmState_t ldmState;      /* long distance matching state */
    rawSeq* ldmSequences;     /* Storage for the ldm output sequences */
    size_t maxNbLdmSequences;
    rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
    ZSTD_blockState_t blockState;
    U32* entropyWorkspace;  /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */
    /* Whether we are streaming or not */
    ZSTD_buffered_policy_e bufferedPolicy;

    /* streaming */
    char*  inBuff;
    size_t inBuffSize;
    size_t inToCompress;
    size_t inBuffPos;
    size_t inBuffTarget;
    char*  outBuff;
    size_t outBuffSize;
    size_t outBuffContentSize;
    size_t outBuffFlushedSize;
    ZSTD_cStreamStage streamStage;
    U32    frameEnded;

    /* Stable in/out buffer verification */
    ZSTD_inBuffer expectedInBuffer;
    size_t expectedOutBufferSize;

    /* Dictionary */
    ZSTD_localDict localDict;
    const ZSTD_CDict* cdict;
    ZSTD_prefixDict prefixDict;   /* single-usage dictionary */

    /* Multi-threading */
#ifdef ZSTD_MULTITHREAD
    ZSTDMT_CCtx* mtctx;
#endif
};

typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;

typedef enum {
    ZSTD_noDict = 0,
    ZSTD_extDict = 1,
    ZSTD_dictMatchState = 2,
    ZSTD_dedicatedDictSearch = 3
} ZSTD_dictMode_e;

typedef enum {
    ZSTD_cpm_noAttachDict = 0,  /* Compression with ZSTD_noDict or ZSTD_extDict.
                                 * In this mode we use both the srcSize and the dictSize
                                 * when selecting and adjusting parameters.
                                 */
    ZSTD_cpm_attachDict = 1,    /* Compression with ZSTD_dictMatchState or ZSTD_dedicatedDictSearch.
                                 * In this mode we only take the srcSize into account when selecting
                                 * and adjusting parameters.
                                 */
    ZSTD_cpm_createCDict = 2,   /* Creating a CDict.
                                 * In this mode we take both the source size and the dictionary size
                                 * into account when selecting and adjusting the parameters.
                                 */
    ZSTD_cpm_unknown = 3,       /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams.
                                 * We don't know what these parameters are for. We default to the legacy
                                 * behavior of taking both the source size and the dict size into account
                                 * when selecting and adjusting parameters.
                                 */
} ZSTD_cParamMode_e;

typedef size_t (*ZSTD_blockCompressor) (
        ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);


MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
{
    static const BYTE LL_Code[64] = {  0,  1,  2,  3,  4,  5,  6,  7,
                                       8,  9, 10, 11, 12, 13, 14, 15,
                                      16, 16, 17, 17, 18, 18, 19, 19,
                                      20, 20, 20, 20, 21, 21, 21, 21,
                                      22, 22, 22, 22, 22, 22, 22, 22,
                                      23, 23, 23, 23, 23, 23, 23, 23,
                                      24, 24, 24, 24, 24, 24, 24, 24,
                                      24, 24, 24, 24, 24, 24, 24, 24 };
    static const U32 LL_deltaCode = 19;
    return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
}
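
/* Worked example (illustrative): literal lengths below 64 map directly
 * through LL_Code, while longer lengths collapse into logarithmic buckets.
 * Assuming ZSTD_highbit32() returns the position of the highest set bit:
 *
 *     ZSTD_LLcode(10)  == LL_Code[10] == 10
 *     ZSTD_LLcode(100) == ZSTD_highbit32(100) + 19 == 6 + 19 == 25
 *
 * so every literal length in [64, 127] shares code 25, matching the format's
 * "baseline + extra bits" encoding of long literal lengths.
 */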

/* ZSTD_MLcode() :
 * note : mlBase = matchLength - MINMATCH,
 *        because that's the format in which it's stored in seqStore->sequences */
MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
{
    static const BYTE ML_Code[128] = { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
                                      16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                                      32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
                                      38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
                                      40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
                                      41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
    static const U32 ML_deltaCode = 36;
    return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
}
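
/* Worked example (illustrative), parallel to ZSTD_LLcode(): for a raw match
 * length of 200, mlBase = 200 - MINMATCH = 197 > 127, so
 *
 *     ZSTD_MLcode(197) == ZSTD_highbit32(197) + 36 == 7 + 36 == 43
 *
 * and all mlBase values in [128, 255] therefore share code 43.
 */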

typedef struct repcodes_s {
    U32 rep[3];
} repcodes_t;

MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
{
    repcodes_t newReps;
    if (offset >= ZSTD_REP_NUM) {  /* full offset */
        newReps.rep[2] = rep[1];
        newReps.rep[1] = rep[0];
        newReps.rep[0] = offset - ZSTD_REP_MOVE;
    } else {   /* repcode */
        U32 const repCode = offset + ll0;
        if (repCode > 0) {  /* note : if repCode==0, no change */
            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
            newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
            newReps.rep[1] = rep[0];
            newReps.rep[0] = currentOffset;
        } else {   /* repCode == 0 */
            ZSTD_memcpy(&newReps, rep, sizeof(newReps));
        }
    }
    return newReps;
}
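
/* Usage sketch (illustrative, made-up values): how the repcode history
 * evolves when storing a full offset versus a repcode.
 *
 *     U32 rep[3] = { 4, 8, 16 };
 *
 *     // full offset 100, encoded as offCode = 100 + ZSTD_REP_MOVE :
 *     repcodes_t r1 = ZSTD_updateRep(rep, 100 + ZSTD_REP_MOVE, 0);
 *     // r1.rep == { 100, 4, 8 } : history shifts, oldest entry dropped
 *
 *     // repcode: offCode == 1 with ll0 == 1 gives repCode == 2, i.e. rep[2] :
 *     repcodes_t r2 = ZSTD_updateRep(rep, 1, 1);
 *     // r2.rep == { 16, 4, 8 } : the selected offset moves to the front
 */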

/* ZSTD_cParam_withinBounds:
 * @return 1 if value is within cParam bounds,
 * 0 otherwise */
MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
{
    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
    if (ZSTD_isError(bounds.error)) return 0;
    if (value < bounds.lowerBound) return 0;
    if (value > bounds.upperBound) return 0;
    return 1;
}

/* ZSTD_noCompressBlock() :
 * Writes uncompressed block to dst buffer from given src.
 * Returns the size of the block */
MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
{
    U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
    RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
                    dstSize_tooSmall, "dst buf too small for uncompressed block");
    MEM_writeLE24(dst, cBlockHeader24);
    ZSTD_memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
    return ZSTD_blockHeaderSize + srcSize;
}
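
/* Block header recap (per the zstd format, RFC 8878): cBlockHeader24 packs,
 * from bit 0 upward, the last-block flag (1 bit), the block type (2 bits),
 * and the block size (21 bits). Worked example for a raw, non-last block of
 * 1000 bytes:
 *
 *     // lastBlock == 0, bt_raw == 0, srcSize == 1000 :
 *     // header == 0 + (0<<1) + (1000<<3) == 8000 == 0x001F40,
 *     // written little-endian as the 3 bytes 40 1F 00.
 */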

MEM_STATIC size_t ZSTD_rleCompressBlock (void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
{
    BYTE* const op = (BYTE*)dst;
    U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3);
    RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, "");
    MEM_writeLE24(op, cBlockHeader);
    op[3] = src;
    return 4;
}


/* ZSTD_minGain() :
 * minimum compression required
 * to generate a compressed block or a compressed literals section.
 * note : use same formula for both situations */
MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
{
    U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
    ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
    return (srcSize >> minlog) + 2;
}
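
/* Worked example (illustrative): for a 10000-byte input with a fast strategy
 * (minlog == 6), ZSTD_minGain() == (10000 >> 6) + 2 == 158, i.e. a compressed
 * block must save at least 158 bytes over raw storage to be emitted as
 * compressed. For ZSTD_btultra2 (strat == 9), minlog == 8, lowering the
 * threshold to (10000 >> 8) + 2 == 41 bytes.
 */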

MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
{
    switch (cctxParams->literalCompressionMode) {
    case ZSTD_lcm_huffman:
        return 0;
    case ZSTD_lcm_uncompressed:
        return 1;
    default:
        assert(0 /* impossible: pre-validated */);
        /* fall-through */
    case ZSTD_lcm_auto:
        return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
    }
}

/*! ZSTD_safecopyLiterals() :
 *  memcpy() variant that won't read more than WILDCOPY_OVERLENGTH bytes past ilimit_w.
 *  Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
 *  large copies.
 */
static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) {
    assert(iend > ilimit_w);
    if (ip <= ilimit_w) {
        ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
        op += ilimit_w - ip;
        ip = ilimit_w;
    }
    while (ip < iend) *op++ = *ip++;
}

/*! ZSTD_storeSeq() :
 *  Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t.
 *  `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes).
 *  `mlBase` : matchLength - MINMATCH
 *  Allowed to overread literals up to litLimit.
*/
HINT_INLINE UNUSED_ATTR
void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase)
{
    BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
    BYTE const* const litEnd = literals + litLength;
#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
    static const BYTE* g_start = NULL;
    if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
    {   U32 const pos = (U32)((const BYTE*)literals - g_start);
        DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
               pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode);
    }
#endif
    assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
    /* copy Literals */
    assert(seqStorePtr->maxNbLit <= 128 KB);
    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
    assert(literals + litLength <= litLimit);
    if (litEnd <= litLimit_w) {
        /* Common case: we can use wildcopy.
         * First copy 16 bytes, because literals are likely short.
         */
        assert(WILDCOPY_OVERLENGTH >= 16);
        ZSTD_copy16(seqStorePtr->lit, literals);
        if (litLength > 16) {
            ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
        }
    } else {
        ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);
    }
    seqStorePtr->lit += litLength;

    /* literal Length */
    if (litLength>0xFFFF) {
        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
        seqStorePtr->longLengthID = 1;
        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    }
    seqStorePtr->sequences[0].litLength = (U16)litLength;

    /* match offset */
    seqStorePtr->sequences[0].offset = offCode + 1;

    /* match Length */
    if (mlBase>0xFFFF) {
        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
        seqStorePtr->longLengthID = 2;
        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    }
    seqStorePtr->sequences[0].matchLength = (U16)mlBase;

    seqStorePtr->sequences++;
}
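
/* Usage sketch (illustrative, hypothetical values): a block compressor that
 * found a 20-byte match at distance 512, preceded by 5 literals starting at
 * ip, might store:
 *
 *     // offCode = distance + ZSTD_REP_MOVE (real offset), mlBase = matchLength - MINMATCH :
 *     ZSTD_storeSeq(seqStore, 5, ip, iend, 512 + ZSTD_REP_MOVE, 20 - MINMATCH);
 *
 * while a repeat of the most recent offset with no preceding literals would
 * pass offCode == 0. litLimit (iend here) only bounds how far literals may be
 * over-read by the wildcopy, not how many are stored.
 */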


/*-*************************************
*  Match length counter
***************************************/
static unsigned ZSTD_NbCommonBytes (size_t val)
{
    if (MEM_isLittleEndian()) {
        if (MEM_64bits()) {
#       if defined(_MSC_VER) && defined(_WIN64)
#           if STATIC_BMI2
                return _tzcnt_u64(val) >> 3;
#           else
                unsigned long r = 0;
                return _BitScanForward64( &r, (U64)val ) ? (unsigned)(r >> 3) : 0;
#           endif
#       elif defined(__GNUC__) && (__GNUC__ >= 4)
            return (__builtin_ctzll((U64)val) >> 3);
#       else
            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
                                                     0, 3, 1, 3, 1, 4, 2, 7,
                                                     0, 2, 3, 6, 1, 5, 3, 5,
                                                     1, 3, 4, 4, 2, 5, 6, 7,
                                                     7, 0, 1, 2, 3, 3, 4, 6,
                                                     2, 6, 5, 5, 3, 4, 5, 6,
                                                     7, 1, 2, 4, 6, 4, 4, 5,
                                                     7, 2, 6, 5, 7, 6, 7, 7 };
            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
#       endif
        } else { /* 32 bits */
#       if defined(_MSC_VER)
            unsigned long r=0;
            return _BitScanForward( &r, (U32)val ) ? (unsigned)(r >> 3) : 0;
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_ctz((U32)val) >> 3);
#       else
            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
                                                     3, 2, 2, 1, 3, 2, 0, 1,
                                                     3, 3, 1, 2, 2, 2, 2, 0,
                                                     3, 1, 2, 0, 1, 0, 1, 1 };
            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#       endif
        }
    } else {  /* Big Endian CPU */
        if (MEM_64bits()) {
#       if defined(_MSC_VER) && defined(_WIN64)
#           if STATIC_BMI2
                return _lzcnt_u64(val) >> 3;
#           else
                unsigned long r = 0;
                return _BitScanReverse64(&r, (U64)val) ? (unsigned)(r >> 3) : 0;
#           endif
#       elif defined(__GNUC__) && (__GNUC__ >= 4)
            return (__builtin_clzll(val) >> 3);
#       else
            unsigned r;
            const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */
            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
            r += (!val);
            return r;
#       endif
        } else { /* 32 bits */
#       if defined(_MSC_VER)
            unsigned long r = 0;
            return _BitScanReverse( &r, (unsigned long)val ) ? (unsigned)(r >> 3) : 0;
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_clz((U32)val) >> 3);
#       else
            unsigned r;
            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
            r += (!val);
            return r;
#       endif
    }   }
}
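
/* Worked example (illustrative): given the XOR of two 8-byte loads, the count
 * of equal leading bytes is the index of the first differing byte. On a
 * little-endian CPU that is the trailing-zero count divided by 8:
 *
 *     // bytes 0 and 1 equal, byte 2 differs :
 *     // val == 0x0000000000FF0000 -> ctz == 16 -> 16 >> 3 == 2 common bytes.
 */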


MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
{
    const BYTE* const pStart = pIn;
    const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);

    if (pIn < pInLoopLimit) {
        { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
          if (diff) return ZSTD_NbCommonBytes(diff); }
        pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
        while (pIn < pInLoopLimit) {
            size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
            if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
            pIn += ZSTD_NbCommonBytes(diff);
            return (size_t)(pIn - pStart);
    }   }
    if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
    if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
    return (size_t)(pIn - pStart);
}
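
/* Usage sketch (illustrative): ZSTD_count() is the inner loop of every match
 * finder. It compares word-at-a-time and uses ZSTD_NbCommonBytes() to resolve
 * the first mismatching word:
 *
 *     size_t const matchLength = ZSTD_count(ip, match, iend);
 *     // matchLength == number of identical bytes starting at ip and match,
 *     // with reads on the ip side bounded by iend.
 */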

/** ZSTD_count_2segments() :
 *  can count match length with `ip` & `match` in 2 different segments.
 *  convention : on reaching mEnd, match count continues, starting again from iStart
 */
MEM_STATIC size_t
ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
                     const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
{
    const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
    size_t const matchLength = ZSTD_count(ip, match, vEnd);
    if (match + matchLength != mEnd) return matchLength;
    DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
    DEBUGLOG(7, "distance from match beginning to end of dictionary = %zi", mEnd - match);
    DEBUGLOG(7, "distance from current pos to end of buffer = %zi", iEnd - ip);
    DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
    DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
    return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
}


/*-*************************************
 *  Hashes
 ***************************************/
static const U32 prime3bytes = 506832829U;
static U32    ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes)  >> (32-h) ; }
MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */

static const U32 prime4bytes = 2654435761U;
static U32    ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }

static const U64 prime5bytes = 889523592379ULL;
static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u  << (64-40)) * prime5bytes) >> (64-h)) ; }
static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }

static const U64 prime6bytes = 227718039650203ULL;
static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }
static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }

static const U64 prime7bytes = 58295818150454627ULL;
static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u  << (64-56)) * prime7bytes) >> (64-h)) ; }
static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }

static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }

MEM_STATIC FORCE_INLINE_ATTR
size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
{
    switch(mls)
    {
    default:
    case 4: return ZSTD_hash4Ptr(p, hBits);
    case 5: return ZSTD_hash5Ptr(p, hBits);
    case 6: return ZSTD_hash6Ptr(p, hBits);
    case 7: return ZSTD_hash7Ptr(p, hBits);
    case 8: return ZSTD_hash8Ptr(p, hBits);
    }
}

/** ZSTD_ipow() :
 * Return base^exponent.
 */
static U64 ZSTD_ipow(U64 base, U64 exponent)
{
    U64 power = 1;
    while (exponent) {
      if (exponent & 1) power *= base;
      exponent >>= 1;
      base *= base;
    }
    return power;
}
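
/* Worked example (illustrative): square-and-multiply in action for
 * ZSTD_ipow(3, 5). The exponent bits 101b are consumed LSB-first:
 *
 *     bit0 == 1 : power = 1*3 = 3;    base = 3*3 = 9
 *     bit1 == 0 : power unchanged;    base = 9*9 = 81
 *     bit2 == 1 : power = 3*81 = 243  (== 3^5)
 *
 * Multiplications wrap modulo 2^64, which is exactly what the rolling hash
 * below requires.
 */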

#define ZSTD_ROLL_HASH_CHAR_OFFSET 10

/** ZSTD_rollingHash_append() :
 * Add the buffer to the hash value.
 */
static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size)
{
    BYTE const* istart = (BYTE const*)buf;
    size_t pos;
    for (pos = 0; pos < size; ++pos) {
        hash *= prime8bytes;
        hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET;
    }
    return hash;
}

/** ZSTD_rollingHash_compute() :
 * Compute the rolling hash value of the buffer.
 */
MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size)
{
    return ZSTD_rollingHash_append(0, buf, size);
}

/** ZSTD_rollingHash_primePower() :
 * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash
 * over a window of length bytes.
 */
MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length)
{
    return ZSTD_ipow(prime8bytes, length - 1);
}

/** ZSTD_rollingHash_rotate() :
 * Rotate the rolling hash by one byte.
 */
MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
{
    hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower;
    hash *= prime8bytes;
    hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET;
    return hash;
}
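
/* Invariant sketch (illustrative): rotating is equivalent to recomputing the
 * hash over a window shifted by one byte. For any buffer `buf` of at least
 * windowSize+1 bytes:
 *
 *     U64 const pp = ZSTD_rollingHash_primePower(windowSize);
 *     U64 h = ZSTD_rollingHash_compute(buf, windowSize);
 *     h = ZSTD_rollingHash_rotate(h, buf[0], buf[windowSize], pp);
 *     // h == ZSTD_rollingHash_compute(buf + 1, windowSize)
 *
 * because the hash is the polynomial sum of (byte + OFFSET) * prime8bytes^i,
 * with all arithmetic modulo 2^64.
 */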

/*-*************************************
*  Round buffer management
***************************************/
#if (ZSTD_WINDOWLOG_MAX_64 > 31)
# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
#endif
/* Max current allowed */
#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
/* Maximum chunk size before overflow correction needs to be called again */
#define ZSTD_CHUNKSIZE_MAX                                                     \
    ( ((U32)-1)                  /* Maximum ending current index */            \
    - ZSTD_CURRENT_MAX)          /* Maximum beginning lowLimit */

/**
 * ZSTD_window_clear():
 * Clears the window containing the history by simply setting it to empty.
 */
MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
{
    size_t const endT = (size_t)(window->nextSrc - window->base);
    U32 const end = (U32)endT;

    window->lowLimit = end;
    window->dictLimit = end;
}

/**
 * ZSTD_window_hasExtDict():
 * Returns non-zero if the window has a non-empty extDict.
 */
MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
{
    return window.lowLimit < window.dictLimit;
}

/**
 * ZSTD_matchState_dictMode():
 * Inspects the provided matchState and figures out what dictMode should be
 * passed to the compressor.
 */
MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
{
    return ZSTD_window_hasExtDict(ms->window) ?
        ZSTD_extDict :
        ms->dictMatchState != NULL ?
            (ms->dictMatchState->dedicatedDictSearch ? ZSTD_dedicatedDictSearch : ZSTD_dictMatchState) :
            ZSTD_noDict;
}

/**
 * ZSTD_window_needOverflowCorrection():
 * Returns non-zero if the indices are getting too large and need overflow
 * protection.
 */
MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
                                                  void const* srcEnd)
{
    U32 const curr = (U32)((BYTE const*)srcEnd - window.base);
    return curr > ZSTD_CURRENT_MAX;
}

/**
 * ZSTD_window_correctOverflow():
 * Reduces the indices to protect from index overflow.
 * Returns the correction made to the indices, which must be applied to every
 * stored index.
 *
 * The least significant cycleLog bits of the indices must remain the same,
 * which may be 0. Every index up to maxDist in the past must be valid.
 * NOTE: (maxDist & cycleMask) must be zero.
 */
MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
                                           U32 maxDist, void const* src)
{
    /* preemptive overflow correction:
     * 1. correction is large enough:
     *    lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
     *    1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
     *
     *    current - newCurrent
     *    > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
     *    > (3<<29) - (1<<chainLog)
     *    > (3<<29) - (1<<30)             (NOTE: chainLog <= 30)
     *    > 1<<29
     *
     * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
     *    After correction, current is less than (1<<chainLog + 1<<windowLog).
     *    In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
     *    In 32-bit mode we are safe, because (chainLog <= 29), so
     *    ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
     * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
     *    windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
     */
    U32 const cycleMask = (1U << cycleLog) - 1;
    U32 const curr = (U32)((BYTE const*)src - window->base);
    U32 const currentCycle0 = curr & cycleMask;
    /* Exclude zero so that newCurrent - maxDist >= 1. */
    U32 const currentCycle1 = currentCycle0 == 0 ? (1U << cycleLog) : currentCycle0;
    U32 const newCurrent = currentCycle1 + maxDist;
    U32 const correction = curr - newCurrent;
    assert((maxDist & cycleMask) == 0);
    assert(curr > newCurrent);
    /* Loose bound, should be around 1<<29 (see above) */
    assert(correction > 1<<28);

    window->base += correction;
    window->dictBase += correction;
    if (window->lowLimit <= correction) window->lowLimit = 1;
    else window->lowLimit -= correction;
    if (window->dictLimit <= correction) window->dictLimit = 1;
    else window->dictLimit -= correction;

    /* Ensure we can still reference the full window. */
    assert(newCurrent >= maxDist);
    assert(newCurrent - maxDist >= 1);
    /* Ensure that lowLimit and dictLimit didn't underflow. */
    assert(window->lowLimit <= newCurrent);
    assert(window->dictLimit <= newCurrent);

    DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
             window->lowLimit);
    return correction;
}
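
/* Worked example (illustrative, hypothetical numbers): with cycleLog == 20
 * and maxDist == 1<<23, suppose curr == 0xF0012345. Then:
 *
 *     currentCycle0 == curr & 0xFFFFF  == 0x12345   (non-zero, kept)
 *     newCurrent    == 0x12345 + 0x800000 == 0x812345
 *     correction    == 0xF0012345 - 0x812345 == 0xEF800000
 *
 * All stored indices are then reduced by `correction`; the low 20 bits of
 * every index are preserved, so table entries computed modulo the cycle
 * remain valid.
 */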

/**
 * ZSTD_window_enforceMaxDist():
 * Updates lowLimit so that:
 *    (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
 *
 * It ensures index is valid as long as index >= lowLimit.
 * This must be called before a block compression call.
 *
 * loadedDictEnd is only defined if a dictionary is in use for current compression.
 * As the name implies, loadedDictEnd represents the index at end of dictionary.
 * The value lies within context's referential; it can be directly compared to blockEndIdx.
 *
 * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0.
 * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit.
 * This is because dictionaries are allowed to be referenced fully
 * as long as the last byte of the dictionary is in the window.
 * Once input has progressed beyond window size, dictionary cannot be referenced anymore.
 *
 * In normal dict mode, the dictionary lies between lowLimit and dictLimit.
 * In dictMatchState mode, lowLimit and dictLimit are the same,
 * and the dictionary is below them.
 * forceWindow and dictMatchState are therefore incompatible.
 */
MEM_STATIC void
ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
                     const void* blockEnd,
                           U32   maxDist,
                           U32*  loadedDictEndPtr,
                     const ZSTD_matchState_t** dictMatchStatePtr)
{
    U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
    U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
    DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
                (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);

    /* - When there is no dictionary : loadedDictEnd == 0.
         In which case, the test (blockEndIdx > maxDist) is merely to avoid
         overflowing next operation `newLowLimit = blockEndIdx - maxDist`.
       - When there is a standard dictionary :
         Index referential is copied from the dictionary,
         which means it starts from 0.
         In which case, loadedDictEnd == dictSize,
         and it makes sense to compare `blockEndIdx > maxDist + dictSize`
         since `blockEndIdx` also starts from zero.
       - When there is an attached dictionary :
         loadedDictEnd is expressed within the referential of the context,
         so it can be directly compared against blockEndIdx.
    */
    if (blockEndIdx > maxDist + loadedDictEnd) {
        U32 const newLowLimit = blockEndIdx - maxDist;
        if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
        if (window->dictLimit < window->lowLimit) {
            DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
                        (unsigned)window->dictLimit, (unsigned)window->lowLimit);
            window->dictLimit = window->lowLimit;
        }
        /* On reaching window size, dictionaries are invalidated */
        if (loadedDictEndPtr) *loadedDictEndPtr = 0;
        if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
    }
}

/* Similar to ZSTD_window_enforceMaxDist(),
 * but only invalidates dictionary
 * when input progresses beyond window size.
 * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL)
 *              loadedDictEnd uses same referential as window->base
 *              maxDist is the window size */
MEM_STATIC void
ZSTD_checkDictValidity(const ZSTD_window_t* window,
                       const void* blockEnd,
                             U32   maxDist,
                             U32*  loadedDictEndPtr,
                       const ZSTD_matchState_t** dictMatchStatePtr)
{
    assert(loadedDictEndPtr != NULL);
    assert(dictMatchStatePtr != NULL);
    {   U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
        U32 const loadedDictEnd = *loadedDictEndPtr;
        DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
                    (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
        assert(blockEndIdx >= loadedDictEnd);

        if (blockEndIdx > loadedDictEnd + maxDist) {
            /* On reaching window size, dictionaries are invalidated.
             * For simplification, if window size is reached anywhere within next block,
             * the dictionary is invalidated for the full block.
             */
            DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
            *loadedDictEndPtr = 0;
            *dictMatchStatePtr = NULL;
        } else {
            if (*loadedDictEndPtr != 0) {
                DEBUGLOG(6, "dictionary considered valid for current block");
    }   }   }
}

MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
    ZSTD_memset(window, 0, sizeof(*window));
    window->base = (BYTE const*)"";
    window->dictBase = (BYTE const*)"";
    window->dictLimit = 1;    /* start from 1, so that 1st position is valid */
    window->lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
    window->nextSrc = window->base + 1;   /* see issue #1241 */
}

/**
 * ZSTD_window_update():
 * Updates the window by appending [src, src + srcSize) to the window.
 * If it is not contiguous, the current prefix becomes the extDict, and we
 * forget about the old extDict. Handles overlap of the prefix and extDict.
 * Returns non-zero if the segment is contiguous.
 */
MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
                                  void const* src, size_t srcSize)
{
    BYTE const* const ip = (BYTE const*)src;
    U32 contiguous = 1;
    DEBUGLOG(5, "ZSTD_window_update");
    if (srcSize == 0)
        return contiguous;
    assert(window->base != NULL);
    assert(window->dictBase != NULL);
    /* Check if blocks follow each other */
    if (src != window->nextSrc) {
        /* not contiguous */
        size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
        DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
        window->lowLimit = window->dictLimit;
        assert(distanceFromBase == (size_t)(U32)distanceFromBase);  /* should never overflow */
        window->dictLimit = (U32)distanceFromBase;
        window->dictBase = window->base;
        window->base = ip - distanceFromBase;
        /* ms->nextToUpdate = window->dictLimit; */
        if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit;   /* too small extDict */
        contiguous = 0;
    }
    window->nextSrc = ip + srcSize;
    /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
    if ( (ip+srcSize > window->dictBase + window->lowLimit)
       & (ip < window->dictBase + window->dictLimit)) {
        ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
        U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
        window->lowLimit = lowLimitMax;
        DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
    }
    return contiguous;
}

/**
 * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
 */
MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
{
    U32    const maxDistance = 1U << windowLog;
    U32    const lowestValid = ms->window.lowLimit;
    U32    const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
    U32    const isDictionary = (ms->loadedDictEnd != 0);
    /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary
     * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't
     * valid for the entire block. So this check is sufficient to find the lowest valid match index.
     */
    U32    const matchLowest = isDictionary ? lowestValid : withinWindow;
    return matchLowest;
}

/**
 * Returns the lowest allowed match index in the prefix.
 */
MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
{
    U32    const maxDistance = 1U << windowLog;
    U32    const lowestValid = ms->window.dictLimit;
    U32    const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
    U32    const isDictionary = (ms->loadedDictEnd != 0);
    /* When computing the lowest prefix index we need to take the dictionary into account to handle
     * the edge case where the dictionary and the source are contiguous in memory.
     */
    U32    const matchLowest = isDictionary ? lowestValid : withinWindow;
    return matchLowest;
}



/* debug functions */
#if (DEBUGLEVEL>=2)

MEM_STATIC double ZSTD_fWeight(U32 rawStat)
{
    U32 const fp_accuracy = 8;
    U32 const fp_multiplier = (1 << fp_accuracy);
    U32 const newStat = rawStat + 1;
    U32 const hb = ZSTD_highbit32(newStat);
    U32 const BWeight = hb * fp_multiplier;
    U32 const FWeight = (newStat << fp_accuracy) >> hb;
    U32 const weight = BWeight + FWeight;
    assert(hb + fp_accuracy < 31);
    return (double)weight / fp_multiplier;
}
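
/* Worked example (illustrative): ZSTD_fWeight() approximates log2 with 8
 * fractional bits. For rawStat == 255: newStat == 256, hb == 8,
 * BWeight == 8*256 == 2048, FWeight == (256<<8)>>8 == 256, so
 * weight/fp_multiplier == 2304/256 == 9.0, i.e. roughly log2(256) + 1.
 * The constant +1 cancels out when ZSTD_debugTable() below takes the
 * difference of two such weights as a predicted bit cost.
 */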

/* display a table content,
 * listing each element, its frequency, and its predicted bit cost */
MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
{
    unsigned u, sum;
    for (u=0, sum=0; u<=max; u++) sum += table[u];
    DEBUGLOG(2, "total nb elts: %u", sum);
    for (u=0; u<=max; u++) {
        DEBUGLOG(2, "%2u: %5u  (%.2f)",
                u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );
    }
}

#endif


#if defined (__cplusplus)
}
#endif

/* ===============================================================
 * Shared internal declarations
 * These prototypes may be called from sources not in lib/compress
 * =============================================================== */

/* ZSTD_loadCEntropy() :
 * dict : must point at beginning of a valid zstd dictionary.
 * return : size of dictionary header (size of magic number + dict ID + entropy tables)
 * assumptions : magic number supposed already checked
 *               and dictSize >= 8 */
size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
                         const void* const dict, size_t dictSize);

void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);

/* ==============================================================
 * Private declarations
 * These prototypes shall only be called from within lib/compress
 * ============================================================== */

/* ZSTD_getCParamsFromCCtxParams() :
 * cParams are built depending on compressionLevel, src size hints,
 * LDM and manually set compression parameters.
 * Note: srcSizeHint == 0 means 0!
 */
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);

/*! ZSTD_initCStream_internal() :
 *  Private use only. Init streaming operation.
 *  expects params to be valid.
 *  must receive dict, or cdict, or none, but not both.
 *  @return : 0, or an error code */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
                     const void* dict, size_t dictSize,
                     const ZSTD_CDict* cdict,
                     const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);

void ZSTD_resetSeqStore(seqStore_t* ssPtr);

/*! ZSTD_getCParamsFromCDict() :
 *  as the name implies */
ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);

/* ZSTD_compressBegin_advanced_internal() :
 * Private use only. To be called from zstdmt_compress.c. */
size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
                                    const void* dict, size_t dictSize,
                                    ZSTD_dictContentType_e dictContentType,
                                    ZSTD_dictTableLoadMethod_e dtlm,
                                    const ZSTD_CDict* cdict,
                                    const ZSTD_CCtx_params* params,
                                    unsigned long long pledgedSrcSize);

/* ZSTD_compress_advanced_internal() :
 * Private use only. To be called from zstdmt_compress.c. */
size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
                                       void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize,
                                 const void* dict, size_t dictSize,
                                 const ZSTD_CCtx_params* params);


/* ZSTD_writeLastEmptyBlock() :
 * output an empty Block with end-of-frame mark to complete a frame
 * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
 *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
 */
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);


/* ZSTD_referenceExternalSequences() :
 * Must be called before starting a compression operation.
 * seqs must parse a prefix of the source.
 * This cannot be used when long range matching is enabled.
 * Zstd will use these sequences, and pass the literals to a secondary block
 * compressor.
 * @return : An error code on failure.
 * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
 * access and data corruption.
 */
size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);

/** ZSTD_cycleLog() :
 *  condition for correct operation : hashLog > 1 */
/* Begin FreeBSD - This symbol is needed by dll-linked CLI zstd(1). */
ZSTDLIB_API
/* End FreeBSD */
U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);

#endif /* ZSTD_COMPRESS_H */