/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


/*-*************************************
*  Tuning parameters
***************************************/
#ifndef ZSTD_CLEVEL_DEFAULT
#  define ZSTD_CLEVEL_DEFAULT 3
#endif


/*-*************************************
*  Dependencies
***************************************/
#include <string.h>         /* memset */
#include "mem.h"
#define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
#include "fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include "zstd_compress_internal.h"
#include "zstd_fast.h"
#include "zstd_double_fast.h"
#include "zstd_lazy.h"
#include "zstd_opt.h"
#include "zstd_ldm.h"


/*-*************************************
*  Helper functions
***************************************/
size_t ZSTD_compressBound(size_t srcSize) {
    return ZSTD_COMPRESSBOUND(srcSize);
}
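
/* Illustrative usage sketch (not part of the original source) :
 * size the destination buffer with ZSTD_compressBound() before a single-shot call.
 *     size_t const dstCapacity = ZSTD_compressBound(srcSize);
 *     void* const dst = malloc(dstCapacity);
 *     size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, ZSTD_CLEVEL_DEFAULT);
 * The returned value must still be tested with ZSTD_isError(cSize) :
 * the bound only guarantees capacity, not success. */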


/*-*************************************
*  Context memory management
***************************************/
struct ZSTD_CDict_s {
    void* dictBuffer;
    const void* dictContent;
    size_t dictContentSize;
    ZSTD_CCtx* refContext;
};  /* typedef'd to ZSTD_CDict within "zstd.h" */

ZSTD_CCtx* ZSTD_createCCtx(void)
{
    return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
}

ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
{
    ZSTD_CCtx* cctx;

    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;

    cctx = (ZSTD_CCtx*) ZSTD_calloc(sizeof(ZSTD_CCtx), customMem);
    if (!cctx) return NULL;
    cctx->customMem = customMem;
    cctx->requestedParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;
    cctx->requestedParams.fParams.contentSizeFlag = 1;
    ZSTD_STATIC_ASSERT(zcss_init==0);
    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
    return cctx;
}
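
/* Illustrative sketch (assumes the standard ZSTD_customMem layout from zstd.h) :
 * plug a user-provided allocator into the compression context.
 *     static void* myAlloc(void* opaque, size_t size) { (void)opaque; return malloc(size); }
 *     static void  myFree (void* opaque, void* addr)  { (void)opaque; free(addr); }
 *     ZSTD_customMem const cmem = { myAlloc, myFree, NULL };
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cmem);
 * Passing ZSTD_defaultCMem (all-NULL members) selects the default malloc/free pair. */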

ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)
{
    ZSTD_CCtx* const cctx = (ZSTD_CCtx*) workspace;
    if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
    if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
    memset(workspace, 0, workspaceSize);   /* may be a bit generous, could memset be smaller ? */
    cctx->staticSize = workspaceSize;
    cctx->workSpace = (void*)(cctx+1);
    cctx->workSpaceSize = workspaceSize - sizeof(ZSTD_CCtx);

    /* entropy space (never moves) */
    if (cctx->workSpaceSize < sizeof(ZSTD_entropyCTables_t)) return NULL;
    assert(((size_t)cctx->workSpace & (sizeof(void*)-1)) == 0);   /* ensure correct alignment */
    cctx->entropy = (ZSTD_entropyCTables_t*)cctx->workSpace;

    return cctx;
}
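
/* Illustrative sketch (not part of the original source) : pair ZSTD_estimateCCtxSize()
 * with ZSTD_initStaticCCtx() to compress without any further dynamic allocation.
 *     size_t const wkspSize = ZSTD_estimateCCtxSize(ZSTD_CLEVEL_DEFAULT);
 *     void* const wksp = malloc(wkspSize);           // or any 8-bytes-aligned static buffer
 *     ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
 * A NULL return means the workspace was too small or misaligned; the buffer must
 * outlive the cctx. */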

size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
{
    if (cctx==NULL) return 0;   /* support free on NULL */
    if (cctx->staticSize) return ERROR(memory_allocation);   /* not compatible with static CCtx */
    ZSTD_free(cctx->workSpace, cctx->customMem);
    cctx->workSpace = NULL;
    ZSTD_freeCDict(cctx->cdictLocal);
    cctx->cdictLocal = NULL;
#ifdef ZSTD_MULTITHREAD
    ZSTDMT_freeCCtx(cctx->mtctx);
    cctx->mtctx = NULL;
#endif
    ZSTD_free(cctx, cctx->customMem);
    return 0;   /* reserved as a potential error code in the future */
}


static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    return ZSTDMT_sizeof_CCtx(cctx->mtctx);
#else
    (void) cctx;
    return 0;
#endif
}


size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
{
    if (cctx==NULL) return 0;   /* support sizeof on NULL */
    DEBUGLOG(3, "sizeof(*cctx) : %u", (U32)sizeof(*cctx));
    DEBUGLOG(3, "workSpaceSize (including streaming buffers): %u", (U32)cctx->workSpaceSize);
    DEBUGLOG(3, "inner cdict : %u", (U32)ZSTD_sizeof_CDict(cctx->cdictLocal));
    DEBUGLOG(3, "inner MTCTX : %u", (U32)ZSTD_sizeof_mtctx(cctx));
    return sizeof(*cctx) + cctx->workSpaceSize
           + ZSTD_sizeof_CDict(cctx->cdictLocal)
           + ZSTD_sizeof_mtctx(cctx);
}

size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
{
    return ZSTD_sizeof_CCtx(zcs);  /* same object */
}

/* private API call, for dictBuilder only */
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }

#define ZSTD_CLEVEL_CUSTOM 999

static ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
        ZSTD_CCtx_params CCtxParams, U64 srcSizeHint, size_t dictSize)
{
    DEBUGLOG(4, "ZSTD_getCParamsFromCCtxParams: srcSize = %u, dictSize = %u",
                (U32)srcSizeHint, (U32)dictSize);
    return (CCtxParams.compressionLevel == ZSTD_CLEVEL_CUSTOM) ?
                CCtxParams.cParams :
                ZSTD_getCParams(CCtxParams.compressionLevel, srcSizeHint, dictSize);
}

static void ZSTD_cLevelToCCtxParams_srcSize(ZSTD_CCtx_params* CCtxParams, U64 srcSize)
{
    DEBUGLOG(4, "ZSTD_cLevelToCCtxParams_srcSize: srcSize = %u",
                (U32)srcSize);
    CCtxParams->cParams = ZSTD_getCParamsFromCCtxParams(*CCtxParams, srcSize, 0);
    CCtxParams->compressionLevel = ZSTD_CLEVEL_CUSTOM;
}

static void ZSTD_cLevelToCParams(ZSTD_CCtx* cctx)
{
    DEBUGLOG(4, "ZSTD_cLevelToCParams: level=%i", cctx->requestedParams.compressionLevel);
    ZSTD_cLevelToCCtxParams_srcSize(
            &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1);
}

static void ZSTD_cLevelToCCtxParams(ZSTD_CCtx_params* CCtxParams)
{
    DEBUGLOG(4, "ZSTD_cLevelToCCtxParams");
    ZSTD_cLevelToCCtxParams_srcSize(CCtxParams, ZSTD_CONTENTSIZE_UNKNOWN);
}

static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
        ZSTD_compressionParameters cParams)
{
    ZSTD_CCtx_params cctxParams;
    memset(&cctxParams, 0, sizeof(cctxParams));
    cctxParams.cParams = cParams;
    cctxParams.compressionLevel = ZSTD_CLEVEL_CUSTOM;
    return cctxParams;
}

static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
        ZSTD_customMem customMem)
{
    ZSTD_CCtx_params* params;
    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
    params = (ZSTD_CCtx_params*)ZSTD_calloc(
            sizeof(ZSTD_CCtx_params), customMem);
    if (!params) { return NULL; }
    params->customMem = customMem;
    params->compressionLevel = ZSTD_CLEVEL_DEFAULT;
    return params;
}

ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
{
    return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
}

size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
{
    if (params == NULL) { return 0; }
    ZSTD_free(params, params->customMem);
    return 0;
}

size_t ZSTD_resetCCtxParams(ZSTD_CCtx_params* params)
{
    return ZSTD_initCCtxParams(params, ZSTD_CLEVEL_DEFAULT);
}

size_t ZSTD_initCCtxParams(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
    if (!cctxParams) { return ERROR(GENERIC); }
    memset(cctxParams, 0, sizeof(*cctxParams));
    cctxParams->compressionLevel = compressionLevel;
    return 0;
}

size_t ZSTD_initCCtxParams_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
{
    if (!cctxParams) { return ERROR(GENERIC); }
    CHECK_F( ZSTD_checkCParams(params.cParams) );
    memset(cctxParams, 0, sizeof(*cctxParams));
    cctxParams->cParams = params.cParams;
    cctxParams->fParams = params.fParams;
    cctxParams->compressionLevel = ZSTD_CLEVEL_CUSTOM;
    return 0;
}

static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(
        ZSTD_CCtx_params cctxParams, ZSTD_parameters params)
{
    ZSTD_CCtx_params ret = cctxParams;
    ret.cParams = params.cParams;
    ret.fParams = params.fParams;
    ret.compressionLevel = ZSTD_CLEVEL_CUSTOM;
    return ret;
}

#define CLAMPCHECK(val,min,max) {            \
    if (((val)<(min)) | ((val)>(max))) {     \
        return ERROR(parameter_outOfBound);  \
}   }

size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value)
{
    DEBUGLOG(4, "ZSTD_CCtx_setParameter (%u, %u)", (U32)param, value);
    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);

    switch(param)
    {
    case ZSTD_p_format :
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_p_compressionLevel:
        if (cctx->cdict) return ERROR(stage_wrong);
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_p_windowLog:
    case ZSTD_p_hashLog:
    case ZSTD_p_chainLog:
    case ZSTD_p_searchLog:
    case ZSTD_p_minMatch:
    case ZSTD_p_targetLength:
    case ZSTD_p_compressionStrategy:
        if (cctx->cdict) return ERROR(stage_wrong);
        if (value>0) ZSTD_cLevelToCParams(cctx);  /* Can optimize if srcSize is known */
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_p_contentSizeFlag:
    case ZSTD_p_checksumFlag:
    case ZSTD_p_dictIDFlag:
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_p_forceMaxWindow :  /* Force back-references to remain < windowSize,
                                   * even when referencing into Dictionary content.
                                   * default : 0 when using a CDict, 1 when using a Prefix */
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_p_nbThreads:
        if ((value > 1) && cctx->staticSize) {
            return ERROR(parameter_unsupported);  /* MT not compatible with static alloc */
        }
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_p_jobSize:
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_p_overlapSizeLog:
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_p_enableLongDistanceMatching:
        if (cctx->cdict) return ERROR(stage_wrong);
        if (value>0) ZSTD_cLevelToCParams(cctx);
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_p_ldmHashLog:
    case ZSTD_p_ldmMinMatch:
    case ZSTD_p_ldmBucketSizeLog:
    case ZSTD_p_ldmHashEveryLog:
        if (cctx->cdict) return ERROR(stage_wrong);
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    default: return ERROR(parameter_unsupported);
    }
}
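
/* Illustrative sketch (parameter names are the ZSTD_p_* enum values handled above) :
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 19);
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_p_checksumFlag, 1);
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_p_nbThreads, 4);   // >1 requires ZSTD_MULTITHREAD
 * Each call must happen before compression starts (streamStage == zcss_init),
 * and each returned size_t should be checked with ZSTD_isError(). */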

size_t ZSTD_CCtxParam_setParameter(
        ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, unsigned value)
{
    DEBUGLOG(4, "ZSTD_CCtxParam_setParameter (%u, %u)", (U32)param, value);
    switch(param)
    {
    case ZSTD_p_format :
        if (value > (unsigned)ZSTD_f_zstd1_magicless)
            return ERROR(parameter_unsupported);
        CCtxParams->format = (ZSTD_format_e)value;
        return (size_t)CCtxParams->format;

    case ZSTD_p_compressionLevel :
        if ((int)value > ZSTD_maxCLevel()) value = ZSTD_maxCLevel();
        if (value)  /* 0 : does not change current level */
            CCtxParams->compressionLevel = value;
        return CCtxParams->compressionLevel;

    case ZSTD_p_windowLog :
        DEBUGLOG(4, "ZSTD_CCtxParam_setParameter: set windowLog=%u", value);
        if (value) {  /* 0 : does not change current windowLog */
            CLAMPCHECK(value, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
            ZSTD_cLevelToCCtxParams(CCtxParams);
            CCtxParams->cParams.windowLog = value;
        }
        return CCtxParams->cParams.windowLog;

    case ZSTD_p_hashLog :
        if (value) { /* 0 : does not change current hashLog */
            CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
            ZSTD_cLevelToCCtxParams(CCtxParams);
            CCtxParams->cParams.hashLog = value;
        }
        return CCtxParams->cParams.hashLog;

    case ZSTD_p_chainLog :
        if (value) { /* 0 : does not change current chainLog */
            CLAMPCHECK(value, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
            ZSTD_cLevelToCCtxParams(CCtxParams);
            CCtxParams->cParams.chainLog = value;
        }
        return CCtxParams->cParams.chainLog;

    case ZSTD_p_searchLog :
        if (value) { /* 0 : does not change current searchLog */
            CLAMPCHECK(value, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
            ZSTD_cLevelToCCtxParams(CCtxParams);
            CCtxParams->cParams.searchLog = value;
        }
        return value;

    case ZSTD_p_minMatch :
        if (value) { /* 0 : does not change current minMatch length */
            CLAMPCHECK(value, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
            ZSTD_cLevelToCCtxParams(CCtxParams);
            CCtxParams->cParams.searchLength = value;
        }
        return CCtxParams->cParams.searchLength;

    case ZSTD_p_targetLength :
        if (value) { /* 0 : does not change current sufficient_len */
            CLAMPCHECK(value, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
            ZSTD_cLevelToCCtxParams(CCtxParams);
            CCtxParams->cParams.targetLength = value;
        }
        return CCtxParams->cParams.targetLength;

    case ZSTD_p_compressionStrategy :
        if (value) { /* 0 : does not change current strategy */
            CLAMPCHECK(value, (unsigned)ZSTD_fast, (unsigned)ZSTD_btultra);
            ZSTD_cLevelToCCtxParams(CCtxParams);
            CCtxParams->cParams.strategy = (ZSTD_strategy)value;
        }
        return (size_t)CCtxParams->cParams.strategy;

    case ZSTD_p_contentSizeFlag :
        /* Content size written in frame header _when known_ (default:1) */
        DEBUGLOG(4, "set content size flag = %u", (value>0));
        CCtxParams->fParams.contentSizeFlag = value > 0;
        return CCtxParams->fParams.contentSizeFlag;

    case ZSTD_p_checksumFlag :
        /* A 32-bit content checksum will be calculated and written at the end of the frame (default:0) */
        CCtxParams->fParams.checksumFlag = value > 0;
        return CCtxParams->fParams.checksumFlag;

    case ZSTD_p_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
        DEBUGLOG(4, "set dictIDFlag = %u", (value>0));
        CCtxParams->fParams.noDictIDFlag = (value == 0);
        return !CCtxParams->fParams.noDictIDFlag;

    case ZSTD_p_forceMaxWindow :
        CCtxParams->forceWindow = (value > 0);
        return CCtxParams->forceWindow;

    case ZSTD_p_nbThreads :
        if (value == 0) return CCtxParams->nbThreads;
#ifndef ZSTD_MULTITHREAD
        if (value > 1) return ERROR(parameter_unsupported);
        return 1;
#else
        return ZSTDMT_CCtxParam_setNbThreads(CCtxParams, value);
#endif

    case ZSTD_p_jobSize :
#ifndef ZSTD_MULTITHREAD
        return ERROR(parameter_unsupported);
#else
        if (CCtxParams->nbThreads <= 1) return ERROR(parameter_unsupported);
        return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_jobSize, value);
#endif

    case ZSTD_p_overlapSizeLog :
#ifndef ZSTD_MULTITHREAD
        return ERROR(parameter_unsupported);
#else
        if (CCtxParams->nbThreads <= 1) return ERROR(parameter_unsupported);
        return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_overlapSectionLog, value);
#endif

    case ZSTD_p_enableLongDistanceMatching :
        if (value) {
            ZSTD_cLevelToCCtxParams(CCtxParams);
            CCtxParams->cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
        }
        return ZSTD_ldm_initializeParameters(&CCtxParams->ldmParams, value);

    case ZSTD_p_ldmHashLog :
        if (value) { /* 0 : does not change current ldmHashLog */
            CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
            CCtxParams->ldmParams.hashLog = value;
        }
        return CCtxParams->ldmParams.hashLog;

    case ZSTD_p_ldmMinMatch :
        if (value) { /* 0 : does not change current ldmMinMatch */
            CLAMPCHECK(value, ZSTD_LDM_MINMATCH_MIN, ZSTD_LDM_MINMATCH_MAX);
            CCtxParams->ldmParams.minMatchLength = value;
        }
        return CCtxParams->ldmParams.minMatchLength;

    case ZSTD_p_ldmBucketSizeLog :
        if (value > ZSTD_LDM_BUCKETSIZELOG_MAX) {
            return ERROR(parameter_outOfBound);
        }
        CCtxParams->ldmParams.bucketSizeLog = value;
        return value;

    case ZSTD_p_ldmHashEveryLog :
        if (value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN) {
            return ERROR(parameter_outOfBound);
        }
        CCtxParams->ldmParams.hashEveryLog = value;
        return value;

    default: return ERROR(parameter_unsupported);
    }
}

/** ZSTD_CCtx_setParametersUsingCCtxParams() :
 *  just copies `params` into `cctx`;
 *  no compression action is performed, the parameters are merely stored for later use.
 */
size_t ZSTD_CCtx_setParametersUsingCCtxParams(
        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
{
    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
    if (cctx->cdict) return ERROR(stage_wrong);

    cctx->requestedParams = *params;

    return 0;
}
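
/* Illustrative sketch : build a parameter set once, then apply it to a context.
 *     ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
 *     ZSTD_CCtxParam_setParameter(params, ZSTD_p_compressionLevel, 19);
 *     ZSTD_CCtxParam_setParameter(params, ZSTD_p_checksumFlag, 1);
 *     ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);
 *     ZSTD_freeCCtxParams(params);
 * Error-code checks are omitted here for brevity. */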

ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
    return 0;
}

size_t ZSTD_CCtx_loadDictionary_advanced(
        ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
        ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictMode_e dictMode)
{
    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
    if (cctx->staticSize) return ERROR(memory_allocation);  /* no malloc for static CCtx */
    DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
    ZSTD_freeCDict(cctx->cdictLocal);  /* in case one already exists */
    if (dict==NULL || dictSize==0) {   /* no dictionary mode */
        cctx->cdictLocal = NULL;
        cctx->cdict = NULL;
    } else {
        ZSTD_compressionParameters const cParams =
                ZSTD_getCParamsFromCCtxParams(cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, dictSize);
        cctx->cdictLocal = ZSTD_createCDict_advanced(
                                dict, dictSize,
                                dictLoadMethod, dictMode,
                                cParams, cctx->customMem);
        cctx->cdict = cctx->cdictLocal;
        if (cctx->cdictLocal == NULL)
            return ERROR(memory_allocation);
    }
    return 0;
}

ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
      ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
    return ZSTD_CCtx_loadDictionary_advanced(
            cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dm_auto);
}

ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
    return ZSTD_CCtx_loadDictionary_advanced(
            cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dm_auto);
}
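
/* Illustrative sketch : attach a dictionary before starting a frame
 * (dictBuffer/dictSize are caller-provided placeholders).
 *     ZSTD_CCtx_loadDictionary(cctx, dictBuffer, dictSize);              // internal copy
 *     ZSTD_CCtx_loadDictionary_byReference(cctx, dictBuffer, dictSize);  // reference only; buffer must outlive its use
 * The loaded dictionary stays attached for subsequent frames until it is replaced
 * or until ZSTD_CCtx_reset() discards it. */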


size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
    cctx->cdict = cdict;
    memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));  /* exclusive */
    return 0;
}

size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
{
    return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dm_rawContent);
}

size_t ZSTD_CCtx_refPrefix_advanced(
        ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictMode_e dictMode)
{
    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
    cctx->cdict = NULL;   /* prefix discards any prior cdict */
    cctx->prefixDict.dict = prefix;
    cctx->prefixDict.dictSize = prefixSize;
    cctx->prefixDict.dictMode = dictMode;
    return 0;
}

static void ZSTD_startNewCompression(ZSTD_CCtx* cctx)
{
    cctx->streamStage = zcss_init;
    cctx->pledgedSrcSizePlusOne = 0;
}

/*! ZSTD_CCtx_reset() :
 *  Also dumps dictionary */
void ZSTD_CCtx_reset(ZSTD_CCtx* cctx)
{
    ZSTD_startNewCompression(cctx);
    cctx->cdict = NULL;
}

/** ZSTD_checkCParams() :
    check that CParam values remain within the authorized range.
    @return : 0, or an error code if one value is beyond the authorized range */
size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
{
    CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
    CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
    CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
    CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
    CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
    CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
    if ((U32)(cParams.strategy) > (U32)ZSTD_btultra)
        return ERROR(parameter_unsupported);
    return 0;
}

/** ZSTD_clampCParams() :
 *  clamp CParam values into the valid range.
 *  @return : valid CParams */
static ZSTD_compressionParameters ZSTD_clampCParams(ZSTD_compressionParameters cParams)
{
#   define CLAMP(val,min,max) {      \
        if (val<min) val=min;        \
        else if (val>max) val=max;   \
    }
    CLAMP(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
    CLAMP(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
    CLAMP(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
    CLAMP(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
    CLAMP(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
    CLAMP(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
    if ((U32)(cParams.strategy) > (U32)ZSTD_btultra) cParams.strategy = ZSTD_btultra;
    return cParams;
}

/** ZSTD_cycleLog() :
 *  condition for correct operation : hashLog > 1 */
static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
{
    U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
    return hashLog - btScale;
}
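
/* Example (derived from the code above) : for binary-tree strategies
 * (ZSTD_btlazy2 and above) btScale==1, so ZSTD_cycleLog(17, ZSTD_btlazy2) == 16,
 * while ZSTD_cycleLog(17, ZSTD_lazy2) == 17. */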

/** ZSTD_adjustCParams_internal() :
    optimize `cPar` for a given input (`srcSize` and `dictSize`).
    mostly downsizing to reduce memory consumption and initialization latency.
    Both `srcSize` and `dictSize` are optional (use 0 if unknown).
    Note : cPar is considered validated at this stage. Use ZSTD_checkCParams() to ensure that condition. */
ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
{
    static const U64 minSrcSize = 513; /* (1<<9) + 1 */
    static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
    assert(ZSTD_checkCParams(cPar)==0);

    if (dictSize && (srcSize+1<2) /* srcSize unknown */ )
        srcSize = minSrcSize;  /* presumed small when there is a dictionary */
    else if (srcSize == 0)
        srcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* 0 == unknown : presumed large */

    /* resize windowLog if input is small enough, to use less memory */
    if ( (srcSize < maxWindowResize)
      && (dictSize < maxWindowResize) )  {
        U32 const tSize = (U32)(srcSize + dictSize);
        static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
        U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
                            ZSTD_highbit32(tSize-1) + 1;
        if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
    }
    if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog;
    {   U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
        if (cycleLog > cPar.windowLog)
            cPar.chainLog -= (cycleLog - cPar.windowLog);
    }

    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* required for frame header */

    return cPar;
}
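
/* Worked example (derived from the rules above) : for srcSize==1000 and no
 * dictionary, tSize==1000 so srcLog == ZSTD_highbit32(999)+1 == 10, and a
 * requested windowLog of 22 is shrunk to 10; hashLog and chainLog are then
 * capped to follow it. */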

ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
{
    cPar = ZSTD_clampCParams(cPar);
    return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
}

size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
    /* Estimate CCtx size is supported for single-threaded compression only. */
    if (params->nbThreads > 1) { return ERROR(GENERIC); }
    {   ZSTD_compressionParameters const cParams =
                ZSTD_getCParamsFromCCtxParams(*params, 0, 0);
        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
        U32    const divider = (cParams.searchLength==3) ? 3 : 4;
        size_t const maxNbSeq = blockSize / divider;
        size_t const tokenSpace = blockSize + 11*maxNbSeq;
        size_t const chainSize =
                (cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams.chainLog);
        size_t const hSize = ((size_t)1) << cParams.hashLog;
        U32    const hashLog3 = (cParams.searchLength>3) ?
                                0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
        size_t const h3Size = ((size_t)1) << hashLog3;
        size_t const entropySpace = sizeof(ZSTD_entropyCTables_t);
        size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);

        size_t const optBudget =
                ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32)
                + (ZSTD_OPT_NUM+1)*(sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
        size_t const optSpace = ((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btultra)) ? optBudget : 0;

        size_t const ldmSpace = params->ldmParams.enableLdm ?
            ZSTD_ldm_getTableSize(params->ldmParams.hashLog,
                                  params->ldmParams.bucketSizeLog) : 0;

        size_t const neededSpace = entropySpace + tableSpace + tokenSpace +
                                   optSpace + ldmSpace;

        DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx));
        DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace);
        return sizeof(ZSTD_CCtx) + neededSpace;
    }
}

size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
{
    ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
    return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
}

size_t ZSTD_estimateCCtxSize(int compressionLevel)
{
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
    return ZSTD_estimateCCtxSize_usingCParams(cParams);
}

size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
    if (params->nbThreads > 1) { return ERROR(GENERIC); }
    {   size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params->cParams.windowLog);
        size_t const inBuffSize = ((size_t)1 << params->cParams.windowLog) + blockSize;
        size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
        size_t const streamingSize = inBuffSize + outBuffSize;

        return CCtxSize + streamingSize;
    }
}

size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
{
    ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
    return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
}

size_t ZSTD_estimateCStreamSize(int compressionLevel) {
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
    return ZSTD_estimateCStreamSize_usingCParams(cParams);
}

static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1,
                                  ZSTD_compressionParameters cParams2)
{
    return (cParams1.hashLog  == cParams2.hashLog)
         & (cParams1.chainLog == cParams2.chainLog)
         & (cParams1.strategy == cParams2.strategy)   /* opt parser space */
         & ((cParams1.searchLength==3) == (cParams2.searchLength==3));  /* hashlog3 space */
}

/** The parameters are equivalent if ldm is disabled in both sets,
 *  or if all the ldm parameters are equal. */
static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1,
                                    ldmParams_t ldmParams2)
{
    return (!ldmParams1.enableLdm && !ldmParams2.enableLdm) ||
           (ldmParams1.enableLdm == ldmParams2.enableLdm &&
            ldmParams1.hashLog == ldmParams2.hashLog &&
            ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog &&
            ldmParams1.minMatchLength == ldmParams2.minMatchLength &&
            ldmParams1.hashEveryLog == ldmParams2.hashEveryLog);
}

typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e;

/* ZSTD_sufficientBuff() :
 * check that internal buffers exist for streaming if buffPol == ZSTDb_buffered.
 * Note : they are assumed to be correctly sized if ZSTD_equivalentCParams()==1 */
static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t blockSize1,
                            ZSTD_buffered_policy_e buffPol2,
                            ZSTD_compressionParameters cParams2,
                            U64 pledgedSrcSize)
{
    size_t const windowSize2 = MAX(1, (size_t)MIN(((U64)1 << cParams2.windowLog), pledgedSrcSize));
    size_t const blockSize2 = MIN(ZSTD_BLOCKSIZE_MAX, windowSize2);
    size_t const neededBufferSize2 = (buffPol2==ZSTDb_buffered) ? windowSize2 + blockSize2 : 0;
    DEBUGLOG(4, "ZSTD_sufficientBuff: windowSize2=%u from wlog=%u",
                (U32)windowSize2, cParams2.windowLog);
    DEBUGLOG(4, "ZSTD_sufficientBuff: blockSize2 %u <=? blockSize1 %u",
                (U32)blockSize2, (U32)blockSize1);
    return (blockSize2 <= blockSize1) /* seqStore space depends on blockSize */
         & (neededBufferSize2 <= bufferSize1);
}

/** Equivalence for resetCCtx purposes */
static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1,
                                 ZSTD_CCtx_params params2,
                                 size_t buffSize1, size_t blockSize1,
                                 ZSTD_buffered_policy_e buffPol2,
                                 U64 pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_equivalentParams: pledgedSrcSize=%u", (U32)pledgedSrcSize);
    return ZSTD_equivalentCParams(params1.cParams, params2.cParams) &&
           ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams) &&
           ZSTD_sufficientBuff(buffSize1, blockSize1, buffPol2, params2.cParams, pledgedSrcSize);
}

/*! ZSTD_continueCCtx() :
 *  reuse CCtx without reset (note : requires no dictionary) */
static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pledgedSrcSize)
{
    U32 const end = (U32)(cctx->nextSrc - cctx->base);
    size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
    DEBUGLOG(4, "ZSTD_continueCCtx");

    cctx->blockSize = blockSize;   /* previous block size could be different even for same windowLog, due to pledgedSrcSize */
    cctx->appliedParams = params;
    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
    cctx->consumedSrcSize = 0;
    if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
        cctx->appliedParams.fParams.contentSizeFlag = 0;
    DEBUGLOG(4, "pledged content size : %u ; flag : %u",
        (U32)pledgedSrcSize, cctx->appliedParams.fParams.contentSizeFlag);
    cctx->lowLimit = end;
    cctx->dictLimit = end;
    cctx->nextToUpdate = end+1;
    cctx->stage = ZSTDcs_init;
    cctx->dictID = 0;
    cctx->loadedDictEnd = 0;
    { int i; for (i=0; i<ZSTD_REP_NUM; i++) cctx->seqStore.rep[i] = repStartValue[i]; }
    cctx->optState.litLengthSum = 0;  /* force reset of btopt stats */
    XXH64_reset(&cctx->xxhState, 0);
    return 0;
}

typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e;

/*! ZSTD_resetCCtx_internal() :
    note : `params` are assumed fully validated at this stage */
static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
                                      ZSTD_CCtx_params params, U64 pledgedSrcSize,
                                      ZSTD_compResetPolicy_e const crp,
                                      ZSTD_buffered_policy_e const zbuff)
{
    DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
                (U32)pledgedSrcSize, params.cParams.windowLog);
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));

    if (crp == ZSTDcrp_continue) {
        if (ZSTD_equivalentParams(zc->appliedParams, params,
                                zc->inBuffSize, zc->blockSize,
                                zbuff, pledgedSrcSize)) {
            DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> continue mode (wLog1=%u, blockSize1=%u)",
                        zc->appliedParams.cParams.windowLog, (U32)zc->blockSize);
            assert(!(params.ldmParams.enableLdm &&
                     params.ldmParams.hashEveryLog == ZSTD_LDM_HASHEVERYLOG_NOTSET));
            zc->entropy->hufCTable_repeatMode = HUF_repeat_none;
            zc->entropy->offcode_repeatMode = FSE_repeat_none;
            zc->entropy->matchlength_repeatMode = FSE_repeat_none;
            zc->entropy->litlength_repeatMode = FSE_repeat_none;
            return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
    }   }
    DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx");

    if (params.ldmParams.enableLdm) {
        /* Adjust long distance matching parameters */
        ZSTD_ldm_adjustParameters(&params.ldmParams, params.cParams.windowLog);
        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
        assert(params.ldmParams.hashEveryLog < 32);
        zc->ldmState.hashPower =
                ZSTD_ldm_getHashPower(params.ldmParams.minMatchLength);
    }

    {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
        U32    const divider = (params.cParams.searchLength==3) ? 3 : 4;
        size_t const maxNbSeq = blockSize / divider;
        size_t const tokenSpace = blockSize + 11*maxNbSeq;
        size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ?
                                0 : ((size_t)1 << params.cParams.chainLog);
        size_t const hSize = ((size_t)1) << params.cParams.hashLog;
        U32    const hashLog3 = (params.cParams.searchLength>3) ?
                                0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog);
        size_t const h3Size = ((size_t)1) << hashLog3;
        size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
        size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0;
        size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0;
        void* ptr;

        /* Check if workSpace is large enough, alloc a new one if needed */
        {   size_t const entropySpace = sizeof(ZSTD_entropyCTables_t);
            size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32)
                                  + (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t));
            size_t const optSpace = ( (params.cParams.strategy == ZSTD_btopt)
                                    || (params.cParams.strategy == ZSTD_btultra)) ?
                                    optPotentialSpace : 0;
            size_t const bufferSpace = buffInSize + buffOutSize;
            size_t const ldmSpace = params.ldmParams.enableLdm
                ? ZSTD_ldm_getTableSize(params.ldmParams.hashLog, params.ldmParams.bucketSizeLog)
                : 0;
            size_t const neededSpace = entropySpace + optSpace + ldmSpace +
                                       tableSpace + tokenSpace + bufferSpace;
            DEBUGLOG(4, "Need %uKB workspace, including %uKB for tables, and %uKB for buffers",
                        (U32)(neededSpace>>10), (U32)(tableSpace>>10), (U32)(bufferSpace>>10));
            DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u - windowSize: %u - blockSize: %u",
                        (U32)chainSize, (U32)hSize, (U32)h3Size, (U32)windowSize, (U32)blockSize);

            if (zc->workSpaceSize < neededSpace) {  /* too small : resize */
                DEBUGLOG(4, "Need to update workSpaceSize from %uK to %uK",
                            (unsigned)(zc->workSpaceSize>>10),
                            (unsigned)(neededSpace>>10));
                /* static cctx : no resize, error out */
                if (zc->staticSize) return ERROR(memory_allocation);

                zc->workSpaceSize = 0;
                ZSTD_free(zc->workSpace, zc->customMem);
                zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
                if (zc->workSpace == NULL) return ERROR(memory_allocation);
                zc->workSpaceSize = neededSpace;
                ptr = zc->workSpace;

                /* entropy space */
                assert(((size_t)zc->workSpace & 3) == 0);   /* ensure correct alignment */
                assert(zc->workSpaceSize >= sizeof(ZSTD_entropyCTables_t));
                zc->entropy = (ZSTD_entropyCTables_t*)zc->workSpace;
        }   }

        /* init params */
        zc->appliedParams = params;
        zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
        zc->consumedSrcSize = 0;
        if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
            zc->appliedParams.fParams.contentSizeFlag = 0;
        DEBUGLOG(4, "pledged content size : %u ; flag : %u",
            (U32)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
        zc->blockSize = blockSize;

        XXH64_reset(&zc->xxhState, 0);
        zc->stage = ZSTDcs_init;
        zc->dictID = 0;
        zc->loadedDictEnd = 0;
        zc->entropy->hufCTable_repeatMode = HUF_repeat_none;
        zc->entropy->offcode_repeatMode = FSE_repeat_none;
        zc->entropy->matchlength_repeatMode = FSE_repeat_none;
        zc->entropy->litlength_repeatMode = FSE_repeat_none;
        zc->nextToUpdate = 1;
        zc->nextSrc = NULL;
        zc->base = NULL;
        zc->dictBase = NULL;
        zc->dictLimit = 0;
        zc->lowLimit = 0;
        { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->seqStore.rep[i] = repStartValue[i]; }
        zc->hashLog3 = hashLog3;
        zc->optState.litLengthSum = 0;

        ptr = zc->entropy + 1;

        /* opt parser space */
        if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btultra)) {
            DEBUGLOG(4, "reserving optimal parser space");
            assert(((size_t)ptr & 3) == 0);  /* ensure ptr is properly aligned */
            zc->optState.litFreq = (U32*)ptr;
            zc->optState.litLengthFreq = zc->optState.litFreq + (1<<Litbits);
            zc->optState.matchLengthFreq = zc->optState.litLengthFreq + (MaxLL+1);
            zc->optState.offCodeFreq = zc->optState.matchLengthFreq + (MaxML+1);
            ptr = zc->optState.offCodeFreq + (MaxOff+1);
            zc->optState.matchTable = (ZSTD_match_t*)ptr;
            ptr = zc->optState.matchTable + ZSTD_OPT_NUM+1;
            zc->optState.priceTable = (ZSTD_optimal_t*)ptr;
            ptr = zc->optState.priceTable + ZSTD_OPT_NUM+1;
        }

        /* ldm hash table */
        /* initialize bucketOffsets table later for pointer alignment */
        if (params.ldmParams.enableLdm) {
            size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
            memset(ptr, 0, ldmHSize * sizeof(ldmEntry_t));
            assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
            zc->ldmState.hashTable = (ldmEntry_t*)ptr;
            ptr = zc->ldmState.hashTable + ldmHSize;
        }

        /* table Space */
        DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_noMemset);
        if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace);   /* reset tables only */
        assert(((size_t)ptr & 3) == 0);  /* ensure ptr is properly aligned */
        zc->hashTable = (U32*)(ptr);
        zc->chainTable = zc->hashTable + hSize;
        zc->hashTable3 = zc->chainTable + chainSize;
        ptr = zc->hashTable3 + h3Size;

        /* sequences storage */
        zc->seqStore.sequencesStart = (seqDef*)ptr;
        ptr = zc->seqStore.sequencesStart + maxNbSeq;
        zc->seqStore.llCode = (BYTE*) ptr;
        zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
        zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
        zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
        ptr = zc->seqStore.litStart + blockSize;

        /* ldm bucketOffsets table */
        if (params.ldmParams.enableLdm) {
            size_t const ldmBucketSize =
                  ((size_t)1) << (params.ldmParams.hashLog -
                                  params.ldmParams.bucketSizeLog);
            memset(ptr, 0, ldmBucketSize);
            zc->ldmState.bucketOffsets = (BYTE*)ptr;
            ptr = zc->ldmState.bucketOffsets + ldmBucketSize;
        }

        /* buffers */
        zc->inBuffSize = buffInSize;
        zc->inBuff = (char*)ptr;
        zc->outBuffSize = buffOutSize;
        zc->outBuff = zc->inBuff + buffInSize;

        return 0;
    }
}

/* ZSTD_invalidateRepCodes() :
 * ensures next compression will not use repcodes from previous block.
 * Note : only works with regular variant;
 *        do not use with extDict variant ! */
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
    int i;
    for (i=0; i<ZSTD_REP_NUM; i++) cctx->seqStore.rep[i] = 0;
}


/*! ZSTD_copyCCtx_internal() :
 *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
 *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
 *  The "context", in this case, refers to the hash and chain tables,
 *  entropy tables, and dictionary references.
 * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
 * @return : 0, or an error code */
static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
                            const ZSTD_CCtx* srcCCtx,
                            unsigned windowLog,
                            ZSTD_frameParameters fParams,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    DEBUGLOG(5, "ZSTD_copyCCtx_internal");
    if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);

    memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
        /* Copy only compression parameters related to tables. */
        params.cParams = srcCCtx->appliedParams.cParams;
        if (windowLog) params.cParams.windowLog = windowLog;
        params.fParams = fParams;
        ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
                                ZSTDcrp_noMemset, zbuff);
    }

    /* copy tables */
    {   size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
        size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
        size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
        size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
        assert((U32*)dstCCtx->chainTable == (U32*)dstCCtx->hashTable + hSize);  /* chainTable must follow hashTable */
        assert((U32*)dstCCtx->hashTable3 == (U32*)dstCCtx->chainTable + chainSize);
        memcpy(dstCCtx->hashTable, srcCCtx->hashTable, tableSpace);   /* presumes all tables follow each other */
    }

    /* copy dictionary offsets */
    dstCCtx->nextToUpdate = srcCCtx->nextToUpdate;
    dstCCtx->nextToUpdate3= srcCCtx->nextToUpdate3;
    dstCCtx->nextSrc      = srcCCtx->nextSrc;
    dstCCtx->base         = srcCCtx->base;
    dstCCtx->dictBase     = srcCCtx->dictBase;
    dstCCtx->dictLimit    = srcCCtx->dictLimit;
    dstCCtx->lowLimit     = srcCCtx->lowLimit;
    dstCCtx->loadedDictEnd= srcCCtx->loadedDictEnd;
    dstCCtx->dictID       = srcCCtx->dictID;

    /* copy entropy tables */
    memcpy(dstCCtx->entropy, srcCCtx->entropy, sizeof(ZSTD_entropyCTables_t));
    /* copy repcodes */
    {
        int i;
        for (i = 0; i < ZSTD_REP_NUM; ++i)
            dstCCtx->seqStore.rep[i] = srcCCtx->seqStore.rep[i];
    }

    return 0;
}

/*! ZSTD_copyCCtx() :
 *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
 *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
 *  pledgedSrcSize==0 means "unknown".
 *  @return : 0, or an error code */
size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
{
    ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
    ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0);
    ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
    fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);

    return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
                                0 /*windowLog from srcCCtx*/, fParams, pledgedSrcSize,
                                zbuff);
}
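
/* Illustrative sketch : pay the dictionary-loading cost once, then clone the
 * initialized state into per-use contexts (dict, dictSize and level are
 * caller-provided placeholders).
 *     ZSTD_compressBegin_usingDict(srcCCtx, dict, dictSize, level);
 *     ZSTD_copyCCtx(dstCCtx, srcCCtx, 0);   // 0 == pledged source size unknown
 *     ... compress with dstCCtx, which starts from the same tables ...
 */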


/*! ZSTD_reduceTable() :
 *  reduce table indexes by `reducerValue` */
static void ZSTD_reduceTable (U32* const table, U32 const size, U32 const reducerValue)
{
    U32 u;
    for (u=0 ; u < size ; u++) {
        if (table[u] < reducerValue) table[u] = 0;
        else table[u] -= reducerValue;
    }
}

/*! ZSTD_ldm_reduceTable() :
 *  reduce table indexes by `reducerValue` */
static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
                                 U32 const reducerValue)
{
    U32 u;
    for (u = 0; u < size; u++) {
        if (table[u].offset < reducerValue) table[u].offset = 0;
        else table[u].offset -= reducerValue;
    }
}

/*! ZSTD_reduceIndex() :
*   rescale all indexes to avoid future overflow (indexes are U32) */
static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
{
    { U32 const hSize = (U32)1 << zc->appliedParams.cParams.hashLog;
      ZSTD_reduceTable(zc->hashTable, hSize, reducerValue); }

    { U32 const chainSize = (zc->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((U32)1 << zc->appliedParams.cParams.chainLog);
      ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue); }

    { U32 const h3Size = (zc->hashLog3) ? (U32)1 << zc->hashLog3 : 0;
      ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue); }

    { if (zc->appliedParams.ldmParams.enableLdm) {
          U32 const ldmHSize = (U32)1 << zc->appliedParams.ldmParams.hashLog;
          ZSTD_ldm_reduceTable(zc->ldmState.hashTable, ldmHSize, reducerValue);
      }
    }
}


/*-*******************************************************
*  Block entropic compression
*********************************************************/

/* See doc/zstd_compression_format.md for detailed format description */

size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
    memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
    MEM_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
    return ZSTD_blockHeaderSize+srcSize;
}


static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    BYTE* const ostart = (BYTE* const)dst;
    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);

    if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall);

    switch(flSize)
    {
        case 1: /* 2 - 1 - 5 */
            ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
            break;
        case 2: /* 2 - 2 - 12 */
            MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
            break;
        case 3: /* 2 - 2 - 20 */
            MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
            break;
        default:   /* not possible : flSize is {1,2,3} */
            assert(0);
    }

    memcpy(ostart + flSize, src, srcSize);
    return srcSize + flSize;
}
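
/* Example (derived from the header layout above) : srcSize==20 gives flSize==1,
 * so the single header byte is set_basic + (20<<3) == 0xA0, followed by the
 * 20 raw literal bytes. */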

static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    BYTE* const ostart = (BYTE* const)dst;
    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);

    (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */

    switch(flSize)
    {
        case 1: /* 2 - 1 - 5 */
            ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
            break;
        case 2: /* 2 - 2 - 12 */
            MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
            break;
        case 3: /* 2 - 2 - 20 */
            MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
            break;
        default:   /* not possible : flSize is {1,2,3} */
            assert(0);
    }

    ostart[flSize] = *(const BYTE*)src;
    return flSize+1;
}


static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
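
/* Example : for a 4 KB block, ZSTD_minGain() == (4096>>6)+2 == 66, i.e. the
 * compressed literals must undercut the raw size by more than 66 bytes to be
 * worth emitting in compressed form. */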
1201 
1202 static size_t ZSTD_compressLiterals (ZSTD_entropyCTables_t * entropy,
1203                                      ZSTD_strategy strategy,
1204                                      void* dst, size_t dstCapacity,
1205                                const void* src, size_t srcSize)
1206 {
1207     size_t const minGain = ZSTD_minGain(srcSize);
1208     size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
1209     BYTE*  const ostart = (BYTE*)dst;
1210     U32 singleStream = srcSize < 256;
1211     symbolEncodingType_e hType = set_compressed;
1212     size_t cLitSize;
1213 
1214 
1215     /* small ? don't even attempt compression (speed opt) */
1216 #   define LITERAL_NOENTROPY 63
1217     {   size_t const minLitSize = entropy->hufCTable_repeatMode == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
1218         if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
1219     }
1220 
1221     if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall);   /* not enough space for compression */
1222     {   HUF_repeat repeat = entropy->hufCTable_repeatMode;
1223         int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
1224         if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
1225         cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
1226                                       entropy->workspace, sizeof(entropy->workspace), (HUF_CElt*)entropy->hufCTable, &repeat, preferRepeat)
1227                                 : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
1228                                       entropy->workspace, sizeof(entropy->workspace), (HUF_CElt*)entropy->hufCTable, &repeat, preferRepeat);
1229         if (repeat != HUF_repeat_none) { hType = set_repeat; }    /* reused the existing table */
1230         else { entropy->hufCTable_repeatMode = HUF_repeat_check; }       /* now have a table to reuse */
1231     }
1232 
1233     if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
1234         entropy->hufCTable_repeatMode = HUF_repeat_none;
1235         return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
1236     }
1237     if (cLitSize==1) {
1238         entropy->hufCTable_repeatMode = HUF_repeat_none;
1239         return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
1240     }
1241 
1242     /* Build header */
1243     switch(lhSize)
1244     {
1245     case 3: /* 2 - 2 - 10 - 10 */
1246         {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
1247             MEM_writeLE24(ostart, lhc);
1248             break;
1249         }
1250     case 4: /* 2 - 2 - 14 - 14 */
1251         {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
1252             MEM_writeLE32(ostart, lhc);
1253             break;
1254         }
1255     case 5: /* 2 - 2 - 18 - 18 */
1256         {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
1257             MEM_writeLE32(ostart, lhc);
1258             ostart[4] = (BYTE)(cLitSize >> 10);
1259             break;
1260         }
1261     default:  /* not possible : lhSize is {3,4,5} */
1262         assert(0);
1263     }
1264     return lhSize+cLitSize;
1265 }
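/* Worked example for the literals header above : lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB),
 * so a 4 KB literals segment uses the 4-byte (2-2-14-14) layout,
 * while anything below 1 KB fits the 3-byte (2-2-10-10) layout. */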
1266 
1267 
1268 void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
1269 {
1270     const seqDef* const sequences = seqStorePtr->sequencesStart;
1271     BYTE* const llCodeTable = seqStorePtr->llCode;
1272     BYTE* const ofCodeTable = seqStorePtr->ofCode;
1273     BYTE* const mlCodeTable = seqStorePtr->mlCode;
1274     U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
1275     U32 u;
1276     for (u=0; u<nbSeq; u++) {
1277         U32 const llv = sequences[u].litLength;
1278         U32 const mlv = sequences[u].matchLength;
1279         llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
1280         ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
1281         mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
1282     }
1283     if (seqStorePtr->longLengthID==1)
1284         llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
1285     if (seqStorePtr->longLengthID==2)
1286         mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
1287 }
1288 
1289 typedef enum {
1290     ZSTD_defaultDisallowed = 0,
1291     ZSTD_defaultAllowed = 1
1292 } ZSTD_defaultPolicy_e;
1293 
1294 MEM_STATIC
1295 symbolEncodingType_e ZSTD_selectEncodingType(
1296         FSE_repeat* repeatMode, size_t const mostFrequent, size_t nbSeq,
1297         U32 defaultNormLog, ZSTD_defaultPolicy_e const isDefaultAllowed)
1298 {
1299 #define MIN_SEQ_FOR_DYNAMIC_FSE   64
1300 #define MAX_SEQ_FOR_STATIC_FSE  1000
1301     ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
1302     if ((mostFrequent == nbSeq) && (!isDefaultAllowed || nbSeq > 2)) {
1303         DEBUGLOG(5, "Selected set_rle");
1304         /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
1305          * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
1306          * If basic encoding isn't possible, always choose RLE.
1307          */
1308         *repeatMode = FSE_repeat_check;
1309         return set_rle;
1310     }
1311     if ( isDefaultAllowed
1312       && (*repeatMode == FSE_repeat_valid) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
1313         DEBUGLOG(5, "Selected set_repeat");
1314         return set_repeat;
1315     }
1316     if ( isDefaultAllowed
1317       && ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (defaultNormLog-1)))) ) {
1318         DEBUGLOG(5, "Selected set_basic");
1319         /* The format allows default tables to be repeated, but it isn't useful.
1320          * When using simple heuristics to select encoding type, we don't want
1321          * to confuse these tables with dictionaries. When running more careful
1322          * analysis, we don't need to waste time checking both repeating tables
1323          * and default tables.
1324          */
1325         *repeatMode = FSE_repeat_none;
1326         return set_basic;
1327     }
1328     DEBUGLOG(5, "Selected set_compressed");
1329     *repeatMode = FSE_repeat_check;
1330     return set_compressed;
1331 }
1332 
1333 MEM_STATIC
1334 size_t ZSTD_buildCTable(void* dst, size_t dstCapacity,
1335         FSE_CTable* CTable, U32 FSELog, symbolEncodingType_e type,
1336         U32* count, U32 max,
1337         BYTE const* codeTable, size_t nbSeq,
1338         S16 const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
1339         void* workspace, size_t workspaceSize)
1340 {
1341     BYTE* op = (BYTE*)dst;
1342     BYTE const* const oend = op + dstCapacity;
1343 
1344     switch (type) {
1345     case set_rle:
1346         *op = codeTable[0];
1347         CHECK_F(FSE_buildCTable_rle(CTable, (BYTE)max));
1348         return 1;
1349     case set_repeat:
1350         return 0;
1351     case set_basic:
1352         CHECK_F(FSE_buildCTable_wksp(CTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize));  /* note : could be pre-calculated */
1353         return 0;
1354     case set_compressed: {
1355         S16 norm[MaxSeq + 1];
1356         size_t nbSeq_1 = nbSeq;
1357         const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
1358         if (count[codeTable[nbSeq-1]] > 1) {
1359             count[codeTable[nbSeq-1]]--;
1360             nbSeq_1--;
1361         }
1362         assert(nbSeq_1 > 1);
1363         CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
1364         {   size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog);   /* overflow protected */
1365             if (FSE_isError(NCountSize)) return NCountSize;
1366             CHECK_F(FSE_buildCTable_wksp(CTable, norm, max, tableLog, workspace, workspaceSize));
1367             return NCountSize;
1368         }
1369     }
1370     default: return assert(0), ERROR(GENERIC);
1371     }
1372 }
1373 
1374 MEM_STATIC
1375 size_t ZSTD_encodeSequences(
1376             void* dst, size_t dstCapacity,
1377             FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
1378             FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
1379             FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
1380             seqDef const* sequences, size_t nbSeq, int longOffsets)
1381 {
1382     BIT_CStream_t blockStream;
1383     FSE_CState_t  stateMatchLength;
1384     FSE_CState_t  stateOffsetBits;
1385     FSE_CState_t  stateLitLength;
1386 
1387     CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */
1388 
1389     /* first symbols */
1390     FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
1391     FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);
1392     FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);
1393     BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
1394     if (MEM_32bits()) BIT_flushBits(&blockStream);
1395     BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
1396     if (MEM_32bits()) BIT_flushBits(&blockStream);
1397     if (longOffsets) {
1398         U32 const ofBits = ofCodeTable[nbSeq-1];
1399         int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
1400         if (extraBits) {
1401             BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
1402             BIT_flushBits(&blockStream);
1403         }
1404         BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
1405                     ofBits - extraBits);
1406     } else {
1407         BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
1408     }
1409     BIT_flushBits(&blockStream);
1410 
1411     {   size_t n;
1412         for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
1413             BYTE const llCode = llCodeTable[n];
1414             BYTE const ofCode = ofCodeTable[n];
1415             BYTE const mlCode = mlCodeTable[n];
1416             U32  const llBits = LL_bits[llCode];
1417             U32  const ofBits = ofCode;
1418             U32  const mlBits = ML_bits[mlCode];
1419             DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
1420                         sequences[n].litLength,
1421                         sequences[n].matchLength + MINMATCH,
1422                         sequences[n].offset);                               /* 32b*/  /* 64b*/
1423                                                                             /* (7)*/  /* (7)*/
1424             FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
1425             FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
1426             if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
1427             FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
1428             if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
1429                 BIT_flushBits(&blockStream);                                /* (7)*/
1430             BIT_addBits(&blockStream, sequences[n].litLength, llBits);
1431             if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
1432             BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
1433             if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
1434             if (longOffsets) {
1435                 int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
1436                 if (extraBits) {
1437                     BIT_addBits(&blockStream, sequences[n].offset, extraBits);
1438                     BIT_flushBits(&blockStream);                            /* (7)*/
1439                 }
1440                 BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
1441                             ofBits - extraBits);                            /* 31 */
1442             } else {
1443                 BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
1444             }
1445             BIT_flushBits(&blockStream);                                    /* (7)*/
1446     }   }
1447 
1448     FSE_flushCState(&blockStream, &stateMatchLength);
1449     FSE_flushCState(&blockStream, &stateOffsetBits);
1450     FSE_flushCState(&blockStream, &stateLitLength);
1451 
1452     {   size_t const streamSize = BIT_closeCStream(&blockStream);
1453         if (streamSize==0) return ERROR(dstSize_tooSmall);   /* not enough space */
1454         return streamSize;
1455     }
1456 }
1457 
1458 MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
1459                               ZSTD_entropyCTables_t* entropy,
1460                               ZSTD_compressionParameters const* cParams,
1461                               void* dst, size_t dstCapacity)
1462 {
1463     const int longOffsets = cParams->windowLog > STREAM_ACCUMULATOR_MIN;
1464     U32 count[MaxSeq+1];
1465     FSE_CTable* CTable_LitLength = entropy->litlengthCTable;
1466     FSE_CTable* CTable_OffsetBits = entropy->offcodeCTable;
1467     FSE_CTable* CTable_MatchLength = entropy->matchlengthCTable;
1468     U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
1469     const seqDef* const sequences = seqStorePtr->sequencesStart;
1470     const BYTE* const ofCodeTable = seqStorePtr->ofCode;
1471     const BYTE* const llCodeTable = seqStorePtr->llCode;
1472     const BYTE* const mlCodeTable = seqStorePtr->mlCode;
1473     BYTE* const ostart = (BYTE*)dst;
1474     BYTE* const oend = ostart + dstCapacity;
1475     BYTE* op = ostart;
1476     size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
1477     BYTE* seqHead;
1478 
1479     ZSTD_STATIC_ASSERT(sizeof(entropy->workspace) >= (1<<MAX(MLFSELog,LLFSELog)));
1480 
1481     /* Compress literals */
1482     {   const BYTE* const literals = seqStorePtr->litStart;
1483         size_t const litSize = seqStorePtr->lit - literals;
1484         size_t const cSize = ZSTD_compressLiterals(
1485                 entropy, cParams->strategy, op, dstCapacity, literals, litSize);
1486         if (ZSTD_isError(cSize))
1487           return cSize;
1488         assert(cSize <= dstCapacity);
1489         op += cSize;
1490     }
1491 
1492     /* Sequences Header */
1493     if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/) return ERROR(dstSize_tooSmall);
1494     if (nbSeq < 0x7F)
1495         *op++ = (BYTE)nbSeq;
1496     else if (nbSeq < LONGNBSEQ)
1497         op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
1498     else
1499         op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
1500     if (nbSeq==0) return op - ostart;
1501 
1502     /* seqHead : flags for FSE encoding type */
1503     seqHead = op++;
1504 
1505     /* convert length/distances into codes */
1506     ZSTD_seqToCodes(seqStorePtr);
1507     /* build CTable for Literal Lengths */
1508     {   U32 max = MaxLL;
1509         size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, entropy->workspace);
1510         DEBUGLOG(5, "Building LL table");
1511         LLtype = ZSTD_selectEncodingType(&entropy->litlength_repeatMode, mostFrequent, nbSeq, LL_defaultNormLog, ZSTD_defaultAllowed);
1512         {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
1513                     count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
1514                     entropy->workspace, sizeof(entropy->workspace));
1515             if (ZSTD_isError(countSize)) return countSize;
1516             op += countSize;
1517     }   }
1518     /* build CTable for Offsets */
1519     {   U32 max = MaxOff;
1520         size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, entropy->workspace);
1521         /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
1522         ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
1523         DEBUGLOG(5, "Building OF table");
1524         Offtype = ZSTD_selectEncodingType(&entropy->offcode_repeatMode, mostFrequent, nbSeq, OF_defaultNormLog, defaultPolicy);
1525         {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
1526                     count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
1527                     entropy->workspace, sizeof(entropy->workspace));
1528             if (ZSTD_isError(countSize)) return countSize;
1529             op += countSize;
1530     }   }
1531     /* build CTable for MatchLengths */
1532     {   U32 max = MaxML;
1533         size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, entropy->workspace);
1534         DEBUGLOG(5, "Building ML table");
1535         MLtype = ZSTD_selectEncodingType(&entropy->matchlength_repeatMode, mostFrequent, nbSeq, ML_defaultNormLog, ZSTD_defaultAllowed);
1536         {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
1537                     count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
1538                     entropy->workspace, sizeof(entropy->workspace));
1539             if (ZSTD_isError(countSize)) return countSize;
1540             op += countSize;
1541     }   }
1542 
1543     *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
1544 
1545     {   size_t const bitstreamSize = ZSTD_encodeSequences(
1546                                         op, oend - op,
1547                                         CTable_MatchLength, mlCodeTable,
1548                                         CTable_OffsetBits, ofCodeTable,
1549                                         CTable_LitLength, llCodeTable,
1550                                         sequences, nbSeq,
1551                                         longOffsets);
1552         if (ZSTD_isError(bitstreamSize)) return bitstreamSize;
1553         op += bitstreamSize;
1554     }
1555 
1556     return op - ostart;
1557 }
1558 
1559 MEM_STATIC size_t ZSTD_compressSequences(seqStore_t* seqStorePtr,
1560                               ZSTD_entropyCTables_t* entropy,
1561                               ZSTD_compressionParameters const* cParams,
1562                               void* dst, size_t dstCapacity,
1563                               size_t srcSize)
1564 {
1565     size_t const cSize = ZSTD_compressSequences_internal(seqStorePtr, entropy, cParams,
1566                                                          dst, dstCapacity);
1567     /* If the srcSize <= dstCapacity, then there is enough space to write a
1568      * raw uncompressed block. Since we ran out of space, the block must not
1569      * be compressible, so fall back to a raw uncompressed block.
1570      */
1571     int const uncompressibleError = (cSize == ERROR(dstSize_tooSmall)) && (srcSize <= dstCapacity);
1572     if (ZSTD_isError(cSize) && !uncompressibleError)
1573         return cSize;
1574     /* We check that dictionaries have offset codes available for the first
1575      * block. After the first block, the offcode table might not have large
1576      * enough codes to represent the offsets in the data.
1577      */
1578     if (entropy->offcode_repeatMode == FSE_repeat_valid)
1579         entropy->offcode_repeatMode = FSE_repeat_check;
1580 
1581     /* Check compressibility */
1582     {   size_t const minGain = ZSTD_minGain(srcSize);  /* note : fixed formula, maybe should depend on compression level, or strategy */
1583         size_t const maxCSize = srcSize - minGain;
1584         if (cSize >= maxCSize || uncompressibleError) {
1585             entropy->hufCTable_repeatMode = HUF_repeat_none;
1586             entropy->offcode_repeatMode = FSE_repeat_none;
1587             entropy->matchlength_repeatMode = FSE_repeat_none;
1588             entropy->litlength_repeatMode = FSE_repeat_none;
1589             return 0;  /* block not compressed */
1590     }   }
1591     assert(!ZSTD_isError(cSize));
1592 
1593     /* block is compressed => confirm repcodes in history */
1594     { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqStorePtr->rep[i] = seqStorePtr->repToConfirm[i]; }
1595     return cSize;
1596 }
1597 
1598 /* ZSTD_selectBlockCompressor() :
1599  * Not static, but internal use only (used by long distance matcher)
1600  * assumption : strat is a valid strategy */
1601 typedef size_t (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize);
1602 ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
1603 {
1604     static const ZSTD_blockCompressor blockCompressor[2][(unsigned)ZSTD_btultra+1] = {
1605         { ZSTD_compressBlock_fast  /* default for 0 */,
1606           ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy,
1607           ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2,
1608           ZSTD_compressBlock_btopt, ZSTD_compressBlock_btultra },
1609         { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
1610           ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict,
1611           ZSTD_compressBlock_lazy_extDict,ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict,
1612           ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btultra_extDict }
1613     };
1614     ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
1615 
1616     assert((U32)strat >= (U32)ZSTD_fast);
1617     assert((U32)strat <= (U32)ZSTD_btultra);
1618     return blockCompressor[extDict!=0][(U32)strat];
1619 }
1620 
1621 static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
1622                                    const BYTE* anchor, size_t lastLLSize)
1623 {
1624     memcpy(seqStorePtr->lit, anchor, lastLLSize);
1625     seqStorePtr->lit += lastLLSize;
1626 }
1627 
1628 static void ZSTD_resetSeqStore(seqStore_t* ssPtr)
1629 {
1630     ssPtr->lit = ssPtr->litStart;
1631     ssPtr->sequences = ssPtr->sequencesStart;
1632     ssPtr->longLengthID = 0;
1633 }
1634 
1635 static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
1636 {
1637     DEBUGLOG(5, "ZSTD_compressBlock_internal : dstCapacity = %u", (U32)dstCapacity);
1638     if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1)
1639         return 0;   /* don't even attempt compression below a certain srcSize */
1640     ZSTD_resetSeqStore(&(zc->seqStore));
1641 
1642     /* limited update after a very long match */
1643     {   const BYTE* const base = zc->base;
1644         const BYTE* const istart = (const BYTE*)src;
1645         const U32 current = (U32)(istart-base);
1646         if (current > zc->nextToUpdate + 384)
1647             zc->nextToUpdate = current - MIN(192, (U32)(current - zc->nextToUpdate - 384));
1648     }
1649     /* find and store sequences */
1650     {   U32 const extDict = zc->lowLimit < zc->dictLimit;
1651         const ZSTD_blockCompressor blockCompressor =
1652             zc->appliedParams.ldmParams.enableLdm
1653                 ? (extDict ? ZSTD_compressBlock_ldm_extDict : ZSTD_compressBlock_ldm)
1654                 : ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, extDict);
1655         size_t const lastLLSize = blockCompressor(zc, src, srcSize);
1656         const BYTE* const anchor = (const BYTE*)src + srcSize - lastLLSize;
1657         ZSTD_storeLastLiterals(&zc->seqStore, anchor, lastLLSize);
1658     }
1659     /* encode */
1660     return ZSTD_compressSequences(&zc->seqStore, zc->entropy, &zc->appliedParams.cParams, dst, dstCapacity, srcSize);
1661 }
1662 
1663 
1664 /*! ZSTD_compress_frameChunk() :
1665 *   Compress a chunk of data into one or multiple blocks.
1666 *   All blocks will be terminated, all input will be consumed.
1667 *   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
1668 *   Frame is assumed to be already started (header already produced)
1669 *   @return : compressed size, or an error code
1670 */
1671 static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
1672                                      void* dst, size_t dstCapacity,
1673                                const void* src, size_t srcSize,
1674                                      U32 lastFrameChunk)
1675 {
1676     size_t blockSize = cctx->blockSize;
1677     size_t remaining = srcSize;
1678     const BYTE* ip = (const BYTE*)src;
1679     BYTE* const ostart = (BYTE*)dst;
1680     BYTE* op = ostart;
1681     U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
1682     assert(cctx->appliedParams.cParams.windowLog <= 31);
1683 
1684     DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (U32)blockSize);
1685     if (cctx->appliedParams.fParams.checksumFlag && srcSize)
1686         XXH64_update(&cctx->xxhState, src, srcSize);
1687 
1688     while (remaining) {
1689         U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
1690 
1691         if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
1692             return ERROR(dstSize_tooSmall);   /* not enough space to store compressed block */
1693         if (remaining < blockSize) blockSize = remaining;
1694 
1695         /* preemptive overflow correction:
1696          * 1. correction is large enough:
1697          *    lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog - blockSize
1698          *    1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
1699          *
1700          *    current - newCurrent
1701          *    > (3<<29 + 1<<windowLog - blockSize) - (1<<windowLog + 1<<chainLog)
1702          *    > (3<<29 - blockSize) - (1<<chainLog)
1703          *    > (3<<29 - blockSize) - (1<<30)             (NOTE: chainLog <= 30)
1704          *    > 1<<29 - 1<<17
1705          *
1706          * 2. (ip+blockSize - cctx->base) doesn't overflow:
1707          *    In 32 bit mode we limit windowLog to 30 so we don't get
1708          *    differences larger than 1<<31-1.
1709          * 3. cctx->lowLimit < 1<<32:
1710          *    windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
1711          */
1712         if (cctx->lowLimit > (3U<<29)) {
1713             U32 const cycleMask = ((U32)1 << ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy)) - 1;
1714             U32 const current = (U32)(ip - cctx->base);
1715             U32 const newCurrent = (current & cycleMask) + ((U32)1 << cctx->appliedParams.cParams.windowLog);
1716             U32 const correction = current - newCurrent;
1717             ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
1718             ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
1719             ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
1720             assert(current > newCurrent);
1721             assert(correction > 1<<28); /* Loose bound, should be about 1<<29 */
1722             ZSTD_reduceIndex(cctx, correction);
1723             cctx->base += correction;
1724             cctx->dictBase += correction;
1725             cctx->lowLimit -= correction;
1726             cctx->dictLimit -= correction;
1727             if (cctx->nextToUpdate < correction) cctx->nextToUpdate = 0;
1728             else cctx->nextToUpdate -= correction;
1729             DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x\n", correction, cctx->lowLimit);
1730         }
1731         /* enforce maxDist */
1732         if ((U32)(ip+blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) {
1733             U32 const newLowLimit = (U32)(ip+blockSize - cctx->base) - maxDist;
1734             if (cctx->lowLimit < newLowLimit) cctx->lowLimit = newLowLimit;
1735             if (cctx->dictLimit < cctx->lowLimit) cctx->dictLimit = cctx->lowLimit;
1736         }
1737 
1738         {   size_t cSize = ZSTD_compressBlock_internal(cctx,
1739                                 op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
1740                                 ip, blockSize);
1741             if (ZSTD_isError(cSize)) return cSize;
1742 
1743             if (cSize == 0) {  /* block is not compressible */
1744                 U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(blockSize << 3);
1745                 if (blockSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
1746                 MEM_writeLE32(op, cBlockHeader24);   /* 4th byte will be overwritten */
1747                 memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
1748                 cSize = ZSTD_blockHeaderSize + blockSize;
1749             } else {
1750                 U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
1751                 MEM_writeLE24(op, cBlockHeader24);
1752                 cSize += ZSTD_blockHeaderSize;
1753             }
1754 
1755             ip += blockSize;
1756             assert(remaining >= blockSize);
1757             remaining -= blockSize;
1758             op += cSize;
1759             assert(dstCapacity >= cSize);
1760             dstCapacity -= cSize;
1761             DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
1762                         (U32)cSize);
1763     }   }
1764 
1765     if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
1766     return op-ostart;
1767 }
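/* Worked example for the block headers written above : a last raw block of
 * 1000 bytes gets cBlockHeader24 = 1 (last) + (bt_raw<<1) + (1000<<3) = 0x1F41,
 * i.e. header bytes 41 1F 00 in little-endian order (bt_raw being 0). */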
1768 
1769 
1770 static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
1771                                     ZSTD_CCtx_params params, U64 pledgedSrcSize, U32 dictID)
1772 {   BYTE* const op = (BYTE*)dst;
1773     U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
1774     U32   const dictIDSizeCode = params.fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
1775     U32   const checksumFlag = params.fParams.checksumFlag>0;
1776     U32   const windowSize = (U32)1 << params.cParams.windowLog;
1777     U32   const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
1778     BYTE  const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
1779     U32   const fcsCode = params.fParams.contentSizeFlag ?
1780                      (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
1781     BYTE  const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
1782     size_t pos=0;
1783 
1784     if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall);
1785     DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
1786                 !params.fParams.noDictIDFlag, dictID,  dictIDSizeCode);
1787 
1788     if (params.format == ZSTD_f_zstd1) {
1789         MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
1790         pos = 4;
1791     }
1792     op[pos++] = frameHeaderDecriptionByte;
1793     if (!singleSegment) op[pos++] = windowLogByte;
1794     switch(dictIDSizeCode)
1795     {
1796         default:  assert(0); /* impossible */
1797         case 0 : break;
1798         case 1 : op[pos] = (BYTE)(dictID); pos++; break;
1799         case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
1800         case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
1801     }
1802     switch(fcsCode)
1803     {
1804         default:  assert(0); /* impossible */
1805         case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
1806         case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
1807         case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
1808         case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
1809     }
1810     return pos;
1811 }
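/* Worked example for the header layout above : no dictID, checksum enabled,
 * known content size of 100000 bytes, window smaller than the content :
 * dictIDSizeCode=0, checksumFlag=1, singleSegment=0, fcsCode=2, so the
 * descriptor byte is 0 + (1<<2) + (0<<5) + (2<<6) = 0x84, followed by the
 * window byte and a 4-byte little-endian content size :
 * 4 (magic) + 1 + 1 + 4 = 10 header bytes in total. */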
1812 
1813 
1814 static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
1815                               void* dst, size_t dstCapacity,
1816                         const void* src, size_t srcSize,
1817                                U32 frame, U32 lastFrameChunk)
1818 {
1819     const BYTE* const ip = (const BYTE*) src;
1820     size_t fhSize = 0;
1821 
1822     DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u", cctx->stage);
1823     if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong);   /* missing init (ZSTD_compressBegin) */
1824 
1825     if (frame && (cctx->stage==ZSTDcs_init)) {
1826         fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
1827                                        cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
1828         if (ZSTD_isError(fhSize)) return fhSize;
1829         dstCapacity -= fhSize;
1830         dst = (char*)dst + fhSize;
1831         cctx->stage = ZSTDcs_ongoing;
1832     }
1833 
1834     if (!srcSize) return fhSize;  /* do not generate an empty block if no input */
1835 
1836     /* Check if blocks follow each other */
1837     if (src != cctx->nextSrc) {
1838         /* not contiguous */
1839         size_t const distanceFromBase = (size_t)(cctx->nextSrc - cctx->base);
1840         cctx->lowLimit = cctx->dictLimit;
1841         assert(distanceFromBase == (size_t)(U32)distanceFromBase);  /* should never overflow */
1842         cctx->dictLimit = (U32)distanceFromBase;
1843         cctx->dictBase = cctx->base;
1844         cctx->base = ip - distanceFromBase;
1845         cctx->nextToUpdate = cctx->dictLimit;
1846         if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE) cctx->lowLimit = cctx->dictLimit;   /* too small extDict */
1847     }
1848     cctx->nextSrc = ip + srcSize;
1849 
1850     /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
1851     if ((ip+srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) {
1852         ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase;
1853         U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx;
1854         cctx->lowLimit = lowLimitMax;
1855     }
1856 
1857     DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (U32)cctx->blockSize);
1858     {   size_t const cSize = frame ?
1859                              ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
1860                              ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
1861         if (ZSTD_isError(cSize)) return cSize;
1862         cctx->consumedSrcSize += srcSize;
1863         return cSize + fhSize;
1864     }
1865 }
1866 
1867 size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
1868                               void* dst, size_t dstCapacity,
1869                         const void* src, size_t srcSize)
1870 {
1871     return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
1872 }
1873 
1874 
1875 size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
1876 {
1877     ZSTD_compressionParameters const cParams =
1878             ZSTD_getCParamsFromCCtxParams(cctx->appliedParams, 0, 0);
1879     return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
1880 }
1881 
1882 size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
1883 {
1884     size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
1885     if (srcSize > blockSizeMax) return ERROR(srcSize_wrong);
1886     return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
1887 }
1888 
1889 /*! ZSTD_loadDictionaryContent() :
1890  *  @return : 0, or an error code
1891  */
1892 static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx* zc, const void* src, size_t srcSize)
1893 {
1894     const BYTE* const ip = (const BYTE*) src;
1895     const BYTE* const iend = ip + srcSize;
1896 
1897     /* input becomes current prefix */
1898     zc->lowLimit = zc->dictLimit;
1899     zc->dictLimit = (U32)(zc->nextSrc - zc->base);
1900     zc->dictBase = zc->base;
1901     zc->base = ip - zc->dictLimit;
1902     zc->nextToUpdate = zc->dictLimit;
1903     zc->loadedDictEnd = zc->appliedParams.forceWindow ? 0 : (U32)(iend - zc->base);
1904 
1905     zc->nextSrc = iend;
1906     if (srcSize <= HASH_READ_SIZE) return 0;
1907 
1908     switch(zc->appliedParams.cParams.strategy)
1909     {
1910     case ZSTD_fast:
1911         ZSTD_fillHashTable (zc, iend, zc->appliedParams.cParams.searchLength);
1912         break;
1913     case ZSTD_dfast:
1914         ZSTD_fillDoubleHashTable (zc, iend, zc->appliedParams.cParams.searchLength);
1915         break;
1916 
1917     case ZSTD_greedy:
1918     case ZSTD_lazy:
1919     case ZSTD_lazy2:
1920         if (srcSize >= HASH_READ_SIZE)
1921             ZSTD_insertAndFindFirstIndex(zc, iend-HASH_READ_SIZE, zc->appliedParams.cParams.searchLength);
1922         break;
1923 
1924     case ZSTD_btlazy2:
1925     case ZSTD_btopt:
1926     case ZSTD_btultra:
1927         if (srcSize >= HASH_READ_SIZE)
1928             ZSTD_updateTree(zc, iend-HASH_READ_SIZE, iend, (U32)1 << zc->appliedParams.cParams.searchLog, zc->appliedParams.cParams.searchLength);
1929         break;
1930 
1931     default:
1932         assert(0);  /* not possible : not a valid strategy id */
1933     }
1934 
1935     zc->nextToUpdate = (U32)(iend - zc->base);
1936     return 0;
1937 }
1938 
1939 
1940 /* Dictionaries that assign zero probability to symbols that show up cause problems
1941    during FSE encoding.  Refuse dictionaries that assign zero probability to symbols
1942    that we may encounter during compression.
1943    NOTE: This behavior is not standard and could be improved in the future. */
1944 static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
1945     U32 s;
1946     if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted);
1947     for (s = 0; s <= maxSymbolValue; ++s) {
1948         if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted);
1949     }
1950     return 0;
1951 }
1952 
1953 
1954 /* Dictionary format :
1955  * See :
1956  * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
1957  */
1958 /*! ZSTD_loadZstdDictionary() :
1959  * @return : 0, or an error code
1960  *  assumptions : magic number already checked
1961  *                dictSize > 8
1962  */
1963 static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
1964 {
1965     const BYTE* dictPtr = (const BYTE*)dict;
1966     const BYTE* const dictEnd = dictPtr + dictSize;
1967     short offcodeNCount[MaxOff+1];
1968     unsigned offcodeMaxValue = MaxOff;
1969 
1970     ZSTD_STATIC_ASSERT(sizeof(cctx->entropy->workspace) >= (1<<MAX(MLFSELog,LLFSELog)));
1971 
1972     dictPtr += 4;   /* skip magic number */
1973     cctx->dictID = cctx->appliedParams.fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr);
1974     dictPtr += 4;
1975 
1976     {   unsigned maxSymbolValue = 255;
1977         size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)cctx->entropy->hufCTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
1978         if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
1979         if (maxSymbolValue < 255) return ERROR(dictionary_corrupted);
1980         dictPtr += hufHeaderSize;
1981     }
1982 
1983     {   unsigned offcodeLog;
1984         size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
1985         if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
1986         if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
1987         /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
1988         CHECK_E( FSE_buildCTable_wksp(cctx->entropy->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->entropy->workspace, sizeof(cctx->entropy->workspace)),
1989                  dictionary_corrupted);
1990         dictPtr += offcodeHeaderSize;
1991     }
1992 
1993     {   short matchlengthNCount[MaxML+1];
1994         unsigned matchlengthMaxValue = MaxML, matchlengthLog;
1995         size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
1996         if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
1997         if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
1998         /* Every match length code must have non-zero probability */
1999         CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
2000         CHECK_E( FSE_buildCTable_wksp(cctx->entropy->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->entropy->workspace, sizeof(cctx->entropy->workspace)),
2001                  dictionary_corrupted);
2002         dictPtr += matchlengthHeaderSize;
2003     }
2004 
2005     {   short litlengthNCount[MaxLL+1];
2006         unsigned litlengthMaxValue = MaxLL, litlengthLog;
2007         size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
2008         if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
2009         if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
2010         /* Every literal length code must have non-zero probability */
2011         CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
2012         CHECK_E( FSE_buildCTable_wksp(cctx->entropy->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->entropy->workspace, sizeof(cctx->entropy->workspace)),
2013                  dictionary_corrupted);
2014         dictPtr += litlengthHeaderSize;
2015     }
2016 
2017     if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
2018     cctx->seqStore.rep[0] = MEM_readLE32(dictPtr+0);
2019     cctx->seqStore.rep[1] = MEM_readLE32(dictPtr+4);
2020     cctx->seqStore.rep[2] = MEM_readLE32(dictPtr+8);
2021     dictPtr += 12;
2022 
2023     {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
2024         U32 offcodeMax = MaxOff;
2025         if (dictContentSize <= ((U32)-1) - 128 KB) {
2026             U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
2027             offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
2028         }
2029         /* All offset values <= dictContentSize + 128 KB must be representable */
2030         CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
2031         /* All repCodes must be <= dictContentSize and != 0 */
2032         {   U32 u;
2033             for (u=0; u<3; u++) {
2034                 if (cctx->seqStore.rep[u] == 0) return ERROR(dictionary_corrupted);
2035                 if (cctx->seqStore.rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
2036         }   }
2037 
2038         cctx->entropy->hufCTable_repeatMode = HUF_repeat_valid;
2039         cctx->entropy->offcode_repeatMode = FSE_repeat_valid;
2040         cctx->entropy->matchlength_repeatMode = FSE_repeat_valid;
2041         cctx->entropy->litlength_repeatMode = FSE_repeat_valid;
2042         return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize);
2043     }
2044 }
2045 
2046 /** ZSTD_compress_insertDictionary() :
2047 *   @return : 0, or an error code */
2048 static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx* cctx,
2049                                        const void* dict, size_t dictSize,
2050                                              ZSTD_dictMode_e dictMode)
2051 {
2052     DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
2053     if ((dict==NULL) || (dictSize<=8)) return 0;
2054 
2055     /* dict restricted modes */
2056     if (dictMode==ZSTD_dm_rawContent)
2057         return ZSTD_loadDictionaryContent(cctx, dict, dictSize);
2058 
2059     if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
2060         if (dictMode == ZSTD_dm_auto) {
2061             DEBUGLOG(4, "raw content dictionary detected");
2062             return ZSTD_loadDictionaryContent(cctx, dict, dictSize);
2063         }
2064         if (dictMode == ZSTD_dm_fullDict)
2065             return ERROR(dictionary_wrong);
2066         assert(0);   /* impossible */
2067     }
2068 
2069     /* dict as full zstd dictionary */
2070     return ZSTD_loadZstdDictionary(cctx, dict, dictSize);
2071 }
2072 
2073 /*! ZSTD_compressBegin_internal() :
2074  * @return : 0, or an error code */
2075 size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
2076                              const void* dict, size_t dictSize,
2077                              ZSTD_dictMode_e dictMode,
2078                              const ZSTD_CDict* cdict,
2079                              ZSTD_CCtx_params params, U64 pledgedSrcSize,
2080                              ZSTD_buffered_policy_e zbuff)
2081 {
2082     DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params.cParams.windowLog);
2083     /* params are supposed to be fully validated at this point */
2084     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
2085     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
2086 
2087     if (cdict && cdict->dictContentSize>0) {
2088         cctx->requestedParams = params;
2089         return ZSTD_copyCCtx_internal(cctx, cdict->refContext,
2090                                       params.cParams.windowLog, params.fParams, pledgedSrcSize,
2091                                       zbuff);
2092     }
2093 
2094     CHECK_F( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
2095                                      ZSTDcrp_continue, zbuff) );
2096     return ZSTD_compress_insertDictionary(cctx, dict, dictSize, dictMode);
2097 }
2098 
2099 size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
2100                                     const void* dict, size_t dictSize,
2101                                     ZSTD_dictMode_e dictMode,
2102                                     const ZSTD_CDict* cdict,
2103                                     ZSTD_CCtx_params params,
2104                                     unsigned long long pledgedSrcSize)
2105 {
2106     DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog);
2107     /* compression parameters verification and optimization */
2108     CHECK_F( ZSTD_checkCParams(params.cParams) );
2109     return ZSTD_compressBegin_internal(cctx,
2110                                        dict, dictSize, dictMode,
2111                                        cdict,
2112                                        params, pledgedSrcSize,
2113                                        ZSTDb_not_buffered);
2114 }
2115 
2116 /*! ZSTD_compressBegin_advanced() :
2117 *   @return : 0, or an error code */
2118 size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
2119                              const void* dict, size_t dictSize,
2120                                    ZSTD_parameters params, unsigned long long pledgedSrcSize)
2121 {
2122     ZSTD_CCtx_params const cctxParams =
2123             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
2124     return ZSTD_compressBegin_advanced_internal(cctx,
2125                                             dict, dictSize, ZSTD_dm_auto,
2126                                             NULL /*cdict*/,
2127                                             cctxParams, pledgedSrcSize);
2128 }
2129 
2130 size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
2131 {
2132     ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
2133     ZSTD_CCtx_params const cctxParams =
2134             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
2135     DEBUGLOG(4, "ZSTD_compressBegin_usingDict");
2136     return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dm_auto, NULL,
2137                                        cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
2138 }
2139 
2140 size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
2141 {
2142     return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
2143 }
2144 
2145 
2146 /*! ZSTD_writeEpilogue() :
2147 *   Ends a frame.
2148 *   @return : nb of bytes written into dst (or an error code) */
2149 static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
2150 {
2151     BYTE* const ostart = (BYTE*)dst;
2152     BYTE* op = ostart;
2153     size_t fhSize = 0;
2154 
2155     DEBUGLOG(5, "ZSTD_writeEpilogue");
2156     if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong);  /* init missing */
2157 
2158     /* special case : empty frame */
2159     if (cctx->stage == ZSTDcs_init) {
2160         fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0);
2161         if (ZSTD_isError(fhSize)) return fhSize;
2162         dstCapacity -= fhSize;
2163         op += fhSize;
2164         cctx->stage = ZSTDcs_ongoing;
2165     }
2166 
2167     if (cctx->stage != ZSTDcs_ending) {
2168         /* write one last empty block, make it the "last" block */
2169         U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
2170         if (dstCapacity<4) return ERROR(dstSize_tooSmall);
2171         MEM_writeLE32(op, cBlockHeader24);
2172         op += ZSTD_blockHeaderSize;
2173         dstCapacity -= ZSTD_blockHeaderSize;
2174     }
2175 
2176     if (cctx->appliedParams.fParams.checksumFlag) {
2177         U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
2178         if (dstCapacity<4) return ERROR(dstSize_tooSmall);
2179         MEM_writeLE32(op, checksum);
2180         op += 4;
2181     }
2182 
2183     cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
2184     return op-ostart;
2185 }
2186 
2187 
2188 size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
2189                          void* dst, size_t dstCapacity,
2190                    const void* src, size_t srcSize)
2191 {
2192     size_t endResult;
2193     size_t const cSize = ZSTD_compressContinue_internal(cctx,
2194                                 dst, dstCapacity, src, srcSize,
2195                                 1 /* frame mode */, 1 /* last chunk */);
2196     if (ZSTD_isError(cSize)) return cSize;
2197     endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
2198     if (ZSTD_isError(endResult)) return endResult;
2199     if (cctx->appliedParams.fParams.contentSizeFlag) {  /* control src size */
2200         DEBUGLOG(4, "end of frame : controlling src size");
2201         if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) {
2202             DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u",
2203                 (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
2204             return ERROR(srcSize_wrong);
2205     }   }
2206     return cSize + endResult;
2207 }
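/* Illustrative sketch : the buffer-less streaming sequence built from the
 * functions above : ZSTD_compressBegin() once, ZSTD_compressContinue() for
 * intermediate chunks, ZSTD_compressEnd() for the last chunk (it also writes
 * the epilogue). Assumptions : the input is a single contiguous buffer fed in
 * order, and dst is large enough for every call (e.g. ZSTD_compressBound()). */
static size_t example_bufferlessStreaming(void* dst, size_t dstCapacity,
                                          const void* src, size_t srcSize,
                                          int compressionLevel)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t const firstChunk = srcSize / 2;   /* arbitrary split, for illustration */
    size_t pos = 0;
    size_t ret;
    if (cctx == NULL) return ERROR(memory_allocation);

    ret = ZSTD_compressBegin(cctx, compressionLevel);
    if (!ZSTD_isError(ret))
        ret = ZSTD_compressContinue(cctx, dst, dstCapacity, src, firstChunk);
    if (!ZSTD_isError(ret)) {
        pos += ret;
        ret = ZSTD_compressEnd(cctx, (char*)dst + pos, dstCapacity - pos,
                               (const char*)src + firstChunk, srcSize - firstChunk);
    }
    if (!ZSTD_isError(ret)) pos += ret;
    ZSTD_freeCCtx(cctx);
    return ZSTD_isError(ret) ? ret : pos;
}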
2208 
2209 
2210 static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
2211                                void* dst, size_t dstCapacity,
2212                          const void* src, size_t srcSize,
2213                          const void* dict,size_t dictSize,
2214                                ZSTD_parameters params)
2215 {
2216     ZSTD_CCtx_params const cctxParams =
2217             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
2218     DEBUGLOG(4, "ZSTD_compress_internal");
2219     return ZSTD_compress_advanced_internal(cctx,
2220                                           dst, dstCapacity,
2221                                           src, srcSize,
2222                                           dict, dictSize,
2223                                           cctxParams);
2224 }
2225 
2226 size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
2227                                void* dst, size_t dstCapacity,
2228                          const void* src, size_t srcSize,
2229                          const void* dict,size_t dictSize,
2230                                ZSTD_parameters params)
2231 {
2232     DEBUGLOG(4, "ZSTD_compress_advanced");
2233     CHECK_F(ZSTD_checkCParams(params.cParams));
2234     return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
2235 }
2236 
2237 /* Internal */
2238 size_t ZSTD_compress_advanced_internal(
2239         ZSTD_CCtx* cctx,
2240         void* dst, size_t dstCapacity,
2241         const void* src, size_t srcSize,
2242         const void* dict,size_t dictSize,
2243         ZSTD_CCtx_params params)
2244 {
2245     DEBUGLOG(4, "ZSTD_compress_advanced_internal");
2246     CHECK_F( ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dm_auto, NULL,
2247                                          params, srcSize, ZSTDb_not_buffered) );
2248     return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
2249 }
2250 
2251 size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize,
2252                                const void* dict, size_t dictSize, int compressionLevel)
2253 {
2254     ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize ? srcSize : 1, dict ? dictSize : 0);
2255     params.fParams.contentSizeFlag = 1;
2256     DEBUGLOG(4, "ZSTD_compress_usingDict (level=%i, srcSize=%u, dictSize=%u)",
2257                 compressionLevel, (U32)srcSize, (U32)dictSize);
2258     return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
2259 }
2260 
2261 size_t ZSTD_compressCCtx (ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
2262 {
2263     return ZSTD_compress_usingDict(ctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
2264 }
2265 
2266 size_t ZSTD_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
2267 {
2268     size_t result;
2269     ZSTD_CCtx ctxBody;
2270     memset(&ctxBody, 0, sizeof(ctxBody));
2271     ctxBody.customMem = ZSTD_defaultCMem;
2272     result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
2273     ZSTD_free(ctxBody.workSpace, ZSTD_defaultCMem);  /* can't free ctxBody itself, as it's on stack; free only heap content */
2274     return result;
2275 }
2276 
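/* Illustrative sketch : simplest one-shot use of ZSTD_compress() above.
 * dst is assumed to hold at least ZSTD_compressBound(srcSize) bytes, so the
 * call cannot fail for lack of destination space. */
static size_t example_compressOneShot(void* dst, size_t dstCapacity,
                                      const void* src, size_t srcSize,
                                      int compressionLevel)
{
    size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, compressionLevel);
    if (ZSTD_isError(cSize)) return cSize;   /* error code; see ZSTD_getErrorName() */
    return cSize;                            /* number of bytes written into dst */
}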
2277 
2278 /* =====  Dictionary API  ===== */
2279 
2280 /*! ZSTD_estimateCDictSize_advanced() :
2281  *  Estimate amount of memory that will be needed to create a dictionary with following arguments */
2282 size_t ZSTD_estimateCDictSize_advanced(
2283         size_t dictSize, ZSTD_compressionParameters cParams,
2284         ZSTD_dictLoadMethod_e dictLoadMethod)
2285 {
2286     DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (U32)sizeof(ZSTD_CDict));
2287     DEBUGLOG(5, "CCtx estimate : %u",
2288              (U32)ZSTD_estimateCCtxSize_usingCParams(cParams));
2289     return sizeof(ZSTD_CDict) + ZSTD_estimateCCtxSize_usingCParams(cParams)
2290            + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
2291 }
2292 
2293 size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
2294 {
2295     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
2296     return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
2297 }
2298 
2299 size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
2300 {
2301     if (cdict==NULL) return 0;   /* support sizeof on NULL */
2302     DEBUGLOG(5, "sizeof(*cdict) : %u", (U32)sizeof(*cdict));
2303     DEBUGLOG(5, "ZSTD_sizeof_CCtx : %u", (U32)ZSTD_sizeof_CCtx(cdict->refContext));
2304     return ZSTD_sizeof_CCtx(cdict->refContext) + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
2305 }
2306 
2307 static size_t ZSTD_initCDict_internal(
2308                     ZSTD_CDict* cdict,
2309               const void* dictBuffer, size_t dictSize,
2310                     ZSTD_dictLoadMethod_e dictLoadMethod,
2311                     ZSTD_dictMode_e dictMode,
2312                     ZSTD_compressionParameters cParams)
2313 {
2314     DEBUGLOG(3, "ZSTD_initCDict_internal, mode %u", (U32)dictMode);
2315     if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
2316         cdict->dictBuffer = NULL;
2317         cdict->dictContent = dictBuffer;
2318     } else {
2319         void* const internalBuffer = ZSTD_malloc(dictSize, cdict->refContext->customMem);
2320         cdict->dictBuffer = internalBuffer;
2321         cdict->dictContent = internalBuffer;
2322         if (!internalBuffer) return ERROR(memory_allocation);
2323         memcpy(internalBuffer, dictBuffer, dictSize);
2324     }
2325     cdict->dictContentSize = dictSize;
2326 
2327     {   ZSTD_CCtx_params cctxParams = cdict->refContext->requestedParams;
2328         cctxParams.cParams = cParams;
2329         CHECK_F( ZSTD_compressBegin_internal(cdict->refContext,
2330                                         cdict->dictContent, dictSize, dictMode,
2331                                         NULL,
2332                                         cctxParams, ZSTD_CONTENTSIZE_UNKNOWN,
2333                                         ZSTDb_not_buffered) );
2334     }
2335 
2336     return 0;
2337 }
2338 
2339 ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
2340                                       ZSTD_dictLoadMethod_e dictLoadMethod,
2341                                       ZSTD_dictMode_e dictMode,
2342                                       ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
2343 {
2344     DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (U32)dictMode);
2345     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
2346 
2347     {   ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
2348         ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(customMem);
2349 
2350         if (!cdict || !cctx) {
2351             ZSTD_free(cdict, customMem);
2352             ZSTD_freeCCtx(cctx);
2353             return NULL;
2354         }
2355         cdict->refContext = cctx;
2356         if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
2357                                         dictBuffer, dictSize,
2358                                         dictLoadMethod, dictMode,
2359                                         cParams) )) {
2360             ZSTD_freeCDict(cdict);
2361             return NULL;
2362         }
2363 
2364         return cdict;
2365     }
2366 }
2367 
2368 ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
2369 {
2370     ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
2371     return ZSTD_createCDict_advanced(dict, dictSize,
2372                                      ZSTD_dlm_byCopy, ZSTD_dm_auto,
2373                                      cParams, ZSTD_defaultCMem);
2374 }
2375 
2376 ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
2377 {
2378     ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
2379     return ZSTD_createCDict_advanced(dict, dictSize,
2380                                      ZSTD_dlm_byRef, ZSTD_dm_auto,
2381                                      cParams, ZSTD_defaultCMem);
2382 }
2383 
2384 size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
2385 {
2386     if (cdict==NULL) return 0;   /* support free on NULL */
2387     {   ZSTD_customMem const cMem = cdict->refContext->customMem;
2388         ZSTD_freeCCtx(cdict->refContext);
2389         ZSTD_free(cdict->dictBuffer, cMem);
2390         ZSTD_free(cdict, cMem);
2391         return 0;
2392     }
2393 }
2394 
2395 /*! ZSTD_initStaticCDict_advanced() :
2396  *  Generate a digested dictionary in the provided memory area.
2397  *  workspace: The memory area to emplace the dictionary into.
2398  *             The provided pointer must be 8-bytes aligned.
2399  *             It must outlive dictionary usage.
2400  *  workspaceSize: Use ZSTD_estimateCDictSize()
2401  *                 to determine how large workspace must be.
2402  *  cParams : use ZSTD_getCParams() to transform a compression level
2403  *            into its relevant cParams.
2404  * @return : pointer to ZSTD_CDict, or NULL on error (workspace too small)
2405  *  Note : there is no corresponding "free" function.
2406  *         Since workspace was allocated externally, it must be freed externally.
2407  */
2408 ZSTD_CDict* ZSTD_initStaticCDict(void* workspace, size_t workspaceSize,
2409                            const void* dict, size_t dictSize,
2410                                  ZSTD_dictLoadMethod_e dictLoadMethod,
2411                                  ZSTD_dictMode_e dictMode,
2412                                  ZSTD_compressionParameters cParams)
2413 {
2414     size_t const cctxSize = ZSTD_estimateCCtxSize_usingCParams(cParams);
2415     size_t const neededSize = sizeof(ZSTD_CDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize)
2416                             + cctxSize;
2417     ZSTD_CDict* const cdict = (ZSTD_CDict*) workspace;
2418     void* ptr;
2419     DEBUGLOG(4, "(size_t)workspace & 7 : %u", (U32)(size_t)workspace & 7);
2420     if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
2421     DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
2422         (U32)workspaceSize, (U32)neededSize, (U32)(workspaceSize < neededSize));
2423     if (workspaceSize < neededSize) return NULL;
2424 
2425     if (dictLoadMethod == ZSTD_dlm_byCopy) {
2426         memcpy(cdict+1, dict, dictSize);
2427         dict = cdict+1;
2428         ptr = (char*)workspace + sizeof(ZSTD_CDict) + dictSize;
2429     } else {
2430         ptr = cdict+1;
2431     }
2432     cdict->refContext = ZSTD_initStaticCCtx(ptr, cctxSize);
2433 
2434     if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
2435                                               dict, dictSize,
2436                                               ZSTD_dlm_byRef, dictMode,
2437                                               cParams) ))
2438         return NULL;
2439 
2440     return cdict;
2441 }
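
/* Usage sketch : placing a digested dictionary into caller-managed memory.
 * The workspace size below mirrors the check performed by ZSTD_initStaticCDict();
 * an application would normally size it with ZSTD_estimateCDictSize() instead.
 * Allocating through ZSTD_malloc() with ZSTD_defaultCMem is an assumption of this sketch,
 * which is illustrative only and not used by the library. */
static ZSTD_CDict* example_initCDictInOwnMemory(const void* dict, size_t dictSize,
                                                ZSTD_compressionParameters cParams,
                                                void** workspacePtr)
{
    size_t const workspaceSize = sizeof(ZSTD_CDict)
                               + dictSize   /* ZSTD_dlm_byCopy : dictionary content is copied into the workspace */
                               + ZSTD_estimateCCtxSize_usingCParams(cParams);
    void* const workspace = ZSTD_malloc(workspaceSize, ZSTD_defaultCMem);   /* 8-bytes aligned */
    ZSTD_CDict* cdict;
    if (workspace == NULL) return NULL;
    cdict = ZSTD_initStaticCDict(workspace, workspaceSize,
                                 dict, dictSize,
                                 ZSTD_dlm_byCopy, ZSTD_dm_auto, cParams);
    if (cdict == NULL) { ZSTD_free(workspace, ZSTD_defaultCMem); return NULL; }
    *workspacePtr = workspace;   /* no ZSTD_freeCDict() for static CDicts : release the workspace itself when done */
    return cdict;
}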
2442 
2443 ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict) {
2444     return cdict->refContext->appliedParams.cParams;
2445 }
2446 
2447 /* ZSTD_compressBegin_usingCDict_advanced() :
2448  * cdict must be != NULL */
2449 size_t ZSTD_compressBegin_usingCDict_advanced(
2450     ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
2451     ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
2452 {
2453     DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
2454     if (cdict==NULL) return ERROR(dictionary_wrong);
2455     {   ZSTD_CCtx_params params = cctx->requestedParams;
2456         params.cParams = ZSTD_getCParamsFromCDict(cdict);
2457         params.fParams = fParams;
2458         return ZSTD_compressBegin_internal(cctx,
2459                                            NULL, 0, ZSTD_dm_auto,
2460                                            cdict,
2461                                            params, pledgedSrcSize,
2462                                            ZSTDb_not_buffered);
2463     }
2464 }
2465 
2466 /* ZSTD_compressBegin_usingCDict() :
2467  * pledgedSrcSize=0 means "unknown"
2468  * if pledgedSrcSize>0, it will enable contentSizeFlag */
2469 size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
2470 {
2471     ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
2472     DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
2473     return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, 0);
2474 }
2475 
2476 size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
2477                                 void* dst, size_t dstCapacity,
2478                                 const void* src, size_t srcSize,
2479                                 const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
2480 {
2481     CHECK_F (ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize));   /* will check if cdict != NULL */
2482     return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
2483 }
2484 
2485 /*! ZSTD_compress_usingCDict() :
2486  *  Compression using a digested Dictionary.
2487  *  Faster startup than ZSTD_compress_usingDict(), recommended when the same dictionary is used multiple times.
2488  *  Note that compression parameters are decided at CDict creation time
2489  *  while frame parameters are hardcoded */
2490 size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
2491                                 void* dst, size_t dstCapacity,
2492                                 const void* src, size_t srcSize,
2493                                 const ZSTD_CDict* cdict)
2494 {
2495     ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
2496     return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
2497 }
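
/* Usage sketch : compressing a series of independent inputs with one digested dictionary,
 * which is the intended pattern for ZSTD_compress_usingCDict(). Each input becomes its own
 * frame, appended into dst. The array-of-buffers interface is an assumption of this sketch. */
static size_t example_compressManyWithCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict,
                                            void* dst, size_t dstCapacity,
                                            const void* const* srcs, const size_t* srcSizes, size_t nbSrcs)
{
    size_t dstPos = 0;
    size_t n;
    for (n = 0; n < nbSrcs; n++) {
        size_t const cSize = ZSTD_compress_usingCDict(cctx,
                                    (char*)dst + dstPos, dstCapacity - dstPos,
                                    srcs[n], srcSizes[n],
                                    cdict);   /* dictionary is loaded once, at CDict creation time */
        if (ZSTD_isError(cSize)) return cSize;   /* forward the error code */
        dstPos += cSize;
    }
    return dstPos;   /* total compressed size */
}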
2498 
2499 
2500 
2501 /* ******************************************************************
2502 *  Streaming
2503 ********************************************************************/
2504 
2505 ZSTD_CStream* ZSTD_createCStream(void)
2506 {
2507     DEBUGLOG(3, "ZSTD_createCStream");
2508     return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
2509 }
2510 
2511 ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
2512 {
2513     return ZSTD_initStaticCCtx(workspace, workspaceSize);
2514 }
2515 
2516 ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
2517 {   /* CStream and CCtx are now same object */
2518     return ZSTD_createCCtx_advanced(customMem);
2519 }
2520 
2521 size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
2522 {
2523     return ZSTD_freeCCtx(zcs);   /* same object */
2524 }
2525 
2526 
2527 
2528 /*======   Initialization   ======*/
2529 
2530 size_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX; }
2531 
2532 size_t ZSTD_CStreamOutSize(void)
2533 {
2534     return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bit checksum */ ;
2535 }
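
/* Usage sketch : allocating streaming buffers of the recommended sizes.
 * ZSTD_CStreamInSize() / ZSTD_CStreamOutSize() are only size hints;
 * any buffer sizes remain legal. Allocation through ZSTD_malloc() with
 * ZSTD_defaultCMem is an assumption of this sketch. */
static int example_allocStreamingBuffers(void** inBuff, size_t* inSize,
                                         void** outBuff, size_t* outSize)
{
    *inSize  = ZSTD_CStreamInSize();    /* one full block */
    *outSize = ZSTD_CStreamOutSize();   /* large enough to always hold one compressed block */
    *inBuff  = ZSTD_malloc(*inSize, ZSTD_defaultCMem);
    *outBuff = ZSTD_malloc(*outSize, ZSTD_defaultCMem);
    if ((*inBuff == NULL) || (*outBuff == NULL)) {
        ZSTD_free(*inBuff, ZSTD_defaultCMem);
        ZSTD_free(*outBuff, ZSTD_defaultCMem);
        *inBuff = NULL; *outBuff = NULL;
        return 1;   /* allocation failure */
    }
    return 0;
}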
2536 
2537 static size_t ZSTD_resetCStream_internal(ZSTD_CStream* zcs,
2538                     const void* const dict, size_t const dictSize, ZSTD_dictMode_e const dictMode,
2539                     const ZSTD_CDict* const cdict,
2540                     ZSTD_CCtx_params const params, unsigned long long const pledgedSrcSize)
2541 {
2542     DEBUGLOG(4, "ZSTD_resetCStream_internal");
2543     /* params are supposed to be fully validated at this point */
2544     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
2545     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
2546 
2547     CHECK_F( ZSTD_compressBegin_internal(zcs,
2548                                          dict, dictSize, dictMode,
2549                                          cdict,
2550                                          params, pledgedSrcSize,
2551                                          ZSTDb_buffered) );
2552 
2553     zcs->inToCompress = 0;
2554     zcs->inBuffPos = 0;
2555     zcs->inBuffTarget = zcs->blockSize
2556                       + (zcs->blockSize == pledgedSrcSize);   /* for small input : avoid automatic flush on reaching end of block, since it would require adding a 3-byte null block to end the frame */
2557     zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
2558     zcs->streamStage = zcss_load;
2559     zcs->frameEnded = 0;
2560     return 0;   /* ready to go */
2561 }
2562 
2563 /* ZSTD_resetCStream():
2564  * pledgedSrcSize == 0 means "unknown" */
2565 size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
2566 {
2567     ZSTD_CCtx_params params = zcs->requestedParams;
2568     DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (U32)pledgedSrcSize);
2569     if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
2570     params.fParams.contentSizeFlag = 1;
2571     params.cParams = ZSTD_getCParamsFromCCtxParams(params, pledgedSrcSize, 0);
2572     return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dm_auto, zcs->cdict, params, pledgedSrcSize);
2573 }
2574 
2575 /*! ZSTD_initCStream_internal() :
2576  *  Note : for lib/compress only. Used by zstdmt_compress.c.
2577  *  Assumption 1 : params are valid
2578  *  Assumption 2 : either dict, or cdict, is defined, not both */
2579 size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
2580                     const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
2581                     ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
2582 {
2583     DEBUGLOG(4, "ZSTD_initCStream_internal");
2584     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
2585     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
2586 
2587     if (dict && dictSize >= 8) {
2588         DEBUGLOG(4, "loading dictionary of size %u", (U32)dictSize);
2589         if (zcs->staticSize) {   /* static CCtx : never uses malloc */
2590             /* incompatible with internal cdict creation */
2591             return ERROR(memory_allocation);
2592         }
2593         ZSTD_freeCDict(zcs->cdictLocal);
2594         zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
2595                                             ZSTD_dlm_byCopy, ZSTD_dm_auto,
2596                                             params.cParams, zcs->customMem);
2597         zcs->cdict = zcs->cdictLocal;
2598         if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
2599     } else {
2600         if (cdict) {
2601             params.cParams = ZSTD_getCParamsFromCDict(cdict);  /* cParams are enforced from cdict; this includes windowLog */
2602         }
2603         ZSTD_freeCDict(zcs->cdictLocal);
2604         zcs->cdictLocal = NULL;
2605         zcs->cdict = cdict;
2606     }
2607 
2608     params.compressionLevel = ZSTD_CLEVEL_CUSTOM; /* enforce usage of the provided cParams, instead of deriving them dynamically from cLevel */
2609     zcs->requestedParams = params;
2610 
2611     return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dm_auto, zcs->cdict, params, pledgedSrcSize);
2612 }
2613 
2614 /* ZSTD_initCStream_usingCDict_advanced() :
2615  * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
2616 size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
2617                                             const ZSTD_CDict* cdict,
2618                                             ZSTD_frameParameters fParams,
2619                                             unsigned long long pledgedSrcSize)
2620 {
2621     DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
2622     if (!cdict) return ERROR(dictionary_wrong); /* cannot handle NULL cdict (does not know what to do) */
2623     {   ZSTD_CCtx_params params = zcs->requestedParams;
2624         params.cParams = ZSTD_getCParamsFromCDict(cdict);
2625         params.fParams = fParams;
2626         return ZSTD_initCStream_internal(zcs,
2627                                 NULL, 0, cdict,
2628                                 params, pledgedSrcSize);
2629     }
2630 }
2631 
2632 /* note : cdict must outlive the compression session */
2633 size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
2634 {
2635     ZSTD_frameParameters const fParams = { 0 /* contentSizeFlag */, 0 /* checksum */, 0 /* hideDictID */ };
2636     DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
2637     return ZSTD_initCStream_usingCDict_advanced(zcs, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);  /* note : will check that cdict != NULL */
2638 }
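
/* Usage sketch : streaming with a digested dictionary.
 * As noted above, the cdict is referenced, not copied, so it must outlive the
 * compression session. The lazy-creation pattern, compression level 3 and the
 * use of ZSTD_createCDict() are assumptions of this sketch. */
static size_t example_initStreamWithDict(ZSTD_CStream* zcs,
                                         const void* dictBuffer, size_t dictSize,
                                         ZSTD_CDict** cdictPtr)
{
    if (*cdictPtr == NULL) {   /* digest the dictionary only once, then reuse it */
        *cdictPtr = ZSTD_createCDict(dictBuffer, dictSize, 3 /* compression level */);
        if (*cdictPtr == NULL) return ERROR(memory_allocation);
    }
    /* keep *cdictPtr alive (and only ZSTD_freeCDict() it) after all sessions using it have ended */
    return ZSTD_initCStream_usingCDict(zcs, *cdictPtr);
}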
2639 
2640 /* ZSTD_initCStream_advanced() :
2641  * pledgedSrcSize must be correct.
2642  * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
2643  * dict is loaded with default parameters ZSTD_dm_auto and ZSTD_dlm_byCopy. */
2644 size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
2645                                  const void* dict, size_t dictSize,
2646                                  ZSTD_parameters params, unsigned long long pledgedSrcSize)
2647 {
2648     ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
2649     DEBUGLOG(4, "ZSTD_initCStream_advanced: pledgedSrcSize=%u, flag=%u",
2650                 (U32)pledgedSrcSize, params.fParams.contentSizeFlag);
2651     CHECK_F( ZSTD_checkCParams(params.cParams) );
2652     if ((pledgedSrcSize==0) && (params.fParams.contentSizeFlag==0)) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* for compatibility with older programs relying on this behavior. Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. This line will be removed in the future. */
2653     return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL /*cdict*/, cctxParams, pledgedSrcSize);
2654 }
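
/* Usage sketch : initializing a stream with explicit parameters and a known source size.
 * ZSTD_getParams() expands a compression level into a full ZSTD_parameters object;
 * requesting a frame checksum here is an assumption of this sketch. */
static size_t example_initStreamWithChecksum(ZSTD_CStream* zcs,
                                             const void* dict, size_t dictSize,
                                             int compressionLevel,
                                             unsigned long long srcSize)
{
    ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, dictSize);
    params.fParams.checksumFlag = 1;   /* append a 32-bit checksum at the end of the frame */
    return ZSTD_initCStream_advanced(zcs, dict, dictSize, params, srcSize);
}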
2655 
2656 size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
2657 {
2658     ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
2659     ZSTD_CCtx_params const cctxParams =
2660             ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
2661     return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);
2662 }
2663 
2664 size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
2665 {
2666     U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;  /* temporary : 0 is interpreted as "unknown" during the transition period. Users wishing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. `0` will be interpreted as "empty" in the future */
2667     ZSTD_parameters const params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
2668     ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
2669     return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, cctxParams, pledgedSrcSize);
2670 }
2671 
2672 size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
2673 {
2674     DEBUGLOG(4, "ZSTD_initCStream");
2675     return ZSTD_initCStream_srcSize(zcs, compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN);
2676 }
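
/* Usage sketch : a complete streaming compression loop built on the calls above.
 * The readInput()/writeOutput() callbacks stand in for whatever I/O the application
 * uses and are purely hypothetical, as are the buffer parameters; sizing the buffers
 * with ZSTD_CStreamInSize()/ZSTD_CStreamOutSize() is recommended but not required. */
typedef size_t (*example_readFn) (void* opaque, void* buffer, size_t capacity);    /* returns 0 at end of input */
typedef void   (*example_writeFn)(void* opaque, const void* buffer, size_t size);

static size_t example_streamCompress(ZSTD_CStream* zcs, int compressionLevel,
                                     example_readFn readInput, example_writeFn writeOutput, void* opaque,
                                     void* inBuff, size_t inCapacity,
                                     void* outBuff, size_t outCapacity)
{
    size_t readSize;
    CHECK_F( ZSTD_initCStream(zcs, compressionLevel) );
    while ( (readSize = readInput(opaque, inBuff, inCapacity)) != 0 ) {
        ZSTD_inBuffer input = { inBuff, readSize, 0 };
        while (input.pos < input.size) {   /* consume the whole chunk */
            ZSTD_outBuffer output = { outBuff, outCapacity, 0 };
            CHECK_F( ZSTD_compressStream(zcs, &output, &input) );   /* return value (when not an error) hints at the next read size */
            writeOutput(opaque, outBuff, output.pos);
        }
    }
    {   size_t remaining;
        do {   /* finish the frame : flush whatever is still buffered internally */
            ZSTD_outBuffer output = { outBuff, outCapacity, 0 };
            remaining = ZSTD_endStream(zcs, &output);
            if (ZSTD_isError(remaining)) return remaining;
            writeOutput(opaque, outBuff, output.pos);
        } while (remaining != 0);   /* 0 means the frame is complete */
    }
    return 0;
}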
2677 
2678 /*======   Compression   ======*/
2679 
2680 MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
2681                            const void* src, size_t srcSize)
2682 {
2683     size_t const length = MIN(dstCapacity, srcSize);
2684     if (length) memcpy(dst, src, length);
2685     return length;
2686 }
2687 
2688 /** ZSTD_compressStream_generic():
2689  *  internal function for all *compressStream*() variants and *compress_generic()
2690  *  non-static, because it can be called from zstdmt.c
2691  * @return : hint size for next input */
2692 size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
2693                                    ZSTD_outBuffer* output,
2694                                    ZSTD_inBuffer* input,
2695                                    ZSTD_EndDirective const flushMode)
2696 {
2697     const char* const istart = (const char*)input->src;
2698     const char* const iend = istart + input->size;
2699     const char* ip = istart + input->pos;
2700     char* const ostart = (char*)output->dst;
2701     char* const oend = ostart + output->size;
2702     char* op = ostart + output->pos;
2703     U32 someMoreWork = 1;
2704 
2705     /* check expectations */
2706     DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (U32)flushMode);
2707     assert(zcs->inBuff != NULL);
2708     assert(zcs->inBuffSize > 0);
2709     assert(zcs->outBuff !=  NULL);
2710     assert(zcs->outBuffSize > 0);
2711     assert(output->pos <= output->size);
2712     assert(input->pos <= input->size);
2713 
2714     while (someMoreWork) {
2715         switch(zcs->streamStage)
2716         {
2717         case zcss_init:
2718             /* call ZSTD_initCStream() first ! */
2719             return ERROR(init_missing);
2720 
2721         case zcss_load:
2722             if ( (flushMode == ZSTD_e_end)
2723               && ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip))  /* enough dstCapacity */
2724               && (zcs->inBuffPos == 0) ) {
2725                 /* shortcut to compression pass directly into output buffer */
2726                 size_t const cSize = ZSTD_compressEnd(zcs,
2727                                                 op, oend-op, ip, iend-ip);
2728                 DEBUGLOG(4, "ZSTD_compressEnd : %u", (U32)cSize);
2729                 if (ZSTD_isError(cSize)) return cSize;
2730                 ip = iend;
2731                 op += cSize;
2732                 zcs->frameEnded = 1;
2733                 ZSTD_startNewCompression(zcs);
2734                 someMoreWork = 0; break;
2735             }
2736             /* complete loading into inBuffer */
2737             {   size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
2738                 size_t const loaded = ZSTD_limitCopy(
2739                                         zcs->inBuff + zcs->inBuffPos, toLoad,
2740                                         ip, iend-ip);
2741                 zcs->inBuffPos += loaded;
2742                 ip += loaded;
2743                 if ( (flushMode == ZSTD_e_continue)
2744                   && (zcs->inBuffPos < zcs->inBuffTarget) ) {
2745                     /* not enough input to fill a full block : stop here */
2746                     someMoreWork = 0; break;
2747                 }
2748                 if ( (flushMode == ZSTD_e_flush)
2749                   && (zcs->inBuffPos == zcs->inToCompress) ) {
2750                     /* nothing new to compress : stop here */
2751                     someMoreWork = 0; break;
2752                 }
2753             }
2754             /* compress current block (note : this stage cannot be stopped in the middle) */
2755             DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
2756             {   void* cDst;
2757                 size_t cSize;
2758                 size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
2759                 size_t oSize = oend-op;
2760                 unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
2761                 if (oSize >= ZSTD_compressBound(iSize))
2762                     cDst = op;   /* compress into output buffer, to skip flush stage */
2763                 else
2764                     cDst = zcs->outBuff, oSize = zcs->outBuffSize;
2765                 cSize = lastBlock ?
2766                         ZSTD_compressEnd(zcs, cDst, oSize,
2767                                     zcs->inBuff + zcs->inToCompress, iSize) :
2768                         ZSTD_compressContinue(zcs, cDst, oSize,
2769                                     zcs->inBuff + zcs->inToCompress, iSize);
2770                 if (ZSTD_isError(cSize)) return cSize;
2771                 zcs->frameEnded = lastBlock;
2772                 /* prepare next block */
2773                 zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
2774                 if (zcs->inBuffTarget > zcs->inBuffSize)
2775                     zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
2776                 DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
2777                          (U32)zcs->inBuffTarget, (U32)zcs->inBuffSize);
2778                 if (!lastBlock)
2779                     assert(zcs->inBuffTarget <= zcs->inBuffSize);
2780                 zcs->inToCompress = zcs->inBuffPos;
2781                 if (cDst == op) {  /* no need to flush */
2782                     op += cSize;
2783                     if (zcs->frameEnded) {
2784                         DEBUGLOG(5, "Frame completed directly in outBuffer");
2785                         someMoreWork = 0;
2786                         ZSTD_startNewCompression(zcs);
2787                     }
2788                     break;
2789                 }
2790                 zcs->outBuffContentSize = cSize;
2791                 zcs->outBuffFlushedSize = 0;
2792                 zcs->streamStage = zcss_flush; /* pass-through to flush stage */
2793             }
2794             /* fall-through */
2795         case zcss_flush:
2796             DEBUGLOG(5, "flush stage");
2797             {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
2798                 size_t const flushed = ZSTD_limitCopy(op, oend-op,
2799                             zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
2800                 DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
2801                             (U32)toFlush, (U32)(oend-op), (U32)flushed);
2802                 op += flushed;
2803                 zcs->outBuffFlushedSize += flushed;
2804                 if (toFlush!=flushed) {
2805                     /* flush not fully completed, presumably because dst is too small */
2806                     assert(op==oend);
2807                     someMoreWork = 0;
2808                     break;
2809                 }
2810                 zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
2811                 if (zcs->frameEnded) {
2812                     DEBUGLOG(5, "Frame completed on flush");
2813                     someMoreWork = 0;
2814                     ZSTD_startNewCompression(zcs);
2815                     break;
2816                 }
2817                 zcs->streamStage = zcss_load;
2818                 break;
2819             }
2820 
2821         default: /* impossible */
2822             assert(0);
2823         }
2824     }
2825 
2826     input->pos = ip - istart;
2827     output->pos = op - ostart;
2828     if (zcs->frameEnded) return 0;
2829     {   size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
2830         if (hintInSize==0) hintInSize = zcs->blockSize;
2831         return hintInSize;
2832     }
2833 }
2834 
2835 size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
2836 {
2837     /* check conditions */
2838     if (output->pos > output->size) return ERROR(GENERIC);
2839     if (input->pos  > input->size)  return ERROR(GENERIC);
2840 
2841     return ZSTD_compressStream_generic(zcs, output, input, ZSTD_e_continue);
2842 }
2843 
2844 
2845 size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
2846                               ZSTD_outBuffer* output,
2847                               ZSTD_inBuffer* input,
2848                               ZSTD_EndDirective endOp)
2849 {
2850     DEBUGLOG(5, "ZSTD_compress_generic, endOp=%u ", (U32)endOp);
2851     /* check conditions */
2852     if (output->pos > output->size) return ERROR(GENERIC);
2853     if (input->pos  > input->size)  return ERROR(GENERIC);
2854     assert(cctx!=NULL);
2855 
2856     /* transparent initialization stage */
2857     if (cctx->streamStage == zcss_init) {
2858         ZSTD_CCtx_params params = cctx->requestedParams;
2859         ZSTD_prefixDict const prefixDict = cctx->prefixDict;
2860         memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));  /* single usage */
2861         assert(prefixDict.dict==NULL || cctx->cdict==NULL);   /* only one can be set */
2862         DEBUGLOG(4, "ZSTD_compress_generic : transparent init stage");
2863         if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1;  /* auto-fix pledgedSrcSize */
2864         params.cParams = ZSTD_getCParamsFromCCtxParams(
2865                 cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);
2866 
2867 #ifdef ZSTD_MULTITHREAD
2868         if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN)
2869             params.nbThreads = 1; /* do not invoke multi-threading when src size is too small */
2870         if (params.nbThreads > 1) {
2871             if (cctx->mtctx == NULL || (params.nbThreads != ZSTDMT_getNbThreads(cctx->mtctx))) {
2872                 DEBUGLOG(4, "ZSTD_compress_generic: creating new mtctx for nbThreads=%u (previous: %u)",
2873                             params.nbThreads, ZSTDMT_getNbThreads(cctx->mtctx));
2874                 ZSTDMT_freeCCtx(cctx->mtctx);
2875                 cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbThreads, cctx->customMem);
2876                 if (cctx->mtctx == NULL) return ERROR(memory_allocation);
2877             }
2878             DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbThreads=%u", params.nbThreads);
2879             CHECK_F( ZSTDMT_initCStream_internal(
2880                         cctx->mtctx,
2881                         prefixDict.dict, prefixDict.dictSize, ZSTD_dm_rawContent,
2882                         cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
2883             cctx->streamStage = zcss_load;
2884             cctx->appliedParams.nbThreads = params.nbThreads;
2885         } else
2886 #endif
2887         {   CHECK_F( ZSTD_resetCStream_internal(
2888                              cctx, prefixDict.dict, prefixDict.dictSize,
2889                              prefixDict.dictMode, cctx->cdict, params,
2890                              cctx->pledgedSrcSizePlusOne-1) );
2891             assert(cctx->streamStage == zcss_load);
2892             assert(cctx->appliedParams.nbThreads <= 1);
2893     }   }
2894 
2895     /* compression stage */
2896 #ifdef ZSTD_MULTITHREAD
2897     if (cctx->appliedParams.nbThreads > 1) {
2898         size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
2899         if ( ZSTD_isError(flushMin)
2900           || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
2901             ZSTD_startNewCompression(cctx);
2902         }
2903         return flushMin;
2904     }
2905 #endif
2906     CHECK_F( ZSTD_compressStream_generic(cctx, output, input, endOp) );
2907     DEBUGLOG(5, "completed ZSTD_compress_generic");
2908     return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
2909 }
2910 
2911 size_t ZSTD_compress_generic_simpleArgs (
2912                             ZSTD_CCtx* cctx,
2913                             void* dst, size_t dstCapacity, size_t* dstPos,
2914                       const void* src, size_t srcSize, size_t* srcPos,
2915                             ZSTD_EndDirective endOp)
2916 {
2917     ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
2918     ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
2919     /* ZSTD_compress_generic() will check validity of dstPos and srcPos */
2920     size_t const cErr = ZSTD_compress_generic(cctx, &output, &input, endOp);
2921     *dstPos = output.pos;
2922     *srcPos = input.pos;
2923     return cErr;
2924 }
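
/* Usage sketch : one-shot compression through the generic entry point.
 * With ZSTD_e_end and a large enough dst, a single call can complete the frame;
 * otherwise the caller keeps calling until 0 is returned. Using the simpleArgs
 * variant and treating a full dst as an error are choices of this sketch. */
static size_t example_compressGenericOneShot(ZSTD_CCtx* cctx,
                                             void* dst, size_t dstCapacity,
                                             const void* src, size_t srcSize)
{
    size_t dstPos = 0;
    size_t srcPos = 0;
    size_t remaining;
    do {
        remaining = ZSTD_compress_generic_simpleArgs(cctx, dst, dstCapacity, &dstPos,
                                                     src, srcSize, &srcPos, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;
        if ((remaining != 0) && (dstPos == dstCapacity))
            return ERROR(dstSize_tooSmall);   /* dst is full but the frame is not finished */
    } while (remaining != 0);   /* 0 means the frame is entirely written into dst */
    return dstPos;   /* compressed size */
}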
2925 
2926 
2927 /*======   Finalize   ======*/
2928 
2929 /*! ZSTD_flushStream() :
2930 *   @return : amount of data remaining to flush */
2931 size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
2932 {
2933     ZSTD_inBuffer input = { NULL, 0, 0 };
2934     if (output->pos > output->size) return ERROR(GENERIC);
2935     CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_flush) );
2936     return zcs->outBuffContentSize - zcs->outBuffFlushedSize;  /* remaining to flush */
2937 }
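
/* Usage sketch : forcing buffered data out at a record boundary with ZSTD_flushStream(),
 * so that everything submitted so far becomes decodable by the receiver.
 * The writeOutput() callback is the same hypothetical stand-in used in the
 * streaming sketch above. */
static size_t example_flushAll(ZSTD_CStream* zcs,
                               example_writeFn writeOutput, void* opaque,
                               void* outBuff, size_t outCapacity)
{
    size_t remaining;
    do {
        ZSTD_outBuffer output = { outBuff, outCapacity, 0 };
        remaining = ZSTD_flushStream(zcs, &output);   /* amount still held in internal buffers */
        if (ZSTD_isError(remaining)) return remaining;
        writeOutput(opaque, outBuff, output.pos);
    } while (remaining != 0);   /* loop in case outBuff is smaller than the internal buffer */
    return 0;
}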
2938 
2939 
2940 size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
2941 {
2942     ZSTD_inBuffer input = { NULL, 0, 0 };
2943     if (output->pos > output->size) return ERROR(GENERIC);
2944     CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_end) );
2945     {   size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
2946         size_t const checksumSize = zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4;
2947         size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize + lastBlockSize + checksumSize;
2948         DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (U32)toFlush);
2949         return toFlush;
2950     }
2951 }
2952 
2953 
2954 /*-=====  Pre-defined compression levels  =====-*/
2955 
2956 #define ZSTD_MAX_CLEVEL     22
2957 int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
2958 
2959 static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
2960 {   /* "default" - guarantees a monotonically increasing memory budget */
2961     /* W,  C,  H,  S,  L, TL, strat */
2962     { 18, 12, 12,  1,  7, 16, ZSTD_fast    },  /* level  0 - never used */
2963     { 19, 13, 14,  1,  7, 16, ZSTD_fast    },  /* level  1 */
2964     { 19, 15, 16,  1,  6, 16, ZSTD_fast    },  /* level  2 */
2965     { 20, 16, 17,  1,  5, 16, ZSTD_dfast   },  /* level  3 */
2966     { 20, 17, 18,  1,  5, 16, ZSTD_dfast   },  /* level  4 */
2967     { 20, 17, 18,  2,  5, 16, ZSTD_greedy  },  /* level  5 */
2968     { 21, 17, 19,  2,  5, 16, ZSTD_lazy    },  /* level  6 */
2969     { 21, 18, 19,  3,  5, 16, ZSTD_lazy    },  /* level  7 */
2970     { 21, 18, 20,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
2971     { 21, 19, 20,  3,  5, 16, ZSTD_lazy2   },  /* level  9 */
2972     { 21, 19, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
2973     { 22, 20, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
2974     { 22, 20, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
2975     { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 13 */
2976     { 22, 21, 22,  6,  5, 16, ZSTD_lazy2   },  /* level 14 */
2977     { 22, 21, 22,  4,  5, 16, ZSTD_btlazy2 },  /* level 15 */
2978     { 22, 21, 22,  4,  5, 48, ZSTD_btopt   },  /* level 16 */
2979     { 23, 22, 22,  4,  4, 48, ZSTD_btopt   },  /* level 17 */
2980     { 23, 22, 22,  5,  3, 64, ZSTD_btopt   },  /* level 18 */
2981     { 23, 23, 22,  7,  3,128, ZSTD_btopt   },  /* level 19 */
2982     { 25, 25, 23,  7,  3,128, ZSTD_btultra },  /* level 20 */
2983     { 26, 26, 24,  7,  3,256, ZSTD_btultra },  /* level 21 */
2984     { 27, 27, 25,  9,  3,512, ZSTD_btultra },  /* level 22 */
2985 },
2986 {   /* for srcSize <= 256 KB */
2987     /* W,  C,  H,  S,  L,  T, strat */
2988     {  0,  0,  0,  0,  0,  0, ZSTD_fast    },  /* level  0 - not used */
2989     { 18, 13, 14,  1,  6,  8, ZSTD_fast    },  /* level  1 */
2990     { 18, 14, 13,  1,  5,  8, ZSTD_dfast   },  /* level  2 */
2991     { 18, 16, 15,  1,  5,  8, ZSTD_dfast   },  /* level  3 */
2992     { 18, 15, 17,  1,  5,  8, ZSTD_greedy  },  /* level  4.*/
2993     { 18, 16, 17,  4,  5,  8, ZSTD_greedy  },  /* level  5.*/
2994     { 18, 16, 17,  3,  5,  8, ZSTD_lazy    },  /* level  6.*/
2995     { 18, 17, 17,  4,  4,  8, ZSTD_lazy    },  /* level  7 */
2996     { 18, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
2997     { 18, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
2998     { 18, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
2999     { 18, 18, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 11.*/
3000     { 18, 18, 17,  7,  4,  8, ZSTD_lazy2   },  /* level 12.*/
3001     { 18, 19, 17,  6,  4,  8, ZSTD_btlazy2 },  /* level 13 */
3002     { 18, 18, 18,  4,  4, 16, ZSTD_btopt   },  /* level 14.*/
3003     { 18, 18, 18,  4,  3, 16, ZSTD_btopt   },  /* level 15.*/
3004     { 18, 19, 18,  6,  3, 32, ZSTD_btopt   },  /* level 16.*/
3005     { 18, 19, 18,  8,  3, 64, ZSTD_btopt   },  /* level 17.*/
3006     { 18, 19, 18,  9,  3,128, ZSTD_btopt   },  /* level 18.*/
3007     { 18, 19, 18, 10,  3,256, ZSTD_btopt   },  /* level 19.*/
3008     { 18, 19, 18, 11,  3,512, ZSTD_btultra },  /* level 20.*/
3009     { 18, 19, 18, 12,  3,512, ZSTD_btultra },  /* level 21.*/
3010     { 18, 19, 18, 13,  3,512, ZSTD_btultra },  /* level 22.*/
3011 },
3012 {   /* for srcSize <= 128 KB */
3013     /* W,  C,  H,  S,  L,  T, strat */
3014     { 17, 12, 12,  1,  7,  8, ZSTD_fast    },  /* level  0 - not used */
3015     { 17, 12, 13,  1,  6,  8, ZSTD_fast    },  /* level  1 */
3016     { 17, 13, 16,  1,  5,  8, ZSTD_fast    },  /* level  2 */
3017     { 17, 16, 16,  2,  5,  8, ZSTD_dfast   },  /* level  3 */
3018     { 17, 13, 15,  3,  4,  8, ZSTD_greedy  },  /* level  4 */
3019     { 17, 15, 17,  4,  4,  8, ZSTD_greedy  },  /* level  5 */
3020     { 17, 16, 17,  3,  4,  8, ZSTD_lazy    },  /* level  6 */
3021     { 17, 15, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  7 */
3022     { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
3023     { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
3024     { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
3025     { 17, 17, 17,  7,  4,  8, ZSTD_lazy2   },  /* level 11 */
3026     { 17, 17, 17,  8,  4,  8, ZSTD_lazy2   },  /* level 12 */
3027     { 17, 18, 17,  6,  4,  8, ZSTD_btlazy2 },  /* level 13.*/
3028     { 17, 17, 17,  7,  3,  8, ZSTD_btopt   },  /* level 14.*/
3029     { 17, 17, 17,  7,  3, 16, ZSTD_btopt   },  /* level 15.*/
3030     { 17, 18, 17,  7,  3, 32, ZSTD_btopt   },  /* level 16.*/
3031     { 17, 18, 17,  7,  3, 64, ZSTD_btopt   },  /* level 17.*/
3032     { 17, 18, 17,  7,  3,256, ZSTD_btopt   },  /* level 18.*/
3033     { 17, 18, 17,  8,  3,256, ZSTD_btopt   },  /* level 19.*/
3034     { 17, 18, 17,  9,  3,256, ZSTD_btultra },  /* level 20.*/
3035     { 17, 18, 17, 10,  3,256, ZSTD_btultra },  /* level 21.*/
3036     { 17, 18, 17, 11,  3,512, ZSTD_btultra },  /* level 22.*/
3037 },
3038 {   /* for srcSize <= 16 KB */
3039     /* W,  C,  H,  S,  L,  T, strat */
3040     { 14, 12, 12,  1,  7,  6, ZSTD_fast    },  /* level  0 - not used */
3041     { 14, 14, 14,  1,  6,  6, ZSTD_fast    },  /* level  1 */
3042     { 14, 14, 14,  1,  4,  6, ZSTD_fast    },  /* level  2 */
3043     { 14, 14, 14,  1,  4,  6, ZSTD_dfast   },  /* level  3.*/
3044     { 14, 14, 14,  4,  4,  6, ZSTD_greedy  },  /* level  4.*/
3045     { 14, 14, 14,  3,  4,  6, ZSTD_lazy    },  /* level  5.*/
3046     { 14, 14, 14,  4,  4,  6, ZSTD_lazy2   },  /* level  6 */
3047     { 14, 14, 14,  5,  4,  6, ZSTD_lazy2   },  /* level  7 */
3048     { 14, 14, 14,  6,  4,  6, ZSTD_lazy2   },  /* level  8.*/
3049     { 14, 15, 14,  6,  4,  6, ZSTD_btlazy2 },  /* level  9.*/
3050     { 14, 15, 14,  3,  3,  6, ZSTD_btopt   },  /* level 10.*/
3051     { 14, 15, 14,  6,  3,  8, ZSTD_btopt   },  /* level 11.*/
3052     { 14, 15, 14,  6,  3, 16, ZSTD_btopt   },  /* level 12.*/
3053     { 14, 15, 14,  6,  3, 24, ZSTD_btopt   },  /* level 13.*/
3054     { 14, 15, 15,  6,  3, 48, ZSTD_btopt   },  /* level 14.*/
3055     { 14, 15, 15,  6,  3, 64, ZSTD_btopt   },  /* level 15.*/
3056     { 14, 15, 15,  6,  3, 96, ZSTD_btopt   },  /* level 16.*/
3057     { 14, 15, 15,  6,  3,128, ZSTD_btopt   },  /* level 17.*/
3058     { 14, 15, 15,  6,  3,256, ZSTD_btopt   },  /* level 18.*/
3059     { 14, 15, 15,  7,  3,256, ZSTD_btopt   },  /* level 19.*/
3060     { 14, 15, 15,  8,  3,256, ZSTD_btultra },  /* level 20.*/
3061     { 14, 15, 15,  9,  3,256, ZSTD_btultra },  /* level 21.*/
3062     { 14, 15, 15, 10,  3,256, ZSTD_btultra },  /* level 22.*/
3063 },
3064 };
3065 
3066 #if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1)
3067 /* This function just checks that
3068  * the memory budget of ZSTD_defaultCParameters[0] increases monotonically with compression level.
3069  * It runs once, on first ZSTD_getCParams() usage, if ZSTD_DEBUG is enabled.
3070  */
3071 MEM_STATIC void ZSTD_check_compressionLevel_monotonicIncrease_memoryBudget(void)
3072 {
3073     int level;
3074     for (level=1; level<ZSTD_maxCLevel(); level++) {
3075         ZSTD_compressionParameters const c1 = ZSTD_defaultCParameters[0][level];
3076         ZSTD_compressionParameters const c2 = ZSTD_defaultCParameters[0][level+1];
3077         assert(c1.windowLog <= c2.windowLog);
3078 #       define ZSTD_TABLECOST(h,c) ((1<<(h)) + (1<<(c)))
3079         assert(ZSTD_TABLECOST(c1.hashLog, c1.chainLog) <= ZSTD_TABLECOST(c2.hashLog, c2.chainLog));
3080     }
3081 }
3082 #endif
3083 
3084 /*! ZSTD_getCParams() :
3085 *   @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`.
3086 *   Size values are optional, provide 0 if not known or unused */
3087 ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
3088 {
3089     size_t const addedSize = srcSizeHint ? 0 : 500;
3090     U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : (U64)-1;
3091     U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);   /* intentional underflow for srcSizeHint == 0 */
3092 
3093 #if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1)
3094     static int g_monotonicTest = 1;
3095     if (g_monotonicTest) {
3096         ZSTD_check_compressionLevel_monotonicIncrease_memoryBudget();
3097         g_monotonicTest=0;
3098     }
3099 #endif
3100 
3101     DEBUGLOG(4, "ZSTD_getCParams: cLevel=%i, srcSize=%u, dictSize=%u => table %u",
3102                 compressionLevel, (U32)srcSizeHint, (U32)dictSize, tableID);
3103     if (compressionLevel <= 0) compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* 0 == default; no negative compressionLevel yet */
3104     if (compressionLevel > ZSTD_MAX_CLEVEL) compressionLevel = ZSTD_MAX_CLEVEL;
3105     { ZSTD_compressionParameters const cp = ZSTD_defaultCParameters[tableID][compressionLevel];
3106       return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize); }
3107 
3108 }
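
/* Usage sketch : deriving explicit compression parameters from a level, then tuning
 * them before handing them to an advanced entry point. Capping windowLog at 23
 * (8 MB window) to bound memory usage is an assumption of this sketch. */
static ZSTD_CDict* example_createTunedCDict(const void* dict, size_t dictSize,
                                            int compressionLevel, unsigned long long typicalSrcSize)
{
    ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, typicalSrcSize, dictSize);
    if (cParams.windowLog > 23) cParams.windowLog = 23;   /* bound window size on both compression and decompression sides */
    return ZSTD_createCDict_advanced(dict, dictSize,
                                     ZSTD_dlm_byCopy, ZSTD_dm_auto,
                                     cParams, ZSTD_defaultCMem);
}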
3109 
3110 /*! ZSTD_getParams() :
3111 *   same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
3112 *   All fields of `ZSTD_frameParameters` are set to default (0) */
3113 ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
3114     ZSTD_parameters params;
3115     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSizeHint, dictSize);
3116     memset(&params, 0, sizeof(params));
3117     params.cParams = cParams;
3118     params.fParams.contentSizeFlag = 1;
3119     return params;
3120 }
3121