xref: /freebsd/sys/contrib/zstd/lib/compress/zstd_compress.c (revision d06955f9bdb1416d9196043ed781f9b36dae9adc)
1 /*
2  * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
3  * All rights reserved.
4  *
5  * This source code is licensed under both the BSD-style license (found in the
6  * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7  * in the COPYING file in the root directory of this source tree).
8  * You may select, at your option, one of the above-listed licenses.
9  */
10 
11 
12 /*-*************************************
13 *  Tuning parameters
14 ***************************************/
15 #ifndef ZSTD_CLEVEL_DEFAULT
16 #  define ZSTD_CLEVEL_DEFAULT 3
17 #endif
18 
19 
20 /*-*************************************
21 *  Dependencies
22 ***************************************/
23 #include <string.h>         /* memset */
24 #include "mem.h"
25 #define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
26 #include "fse.h"
27 #define HUF_STATIC_LINKING_ONLY
28 #include "huf.h"
29 #include "zstd_compress.h"
30 #include "zstd_fast.h"
31 #include "zstd_double_fast.h"
32 #include "zstd_lazy.h"
33 #include "zstd_opt.h"
34 #include "zstd_ldm.h"
35 
36 
37 /*-*************************************
38 *  Helper functions
39 ***************************************/
40 size_t ZSTD_compressBound(size_t srcSize) {
41     return ZSTD_COMPRESSBOUND(srcSize);
42 }
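/* Usage sketch (illustration only) : ZSTD_compressBound() returns a worst-case
 * compressed size, so a destination buffer of that capacity makes a single-shot
 * compression call immune to dstSize_tooSmall. Error checking relies on
 * ZSTD_isError() and ZSTD_compress() from zstd.h; consume() is a hypothetical callback.
 *
 *     size_t const dstCapacity = ZSTD_compressBound(srcSize);
 *     void*  const dst = malloc(dstCapacity);
 *     if (dst != NULL) {
 *         size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, 3);  // level 3 = default
 *         if (!ZSTD_isError(cSize)) consume(dst, cSize);
 *         free(dst);
 *     }
 */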
43 
44 
45 /*-*************************************
46 *  Sequence storage
47 ***************************************/
48 static void ZSTD_resetSeqStore(seqStore_t* ssPtr)
49 {
50     ssPtr->lit = ssPtr->litStart;
51     ssPtr->sequences = ssPtr->sequencesStart;
52     ssPtr->longLengthID = 0;
53 }
54 
55 
56 /*-*************************************
57 *  Context memory management
58 ***************************************/
59 struct ZSTD_CDict_s {
60     void* dictBuffer;
61     const void* dictContent;
62     size_t dictContentSize;
63     ZSTD_CCtx* refContext;
64 };  /* typedef'd to ZSTD_CDict within "zstd.h" */
65 
66 ZSTD_CCtx* ZSTD_createCCtx(void)
67 {
68     return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
69 }
70 
71 ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
72 {
73     ZSTD_CCtx* cctx;
74 
75     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
76 
77     cctx = (ZSTD_CCtx*) ZSTD_calloc(sizeof(ZSTD_CCtx), customMem);
78     if (!cctx) return NULL;
79     cctx->customMem = customMem;
80     cctx->requestedParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;
81     ZSTD_STATIC_ASSERT(zcss_init==0);
82     ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
83     return cctx;
84 }
85 
86 ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)
87 {
88     ZSTD_CCtx* const cctx = (ZSTD_CCtx*) workspace;
89     if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
90     if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
91     memset(workspace, 0, workspaceSize);   /* may be a bit generous; could the memset be smaller? */

92     cctx->staticSize = workspaceSize;
93     cctx->workSpace = (void*)(cctx+1);
94     cctx->workSpaceSize = workspaceSize - sizeof(ZSTD_CCtx);
95 
96     /* entropy space (never moves) */
97     if (cctx->workSpaceSize < sizeof(ZSTD_entropyCTables_t)) return NULL;
98     assert(((size_t)cctx->workSpace & (sizeof(void*)-1)) == 0);   /* ensure correct alignment */
99     cctx->entropy = (ZSTD_entropyCTables_t*)cctx->workSpace;
100 
101     return cctx;
102 }
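/* Usage sketch (illustration only) : a static CCtx borrows caller-provided memory
 * instead of allocating. The buffer must be 8-byte aligned, must outlive the returned
 * ZSTD_CCtx*, and must never be passed to ZSTD_freeCCtx(). ZSTD_estimateCCtxSize(),
 * defined further below, returns a sufficient size for one-shot compression at a
 * given level.
 *
 *     size_t const wkspSize = ZSTD_estimateCCtxSize(3);    // level 3, one-shot usage
 *     void*  const wksp     = malloc(wkspSize);            // malloc() alignment is sufficient
 *     ZSTD_CCtx* const cctx = (wksp==NULL) ? NULL : ZSTD_initStaticCCtx(wksp, wkspSize);
 *     // ... ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, 3); ...
 *     free(wksp);                                          // releasing the workspace releases the context
 */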
103 
104 size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
105 {
106     if (cctx==NULL) return 0;   /* support free on NULL */
107     if (cctx->staticSize) return ERROR(memory_allocation);   /* not compatible with static CCtx */
108     ZSTD_free(cctx->workSpace, cctx->customMem);
109     cctx->workSpace = NULL;
110     ZSTD_freeCDict(cctx->cdictLocal);
111     cctx->cdictLocal = NULL;
112 #ifdef ZSTD_MULTITHREAD
113     ZSTDMT_freeCCtx(cctx->mtctx);
114     cctx->mtctx = NULL;
115 #endif
116     ZSTD_free(cctx, cctx->customMem);
117     return 0;   /* reserved as a potential error code in the future */
118 }
119 
120 
121 static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
122 {
123 #ifdef ZSTD_MULTITHREAD
124     return ZSTDMT_sizeof_CCtx(cctx->mtctx);
125 #else
126     (void) cctx;
127     return 0;
128 #endif
129 }
130 
131 
132 size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
133 {
134     if (cctx==NULL) return 0;   /* support sizeof on NULL */
135     DEBUGLOG(3, "sizeof(*cctx) : %u", (U32)sizeof(*cctx));
136     DEBUGLOG(3, "workSpaceSize (including streaming buffers): %u", (U32)cctx->workSpaceSize);
137     DEBUGLOG(3, "inner cdict : %u", (U32)ZSTD_sizeof_CDict(cctx->cdictLocal));
138     DEBUGLOG(3, "inner MTCTX : %u", (U32)ZSTD_sizeof_mtctx(cctx));
139     return sizeof(*cctx) + cctx->workSpaceSize
140            + ZSTD_sizeof_CDict(cctx->cdictLocal)
141            + ZSTD_sizeof_mtctx(cctx);
142 }
143 
144 size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
145 {
146     return ZSTD_sizeof_CCtx(zcs);  /* same object */
147 }
148 
149 /* private API call, for dictBuilder only */
150 const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
151 
152 #define ZSTD_CLEVEL_CUSTOM 999
153 
154 static ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
155         ZSTD_CCtx_params params, U64 srcSizeHint, size_t dictSize)
156 {
157     return (params.compressionLevel == ZSTD_CLEVEL_CUSTOM ?
158                     params.cParams :
159                     ZSTD_getCParams(params.compressionLevel, srcSizeHint, dictSize));
160 }
161 
162 static void ZSTD_cLevelToCCtxParams_srcSize(ZSTD_CCtx_params* params, U64 srcSize)
163 {
164     params->cParams = ZSTD_getCParamsFromCCtxParams(*params, srcSize, 0);
165     params->compressionLevel = ZSTD_CLEVEL_CUSTOM;
166 }
167 
168 static void ZSTD_cLevelToCParams(ZSTD_CCtx* cctx)
169 {
170     ZSTD_cLevelToCCtxParams_srcSize(
171             &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1);
172 }
173 
174 static void ZSTD_cLevelToCCtxParams(ZSTD_CCtx_params* params)
175 {
176     ZSTD_cLevelToCCtxParams_srcSize(params, 0);
177 }
178 
179 static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
180         ZSTD_compressionParameters cParams)
181 {
182     ZSTD_CCtx_params cctxParams;
183     memset(&cctxParams, 0, sizeof(cctxParams));
184     cctxParams.cParams = cParams;
185     cctxParams.compressionLevel = ZSTD_CLEVEL_CUSTOM;
186     return cctxParams;
187 }
188 
189 static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
190         ZSTD_customMem customMem)
191 {
192     ZSTD_CCtx_params* params;
193     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
194     params = (ZSTD_CCtx_params*)ZSTD_calloc(
195             sizeof(ZSTD_CCtx_params), customMem);
196     if (!params) { return NULL; }
197     params->customMem = customMem;
198     params->compressionLevel = ZSTD_CLEVEL_DEFAULT;
199     return params;
200 }
201 
202 ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
203 {
204     return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
205 }
206 
207 size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
208 {
209     if (params == NULL) { return 0; }
210     ZSTD_free(params, params->customMem);
211     return 0;
212 }
213 
214 size_t ZSTD_resetCCtxParams(ZSTD_CCtx_params* params)
215 {
216     return ZSTD_initCCtxParams(params, ZSTD_CLEVEL_DEFAULT);
217 }
218 
219 size_t ZSTD_initCCtxParams(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
220     if (!cctxParams) { return ERROR(GENERIC); }
221     memset(cctxParams, 0, sizeof(*cctxParams));
222     cctxParams->compressionLevel = compressionLevel;
223     return 0;
224 }
225 
226 size_t ZSTD_initCCtxParams_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
227 {
228     if (!cctxParams) { return ERROR(GENERIC); }
229     CHECK_F( ZSTD_checkCParams(params.cParams) );
230     memset(cctxParams, 0, sizeof(*cctxParams));
231     cctxParams->cParams = params.cParams;
232     cctxParams->fParams = params.fParams;
233     cctxParams->compressionLevel = ZSTD_CLEVEL_CUSTOM;
234     return 0;
235 }
236 
237 static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(
238         ZSTD_CCtx_params cctxParams, ZSTD_parameters params)
239 {
240     ZSTD_CCtx_params ret = cctxParams;
241     ret.cParams = params.cParams;
242     ret.fParams = params.fParams;
243     ret.compressionLevel = ZSTD_CLEVEL_CUSTOM;
244     return ret;
245 }
246 
247 #define CLAMPCHECK(val,min,max) {            \
248     if (((val)<(min)) | ((val)>(max))) {     \
249         return ERROR(parameter_outOfBound);  \
250 }   }
251 
252 size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value)
253 {
254     if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
255 
256     switch(param)
257     {
258     case ZSTD_p_format :
259         return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
260 
261     case ZSTD_p_compressionLevel:
262         if (value == 0) return 0;  /* special value : 0 means "don't change anything" */
263         if (cctx->cdict) return ERROR(stage_wrong);
264         return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
265 
266     case ZSTD_p_windowLog:
267     case ZSTD_p_hashLog:
268     case ZSTD_p_chainLog:
269     case ZSTD_p_searchLog:
270     case ZSTD_p_minMatch:
271     case ZSTD_p_targetLength:
272     case ZSTD_p_compressionStrategy:
273         if (value == 0) return 0;  /* special value : 0 means "don't change anything" */
274         if (cctx->cdict) return ERROR(stage_wrong);
275         ZSTD_cLevelToCParams(cctx);  /* Can optimize if srcSize is known */
276         return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
277 
278     case ZSTD_p_contentSizeFlag:
279     case ZSTD_p_checksumFlag:
280     case ZSTD_p_dictIDFlag:
281         return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
282 
283     case ZSTD_p_forceMaxWindow :  /* Force back-references to remain < windowSize,
284                                    * even when referencing into Dictionary content
285                                    * default : 0 when using a CDict, 1 when using a Prefix */
286         cctx->loadedDictEnd = 0;
287         return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
288 
289     case ZSTD_p_nbThreads:
290         if (value==0) return 0;
291         DEBUGLOG(5, " setting nbThreads : %u", value);
292         if (value > 1 && cctx->staticSize) {
293             return ERROR(parameter_unsupported);  /* MT not compatible with static alloc */
294         }
295         return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
296 
297     case ZSTD_p_jobSize:
298         return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
299 
300     case ZSTD_p_overlapSizeLog:
301         DEBUGLOG(5, " setting overlap with nbThreads == %u", cctx->requestedParams.nbThreads);
302         return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
303 
304     case ZSTD_p_enableLongDistanceMatching:
305         if (cctx->cdict) return ERROR(stage_wrong);
306         if (value != 0) {
307             ZSTD_cLevelToCParams(cctx);
308         }
309         return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
310 
311     case ZSTD_p_ldmHashLog:
312     case ZSTD_p_ldmMinMatch:
313         if (value == 0) return 0;  /* special value : 0 means "don't change anything" */
314         if (cctx->cdict) return ERROR(stage_wrong);
315         return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
316 
317     case ZSTD_p_ldmBucketSizeLog:
318     case ZSTD_p_ldmHashEveryLog:
319         if (cctx->cdict) return ERROR(stage_wrong);
320         return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
321 
322     default: return ERROR(parameter_unsupported);
323     }
324 }
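/* Usage sketch (illustration only) : typical use of the parameter API above, assuming
 * the companion entry points of the same experimental API in this zstd version's
 * zstd.h (ZSTD_CCtx_setPledgedSrcSize(), ZSTD_compress_generic(), ZSTD_e_end).
 *
 *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 19);
 *     ZSTD_CCtx_setParameter(cctx, ZSTD_p_checksumFlag, 1);
 *     ZSTD_CCtx_setPledgedSrcSize(cctx, srcSize);          // optional; lets the frame header carry the size
 *     {   ZSTD_inBuffer  input  = { src, srcSize, 0 };
 *         ZSTD_outBuffer output = { dst, dstCapacity, 0 };
 *         size_t const remaining = ZSTD_compress_generic(cctx, &output, &input, ZSTD_e_end);
 *         // remaining==0 means the frame is complete; ZSTD_isError(remaining) reports failures
 *     }
 *     ZSTD_freeCCtx(cctx);
 */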
325 
326 size_t ZSTD_CCtxParam_setParameter(
327         ZSTD_CCtx_params* params, ZSTD_cParameter param, unsigned value)
328 {
329     switch(param)
330     {
331     case ZSTD_p_format :
332         if (value > (unsigned)ZSTD_f_zstd1_magicless)
333             return ERROR(parameter_unsupported);
334         params->format = (ZSTD_format_e)value;
335         return 0;
336 
337     case ZSTD_p_compressionLevel :
338         if ((int)value > ZSTD_maxCLevel()) value = ZSTD_maxCLevel();
339         if (value == 0) return 0;
340         params->compressionLevel = value;
341         return 0;
342 
343     case ZSTD_p_windowLog :
344         if (value == 0) return 0;
345         CLAMPCHECK(value, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
346         ZSTD_cLevelToCCtxParams(params);
347         params->cParams.windowLog = value;
348         return 0;
349 
350     case ZSTD_p_hashLog :
351         if (value == 0) return 0;
352         CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
353         ZSTD_cLevelToCCtxParams(params);
354         params->cParams.hashLog = value;
355         return 0;
356 
357     case ZSTD_p_chainLog :
358         if (value == 0) return 0;
359         CLAMPCHECK(value, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
360         ZSTD_cLevelToCCtxParams(params);
361         params->cParams.chainLog = value;
362         return 0;
363 
364     case ZSTD_p_searchLog :
365         if (value == 0) return 0;
366         CLAMPCHECK(value, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
367         ZSTD_cLevelToCCtxParams(params);
368         params->cParams.searchLog = value;
369         return 0;
370 
371     case ZSTD_p_minMatch :
372         if (value == 0) return 0;
373         CLAMPCHECK(value, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
374         ZSTD_cLevelToCCtxParams(params);
375         params->cParams.searchLength = value;
376         return 0;
377 
378     case ZSTD_p_targetLength :
379         if (value == 0) return 0;
380         CLAMPCHECK(value, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
381         ZSTD_cLevelToCCtxParams(params);
382         params->cParams.targetLength = value;
383         return 0;
384 
385     case ZSTD_p_compressionStrategy :
386         if (value == 0) return 0;
387         CLAMPCHECK(value, (unsigned)ZSTD_fast, (unsigned)ZSTD_btultra);
388         ZSTD_cLevelToCCtxParams(params);
389         params->cParams.strategy = (ZSTD_strategy)value;
390         return 0;
391 
392     case ZSTD_p_contentSizeFlag :
393         /* Content size written in frame header _when known_ (default:1) */
394         DEBUGLOG(5, "set content size flag = %u", (value>0));
395         params->fParams.contentSizeFlag = value > 0;
396         return 0;
397 
398     case ZSTD_p_checksumFlag :
399         /* A 32-bit content checksum will be calculated and written at the end of the frame (default:0) */
400         params->fParams.checksumFlag = value > 0;
401         return 0;
402 
403     case ZSTD_p_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
404         DEBUGLOG(5, "set dictIDFlag = %u", (value>0));
405         params->fParams.noDictIDFlag = (value == 0);
406         return 0;
407 
408     case ZSTD_p_forceMaxWindow :
409         params->forceWindow = value > 0;
410         return 0;
411 
412     case ZSTD_p_nbThreads :
413         if (value == 0) return 0;
414 #ifndef ZSTD_MULTITHREAD
415         if (value > 1) return ERROR(parameter_unsupported);
416         return 0;
417 #else
418         return ZSTDMT_initializeCCtxParameters(params, value);
419 #endif
420 
421     case ZSTD_p_jobSize :
422 #ifndef ZSTD_MULTITHREAD
423         return ERROR(parameter_unsupported);
424 #else
425         if (params->nbThreads <= 1) return ERROR(parameter_unsupported);
426         return ZSTDMT_CCtxParam_setMTCtxParameter(params, ZSTDMT_p_sectionSize, value);
427 #endif
428 
429     case ZSTD_p_overlapSizeLog :
430 #ifndef ZSTD_MULTITHREAD
431         return ERROR(parameter_unsupported);
432 #else
433         if (params->nbThreads <= 1) return ERROR(parameter_unsupported);
434         return ZSTDMT_CCtxParam_setMTCtxParameter(params, ZSTDMT_p_overlapSectionLog, value);
435 #endif
436 
437     case ZSTD_p_enableLongDistanceMatching :
438         if (value != 0) {
439             ZSTD_cLevelToCCtxParams(params);
440             params->cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
441         }
442         return ZSTD_ldm_initializeParameters(&params->ldmParams, value);
443 
444     case ZSTD_p_ldmHashLog :
445         if (value == 0) return 0;
446         CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
447         params->ldmParams.hashLog = value;
448         return 0;
449 
450     case ZSTD_p_ldmMinMatch :
451         if (value == 0) return 0;
452         CLAMPCHECK(value, ZSTD_LDM_MINMATCH_MIN, ZSTD_LDM_MINMATCH_MAX);
453         params->ldmParams.minMatchLength = value;
454         return 0;
455 
456     case ZSTD_p_ldmBucketSizeLog :
457         if (value > ZSTD_LDM_BUCKETSIZELOG_MAX) {
458             return ERROR(parameter_outOfBound);
459         }
460         params->ldmParams.bucketSizeLog = value;
461         return 0;
462 
463     case ZSTD_p_ldmHashEveryLog :
464         if (value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN) {
465             return ERROR(parameter_outOfBound);
466         }
467         params->ldmParams.hashEveryLog = value;
468         return 0;
469 
470     default: return ERROR(parameter_unsupported);
471     }
472 }
473 
474 /**
475  * This function should be updated whenever ZSTD_CCtx_params is updated.
476  * Parameters are copied manually before the dictionary is loaded.
477  * The multithreading parameters jobSize and overlapSizeLog are set only if
478  * nbThreads > 1.
479  *
480  * Pledged srcSize is treated as unknown.
481  */
482 size_t ZSTD_CCtx_setParametersUsingCCtxParams(
483         ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
484 {
485     if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
486     if (cctx->cdict) return ERROR(stage_wrong);
487 
488     /* Assume the compression and frame parameters are validated */
489     cctx->requestedParams.cParams = params->cParams;
490     cctx->requestedParams.fParams = params->fParams;
491     cctx->requestedParams.compressionLevel = params->compressionLevel;
492 
493     /* Set force window explicitly since it sets cctx->loadedDictEnd */
494     CHECK_F( ZSTD_CCtx_setParameter(
495                    cctx, ZSTD_p_forceMaxWindow, params->forceWindow) );
496 
497     /* Set multithreading parameters explicitly */
498     CHECK_F( ZSTD_CCtx_setParameter(cctx, ZSTD_p_nbThreads, params->nbThreads) );
499     if (params->nbThreads > 1) {
500         CHECK_F( ZSTD_CCtx_setParameter(cctx, ZSTD_p_jobSize, params->jobSize) );
501         CHECK_F( ZSTD_CCtx_setParameter(
502                     cctx, ZSTD_p_overlapSizeLog, params->overlapSizeLog) );
503     }
504 
505     /* Copy long distance matching parameters */
506     cctx->requestedParams.ldmParams = params->ldmParams;
507 
508     /* customMem is used only for create/free params and can be ignored */
509     return 0;
510 }
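/* Usage sketch (illustration only) : parameters can also be staged in a standalone
 * ZSTD_CCtx_params object and applied in one call, e.g. when the same configuration
 * is reused across several contexts. All functions below are defined in this file.
 *
 *     ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
 *     ZSTD_CCtxParam_setParameter(params, ZSTD_p_compressionLevel, 6);
 *     ZSTD_CCtxParam_setParameter(params, ZSTD_p_windowLog, 23);
 *     ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);   // cctx must still be in zcss_init stage
 *     ZSTD_freeCCtxParams(params);
 */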
511 
512 ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
513 {
514     DEBUGLOG(4, " setting pledgedSrcSize to %u", (U32)pledgedSrcSize);
515     if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
516     cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
517     return 0;
518 }
519 
520 size_t ZSTD_CCtx_loadDictionary_advanced(
521         ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
522         ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictMode_e dictMode)
523 {
524     if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
525     if (cctx->staticSize) return ERROR(memory_allocation);  /* no malloc for static CCtx */
526     DEBUGLOG(4, "load dictionary of size %u", (U32)dictSize);
527     ZSTD_freeCDict(cctx->cdictLocal);  /* in case one already exists */
528     if (dict==NULL || dictSize==0) {   /* no dictionary mode */
529         cctx->cdictLocal = NULL;
530         cctx->cdict = NULL;
531     } else {
532         ZSTD_compressionParameters const cParams =
533                 ZSTD_getCParamsFromCCtxParams(cctx->requestedParams, 0, dictSize);
534         cctx->cdictLocal = ZSTD_createCDict_advanced(
535                                 dict, dictSize,
536                                 dictLoadMethod, dictMode,
537                                 cParams, cctx->customMem);
538         cctx->cdict = cctx->cdictLocal;
539         if (cctx->cdictLocal == NULL)
540             return ERROR(memory_allocation);
541     }
542     return 0;
543 }
544 
545 ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
546       ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
547 {
548     return ZSTD_CCtx_loadDictionary_advanced(
549             cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dm_auto);
550 }
551 
552 ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
553 {
554     return ZSTD_CCtx_loadDictionary_advanced(
555             cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dm_auto);
556 }
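/* Usage sketch (illustration only) : loading a dictionary before compression.
 * ZSTD_CCtx_loadDictionary() copies the dictionary content (and can therefore
 * allocate, hence the error on static CCtx), while the _byReference variant only
 * keeps a pointer, so the caller's buffer must remain valid for as long as the
 * dictionary is in use. The dictionary then applies to frames produced through the
 * advanced API (ZSTD_compress_generic(), assumed available in this version).
 *
 *     ZSTD_CCtx_loadDictionary(cctx, dictBuffer, dictSize);
 *     // ... ZSTD_compress_generic(cctx, &output, &input, ZSTD_e_end); ...
 */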
557 
558 
559 size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
560 {
561     if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
562     cctx->cdict = cdict;
563     memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));  /* exclusive */
564     return 0;
565 }
566 
567 size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
568 {
569     return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dm_rawContent);
570 }
571 
572 size_t ZSTD_CCtx_refPrefix_advanced(
573         ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictMode_e dictMode)
574 {
575     if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
576     cctx->cdict = NULL;   /* prefix discards any prior cdict */
577     cctx->prefixDict.dict = prefix;
578     cctx->prefixDict.dictSize = prefixSize;
579     cctx->prefixDict.dictMode = dictMode;
580     return 0;
581 }
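/* Usage sketch (illustration only) : a prefix is referenced, never copied (see the
 * assignments above), so the buffer must stay valid while compression runs. Per the
 * advanced API contract it applies to the next frame only, e.g. for delta-style
 * compression against a previously transmitted block :
 *
 *     ZSTD_CCtx_refPrefix(cctx, previousBlock, previousBlockSize);
 *     // compress currentBlock; the decompressor must reference the same prefix
 */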
582 
583 static void ZSTD_startNewCompression(ZSTD_CCtx* cctx)
584 {
585     cctx->streamStage = zcss_init;
586     cctx->pledgedSrcSizePlusOne = 0;
587 }
588 
589 /*! ZSTD_CCtx_reset() :
590  *  Also discards any loaded dictionary */
591 void ZSTD_CCtx_reset(ZSTD_CCtx* cctx)
592 {
593     ZSTD_startNewCompression(cctx);
594     cctx->cdict = NULL;
595 }
596 
597 /** ZSTD_checkCParams() :
598     checks that CParam values remain within the authorized range.
599     @return : 0, or an error code if one value is beyond authorized range */
600 size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
601 {
602     CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
603     CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
604     CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
605     CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
606     CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
607     CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
608     if ((U32)(cParams.strategy) > (U32)ZSTD_btultra)
609         return ERROR(parameter_unsupported);
610     return 0;
611 }
612 
613 /** ZSTD_clampCParams() :
614  *  clamps CParam values into the valid range.
615  *  @return : valid CParams */
616 static ZSTD_compressionParameters ZSTD_clampCParams(ZSTD_compressionParameters cParams)
617 {
618 #   define CLAMP(val,min,max) {      \
619         if (val<min) val=min;        \
620         else if (val>max) val=max;   \
621     }
622     CLAMP(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
623     CLAMP(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
624     CLAMP(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
625     CLAMP(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
626     CLAMP(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
627     CLAMP(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
628     if ((U32)(cParams.strategy) > (U32)ZSTD_btultra) cParams.strategy = ZSTD_btultra;
629     return cParams;
630 }
631 
632 /** ZSTD_cycleLog() :
633  *  condition for correct operation : hashLog > 1 */
634 static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
635 {
636     U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
637     return hashLog - btScale;
638 }
639 
640 /** ZSTD_adjustCParams_internal() :
641     optimize `cPar` for a given input (`srcSize` and `dictSize`).
642     mostly downsizing to reduce memory consumption and initialization latency.
643     Both `srcSize` and `dictSize` are optional (use 0 if unknown).
644     Note : cPar is considered validated at this stage. Use ZSTD_checkCParams() to ensure that condition. */
645 ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
646 {
647     static const U64 minSrcSize = 513; /* (1<<9) + 1 */
648     static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
649     assert(ZSTD_checkCParams(cPar)==0);
650 
651     if (dictSize && (srcSize+1<2) /* srcSize unknown */ )
652         srcSize = minSrcSize;  /* presumed small when there is a dictionary */
653     else if (srcSize == 0)
654         srcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* 0 == unknown : presumed large */
655 
656     /* resize windowLog if input is small enough, to use less memory */
657     if ( (srcSize < maxWindowResize)
658       && (dictSize < maxWindowResize) )  {
659         U32 const tSize = (U32)(srcSize + dictSize);
660         static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
661         U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
662                             ZSTD_highbit32(tSize-1) + 1;
663         if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
664     }
665     if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog;
666     {   U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
667         if (cycleLog > cPar.windowLog)
668             cPar.chainLog -= (cycleLog - cPar.windowLog);
669     }
670 
671     if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
672         cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* required for frame header */
673 
674     return cPar;
675 }
676 
677 ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
678 {
679     cPar = ZSTD_clampCParams(cPar);
680     return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
681 }
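/* Usage sketch (illustration only) : building compression parameters by hand, then
 * letting ZSTD_adjustCParams() shrink the tables for a small, known input.
 * ZSTD_getCParams() is the public helper used throughout this file to translate a
 * compression level into cParams.
 *
 *     ZSTD_compressionParameters cParams = ZSTD_getCParams(19, 4096, 0);  // level 19, 4 KB input, no dict
 *     cParams.windowLog = 18;                               // manual override
 *     if (!ZSTD_isError(ZSTD_checkCParams(cParams)))
 *         cParams = ZSTD_adjustCParams(cParams, 4096, 0);   // windowLog is reduced to fit the 4 KB input
 */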
682 
683 size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
684 {
685     /* CCtx size estimation is only supported for single-threaded compression. */
686     if (params->nbThreads > 1) { return ERROR(GENERIC); }
687     {   ZSTD_compressionParameters const cParams =
688                 ZSTD_getCParamsFromCCtxParams(*params, 0, 0);
689         size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
690         U32    const divider = (cParams.searchLength==3) ? 3 : 4;
691         size_t const maxNbSeq = blockSize / divider;
692         size_t const tokenSpace = blockSize + 11*maxNbSeq;
693         size_t const chainSize =
694                 (cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams.chainLog);
695         size_t const hSize = ((size_t)1) << cParams.hashLog;
696         U32    const hashLog3 = (cParams.searchLength>3) ?
697                                 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
698         size_t const h3Size = ((size_t)1) << hashLog3;
699         size_t const entropySpace = sizeof(ZSTD_entropyCTables_t);
700         size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
701 
702         size_t const optBudget =
703                 ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32)
704                 + (ZSTD_OPT_NUM+1)*(sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
705         size_t const optSpace = ((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btultra)) ? optBudget : 0;
706 
707         size_t const ldmSpace = params->ldmParams.enableLdm ?
708             ZSTD_ldm_getTableSize(params->ldmParams.hashLog,
709                                   params->ldmParams.bucketSizeLog) : 0;
710 
711         size_t const neededSpace = entropySpace + tableSpace + tokenSpace +
712                                    optSpace + ldmSpace;
713 
714         DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx));
715         DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace);
716         return sizeof(ZSTD_CCtx) + neededSpace;
717     }
718 }
719 
720 size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
721 {
722     ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
723     return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
724 }
725 
726 size_t ZSTD_estimateCCtxSize(int compressionLevel)
727 {
728     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
729     return ZSTD_estimateCCtxSize_usingCParams(cParams);
730 }
731 
732 size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
733 {
734     if (params->nbThreads > 1) { return ERROR(GENERIC); }
735     {   size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
736         size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params->cParams.windowLog);
737         size_t const inBuffSize = ((size_t)1 << params->cParams.windowLog) + blockSize;
738         size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
739         size_t const streamingSize = inBuffSize + outBuffSize;
740 
741         return CCtxSize + streamingSize;
742     }
743 }
744 
745 size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
746 {
747     ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
748     return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
749 }
750 
751 size_t ZSTD_estimateCStreamSize(int compressionLevel) {
752     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
753     return ZSTD_estimateCStreamSize_usingCParams(cParams);
754 }
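/* Usage sketch (illustration only) : the CStream estimate adds the streaming buffers
 * on top of the CCtx estimate, i.e. an input buffer of one full window plus one block
 * and an output buffer of ZSTD_compressBound(blockSize)+1 (see the code just above).
 * It is therefore the figure to use when carving a static workspace for streaming
 * (ZSTD_initStaticCStream-style usage, if available in this version) rather than for
 * one-shot compression.
 *
 *     size_t const oneShotSize   = ZSTD_estimateCCtxSize(3);
 *     size_t const streamingSize = ZSTD_estimateCStreamSize(3);
 *     assert(streamingSize >= oneShotSize);
 */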
755 
756 static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1,
757                                   ZSTD_compressionParameters cParams2)
758 {
759     U32 bslog1 = MIN(cParams1.windowLog, ZSTD_BLOCKSIZELOG_MAX);
760     U32 bslog2 = MIN(cParams2.windowLog, ZSTD_BLOCKSIZELOG_MAX);
761     return (bslog1 == bslog2)   /* same block size */
762          & (cParams1.hashLog  == cParams2.hashLog)
763          & (cParams1.chainLog == cParams2.chainLog)
764          & (cParams1.strategy == cParams2.strategy)   /* opt parser space */
765          & ((cParams1.searchLength==3) == (cParams2.searchLength==3));  /* hashlog3 space */
766 }
767 
768 /** The parameters are equivalent if ldm is not enabled in both sets or
769  *  all the parameters are equivalent. */
770 static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1,
771                                     ldmParams_t ldmParams2)
772 {
773     return (!ldmParams1.enableLdm && !ldmParams2.enableLdm) ||
774            (ldmParams1.enableLdm == ldmParams2.enableLdm &&
775             ldmParams1.hashLog == ldmParams2.hashLog &&
776             ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog &&
777             ldmParams1.minMatchLength == ldmParams2.minMatchLength &&
778             ldmParams1.hashEveryLog == ldmParams2.hashEveryLog);
779 }
780 
781 /** Equivalence for resetCCtx purposes */
782 static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1,
783                                  ZSTD_CCtx_params params2)
784 {
785     return ZSTD_equivalentCParams(params1.cParams, params2.cParams) &&
786            ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams);
787 }
788 
789 /*! ZSTD_continueCCtx() :
790  *  reuse CCtx without reset (note : requires no dictionary) */
791 static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pledgedSrcSize)
792 {
793     U32 const end = (U32)(cctx->nextSrc - cctx->base);
794     DEBUGLOG(4, "continue mode");
795     cctx->appliedParams = params;
796     cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
797     cctx->consumedSrcSize = 0;
798     if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
799         cctx->appliedParams.fParams.contentSizeFlag = 0;
800     DEBUGLOG(4, "pledged content size : %u ; flag : %u",
801         (U32)pledgedSrcSize, cctx->appliedParams.fParams.contentSizeFlag);
802     cctx->lowLimit = end;
803     cctx->dictLimit = end;
804     cctx->nextToUpdate = end+1;
805     cctx->stage = ZSTDcs_init;
806     cctx->dictID = 0;
807     cctx->loadedDictEnd = 0;
808     { int i; for (i=0; i<ZSTD_REP_NUM; i++) cctx->seqStore.rep[i] = repStartValue[i]; }
809     cctx->optState.litLengthSum = 0;  /* force reset of btopt stats */
810     XXH64_reset(&cctx->xxhState, 0);
811     return 0;
812 }
813 
814 typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e;
815 typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e;
816 
817 /*! ZSTD_resetCCtx_internal() :
818     note : `params` are assumed fully validated at this stage */
819 static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
820                                       ZSTD_CCtx_params params, U64 pledgedSrcSize,
821                                       ZSTD_compResetPolicy_e const crp,
822                                       ZSTD_buffered_policy_e const zbuff)
823 {
824     DEBUGLOG(4, "ZSTD_resetCCtx_internal");
825     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
826     DEBUGLOG(4, "pledgedSrcSize: %u", (U32)pledgedSrcSize);
827 
828     if (crp == ZSTDcrp_continue) {
829         if (ZSTD_equivalentParams(params, zc->appliedParams)) {
830             DEBUGLOG(4, "ZSTD_equivalentParams()==1");
831             assert(!(params.ldmParams.enableLdm &&
832                      params.ldmParams.hashEveryLog == ZSTD_LDM_HASHEVERYLOG_NOTSET));
833             zc->entropy->hufCTable_repeatMode = HUF_repeat_none;
834             zc->entropy->offcode_repeatMode = FSE_repeat_none;
835             zc->entropy->matchlength_repeatMode = FSE_repeat_none;
836             zc->entropy->litlength_repeatMode = FSE_repeat_none;
837             return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
838     }   }
839 
840     if (params.ldmParams.enableLdm) {
841         /* Adjust long distance matching parameters */
842         ZSTD_ldm_adjustParameters(&params.ldmParams, params.cParams.windowLog);
843         assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
844         assert(params.ldmParams.hashEveryLog < 32);
845         zc->ldmState.hashPower =
846                 ZSTD_ldm_getHashPower(params.ldmParams.minMatchLength);
847     }
848 
849     {   size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params.cParams.windowLog);
850         U32    const divider = (params.cParams.searchLength==3) ? 3 : 4;
851         size_t const maxNbSeq = blockSize / divider;
852         size_t const tokenSpace = blockSize + 11*maxNbSeq;
853         size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ?
854                                 0 : ((size_t)1 << params.cParams.chainLog);
855         size_t const hSize = ((size_t)1) << params.cParams.hashLog;
856         U32    const hashLog3 = (params.cParams.searchLength>3) ?
857                                 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog);
858         size_t const h3Size = ((size_t)1) << hashLog3;
859         size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
860         size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0;
861         size_t const buffInSize = (zbuff==ZSTDb_buffered) ? ((size_t)1 << params.cParams.windowLog) + blockSize : 0;
862         void* ptr;
863 
864         /* Check if workSpace is large enough, alloc a new one if needed */
865         {   size_t const entropySpace = sizeof(ZSTD_entropyCTables_t);
866             size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32)
867                                   + (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t));
868             size_t const optSpace = ( (params.cParams.strategy == ZSTD_btopt)
869                                     || (params.cParams.strategy == ZSTD_btultra)) ?
870                                     optPotentialSpace : 0;
871             size_t const bufferSpace = buffInSize + buffOutSize;
872             size_t const ldmSpace = params.ldmParams.enableLdm
873                 ? ZSTD_ldm_getTableSize(params.ldmParams.hashLog, params.ldmParams.bucketSizeLog)
874                 : 0;
875             size_t const neededSpace = entropySpace + optSpace + ldmSpace +
876                                        tableSpace + tokenSpace + bufferSpace;
877 
878             if (zc->workSpaceSize < neededSpace) {  /* too small : resize */
879                 DEBUGLOG(5, "Need to update workSpaceSize from %uK to %uK \n",
880                             (unsigned)zc->workSpaceSize>>10,
881                             (unsigned)neededSpace>>10);
882                 /* static cctx : no resize, error out */
883                 if (zc->staticSize) return ERROR(memory_allocation);
884 
885                 zc->workSpaceSize = 0;
886                 ZSTD_free(zc->workSpace, zc->customMem);
887                 zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
888                 if (zc->workSpace == NULL) return ERROR(memory_allocation);
889                 zc->workSpaceSize = neededSpace;
890                 ptr = zc->workSpace;
891 
892                 /* entropy space */
893                 assert(((size_t)zc->workSpace & 3) == 0);   /* ensure correct alignment */
894                 assert(zc->workSpaceSize >= sizeof(ZSTD_entropyCTables_t));
895                 zc->entropy = (ZSTD_entropyCTables_t*)zc->workSpace;
896         }   }
897 
898         /* init params */
899         zc->appliedParams = params;
900         zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
901         zc->consumedSrcSize = 0;
902         if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
903             zc->appliedParams.fParams.contentSizeFlag = 0;
904         DEBUGLOG(5, "pledged content size : %u ; flag : %u",
905             (U32)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
906         zc->blockSize = blockSize;
907 
908         XXH64_reset(&zc->xxhState, 0);
909         zc->stage = ZSTDcs_init;
910         zc->dictID = 0;
911         zc->loadedDictEnd = 0;
912         zc->entropy->hufCTable_repeatMode = HUF_repeat_none;
913         zc->entropy->offcode_repeatMode = FSE_repeat_none;
914         zc->entropy->matchlength_repeatMode = FSE_repeat_none;
915         zc->entropy->litlength_repeatMode = FSE_repeat_none;
916         zc->nextToUpdate = 1;
917         zc->nextSrc = NULL;
918         zc->base = NULL;
919         zc->dictBase = NULL;
920         zc->dictLimit = 0;
921         zc->lowLimit = 0;
922         { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->seqStore.rep[i] = repStartValue[i]; }
923         zc->hashLog3 = hashLog3;
924         zc->optState.litLengthSum = 0;
925 
926         ptr = zc->entropy + 1;
927 
928         /* opt parser space */
929         if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btultra)) {
930             DEBUGLOG(5, "reserving optimal parser space");
931             assert(((size_t)ptr & 3) == 0);  /* ensure ptr is properly aligned */
932             zc->optState.litFreq = (U32*)ptr;
933             zc->optState.litLengthFreq = zc->optState.litFreq + (1<<Litbits);
934             zc->optState.matchLengthFreq = zc->optState.litLengthFreq + (MaxLL+1);
935             zc->optState.offCodeFreq = zc->optState.matchLengthFreq + (MaxML+1);
936             ptr = zc->optState.offCodeFreq + (MaxOff+1);
937             zc->optState.matchTable = (ZSTD_match_t*)ptr;
938             ptr = zc->optState.matchTable + ZSTD_OPT_NUM+1;
939             zc->optState.priceTable = (ZSTD_optimal_t*)ptr;
940             ptr = zc->optState.priceTable + ZSTD_OPT_NUM+1;
941         }
942 
943         /* ldm hash table */
944         /* initialize bucketOffsets table later for pointer alignment */
945         if (params.ldmParams.enableLdm) {
946             size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
947             memset(ptr, 0, ldmHSize * sizeof(ldmEntry_t));
948             assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
949             zc->ldmState.hashTable = (ldmEntry_t*)ptr;
950             ptr = zc->ldmState.hashTable + ldmHSize;
951         }
952 
953         /* table Space */
954         if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace);   /* reset tables only */
955         assert(((size_t)ptr & 3) == 0);  /* ensure ptr is properly aligned */
956         zc->hashTable = (U32*)(ptr);
957         zc->chainTable = zc->hashTable + hSize;
958         zc->hashTable3 = zc->chainTable + chainSize;
959         ptr = zc->hashTable3 + h3Size;
960 
961         /* sequences storage */
962         zc->seqStore.sequencesStart = (seqDef*)ptr;
963         ptr = zc->seqStore.sequencesStart + maxNbSeq;
964         zc->seqStore.llCode = (BYTE*) ptr;
965         zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
966         zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
967         zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
968         ptr = zc->seqStore.litStart + blockSize;
969 
970         /* ldm bucketOffsets table */
971         if (params.ldmParams.enableLdm) {
972             size_t const ldmBucketSize =
973                   ((size_t)1) << (params.ldmParams.hashLog -
974                                   params.ldmParams.bucketSizeLog);
975             memset(ptr, 0, ldmBucketSize);
976             zc->ldmState.bucketOffsets = (BYTE*)ptr;
977             ptr = zc->ldmState.bucketOffsets + ldmBucketSize;
978         }
979 
980         /* buffers */
981         zc->inBuffSize = buffInSize;
982         zc->inBuff = (char*)ptr;
983         zc->outBuffSize = buffOutSize;
984         zc->outBuff = zc->inBuff + buffInSize;
985 
986         return 0;
987     }
988 }
989 
990 /* ZSTD_invalidateRepCodes() :
991  * ensures next compression will not use repcodes from previous block.
992  * Note : only works with regular variant;
993  *        do not use with extDict variant ! */
994 void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
995     int i;
996     for (i=0; i<ZSTD_REP_NUM; i++) cctx->seqStore.rep[i] = 0;
997 }
998 
999 
1000 /*! ZSTD_copyCCtx_internal() :
1001  *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
1002  *  The "context", in this case, refers to the hash and chain tables, entropy
1003  *  tables, and dictionary offsets.
1004  *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
1005  *  pledgedSrcSize=0 means "empty" if fParams.contentSizeFlag=1
1006  *  @return : 0, or an error code */
1007 static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
1008                             const ZSTD_CCtx* srcCCtx,
1009                             ZSTD_frameParameters fParams,
1010                             unsigned long long pledgedSrcSize,
1011                             ZSTD_buffered_policy_e zbuff)
1012 {
1013     DEBUGLOG(5, "ZSTD_copyCCtx_internal");
1014     if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);
1015 
1016     memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
1017     {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
1018         /* Copy only compression parameters related to tables. */
1019         params.cParams = srcCCtx->appliedParams.cParams;
1020         params.fParams = fParams;
1021         ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
1022                                 ZSTDcrp_noMemset, zbuff);
1023     }
1024 
1025     /* copy tables */
1026     {   size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
1027         size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
1028         size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
1029         size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
1030         assert((U32*)dstCCtx->chainTable == (U32*)dstCCtx->hashTable + hSize);  /* chainTable must follow hashTable */
1031         assert((U32*)dstCCtx->hashTable3 == (U32*)dstCCtx->chainTable + chainSize);
1032         memcpy(dstCCtx->hashTable, srcCCtx->hashTable, tableSpace);   /* presumes all tables follow each other */
1033     }
1034 
1035     /* copy dictionary offsets */
1036     dstCCtx->nextToUpdate = srcCCtx->nextToUpdate;
1037     dstCCtx->nextToUpdate3= srcCCtx->nextToUpdate3;
1038     dstCCtx->nextSrc      = srcCCtx->nextSrc;
1039     dstCCtx->base         = srcCCtx->base;
1040     dstCCtx->dictBase     = srcCCtx->dictBase;
1041     dstCCtx->dictLimit    = srcCCtx->dictLimit;
1042     dstCCtx->lowLimit     = srcCCtx->lowLimit;
1043     dstCCtx->loadedDictEnd= srcCCtx->loadedDictEnd;
1044     dstCCtx->dictID       = srcCCtx->dictID;
1045 
1046     /* copy entropy tables */
1047     memcpy(dstCCtx->entropy, srcCCtx->entropy, sizeof(ZSTD_entropyCTables_t));
1048 
1049     return 0;
1050 }
1051 
1052 /*! ZSTD_copyCCtx() :
1053  *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
1054  *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
1055  *  pledgedSrcSize==0 means "unknown".
1056  *  @return : 0, or an error code */
1057 size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
1058 {
1059     ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
1060     ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0);
1061     ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
1062     fParams.contentSizeFlag = pledgedSrcSize>0;
1063 
1064     return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx, fParams, pledgedSrcSize, zbuff);
1065 }
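/* Usage sketch (illustration only) : a typical use of ZSTD_copyCCtx() is to amortize
 * expensive context setup, e.g. loading a dictionary once into a reference context
 * and then cloning it for each new frame, which appears to be why ZSTD_CDict_s
 * (above) keeps a pre-loaded refContext. This assumes the block-level advanced API
 * (ZSTD_compressBegin_usingDict(), ZSTD_compressEnd()); the reference context must
 * still be in its init stage, i.e. no data compressed through it yet.
 *
 *     ZSTD_compressBegin_usingDict(refCCtx, dict, dictSize, compressionLevel);  // done once
 *     ZSTD_copyCCtx(workCCtx, refCCtx, srcSize);        // cheap per-frame table copy
 *     // ... ZSTD_compressEnd(workCCtx, dst, dstCapacity, src, srcSize); ...
 */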
1066 
1067 
1068 /*! ZSTD_reduceTable() :
1069  *  reduce table indexes by `reducerValue` */
1070 static void ZSTD_reduceTable (U32* const table, U32 const size, U32 const reducerValue)
1071 {
1072     U32 u;
1073     for (u=0 ; u < size ; u++) {
1074         if (table[u] < reducerValue) table[u] = 0;
1075         else table[u] -= reducerValue;
1076     }
1077 }
1078 
1079 /*! ZSTD_ldm_reduceTable() :
1080  *  reduce table indexes by `reducerValue` */
1081 static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
1082                                  U32 const reducerValue)
1083 {
1084     U32 u;
1085     for (u = 0; u < size; u++) {
1086         if (table[u].offset < reducerValue) table[u].offset = 0;
1087         else table[u].offset -= reducerValue;
1088     }
1089 }
1090 
1091 /*! ZSTD_reduceIndex() :
1092 *   rescale all indexes to avoid future overflow (indexes are U32) */
1093 static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
1094 {
1095     { U32 const hSize = (U32)1 << zc->appliedParams.cParams.hashLog;
1096       ZSTD_reduceTable(zc->hashTable, hSize, reducerValue); }
1097 
1098     { U32 const chainSize = (zc->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((U32)1 << zc->appliedParams.cParams.chainLog);
1099       ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue); }
1100 
1101     { U32 const h3Size = (zc->hashLog3) ? (U32)1 << zc->hashLog3 : 0;
1102       ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue); }
1103 
1104     { if (zc->appliedParams.ldmParams.enableLdm) {
1105           U32 const ldmHSize = (U32)1 << zc->appliedParams.ldmParams.hashLog;
1106           ZSTD_ldm_reduceTable(zc->ldmState.hashTable, ldmHSize, reducerValue);
1107       }
1108     }
1109 }
1110 
1111 
1112 /*-*******************************************************
1113 *  Block entropic compression
1114 *********************************************************/
1115 
1116 /* See doc/zstd_compression_format.md for detailed format description */
1117 
1118 size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
1119 {
1120     if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
1121     memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
1122     MEM_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
1123     return ZSTD_blockHeaderSize+srcSize;
1124 }
1125 
1126 
1127 static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
1128 {
1129     BYTE* const ostart = (BYTE* const)dst;
1130     U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
1131 
1132     if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall);
1133 
1134     switch(flSize)
1135     {
1136         case 1: /* 2 - 1 - 5 */
1137             ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
1138             break;
1139         case 2: /* 2 - 2 - 12 */
1140             MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
1141             break;
1142         case 3: /* 2 - 2 - 20 */
1143             MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
1144             break;
1145         default:   /* not necessary : flSize is {1,2,3} */
1146             assert(0);
1147     }
1148 
1149     memcpy(ostart + flSize, src, srcSize);
1150     return srcSize + flSize;
1151 }
1152 
1153 static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
1154 {
1155     BYTE* const ostart = (BYTE* const)dst;
1156     U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
1157 
1158     (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */
1159 
1160     switch(flSize)
1161     {
1162         case 1: /* 2 - 1 - 5 */
1163             ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
1164             break;
1165         case 2: /* 2 - 2 - 12 */
1166             MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
1167             break;
1168         case 3: /* 2 - 2 - 20 */
1169             MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
1170             break;
1171         default:   /* not necessary : flSize is {1,2,3} */
1172             assert(0);
1173     }
1174 
1175     ostart[flSize] = *(const BYTE*)src;
1176     return flSize+1;
1177 }
1178 
1179 
1180 static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
1181 
1182 static size_t ZSTD_compressLiterals (ZSTD_entropyCTables_t * entropy,
1183                                      ZSTD_strategy strategy,
1184                                      void* dst, size_t dstCapacity,
1185                                const void* src, size_t srcSize)
1186 {
1187     size_t const minGain = ZSTD_minGain(srcSize);
1188     size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
1189     BYTE*  const ostart = (BYTE*)dst;
1190     U32 singleStream = srcSize < 256;
1191     symbolEncodingType_e hType = set_compressed;
1192     size_t cLitSize;
1193 
1194 
1195     /* small ? don't even attempt compression (speed opt) */
1196 #   define LITERAL_NOENTROPY 63
1197     {   size_t const minLitSize = entropy->hufCTable_repeatMode == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
1198         if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
1199     }
1200 
1201     if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall);   /* not enough space for compression */
1202     {   HUF_repeat repeat = entropy->hufCTable_repeatMode;
1203         int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
1204         if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
1205         cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
1206                                       entropy->workspace, sizeof(entropy->workspace), (HUF_CElt*)entropy->hufCTable, &repeat, preferRepeat)
1207                                 : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
1208                                       entropy->workspace, sizeof(entropy->workspace), (HUF_CElt*)entropy->hufCTable, &repeat, preferRepeat);
1209         if (repeat != HUF_repeat_none) { hType = set_repeat; }    /* reused the existing table */
1210         else { entropy->hufCTable_repeatMode = HUF_repeat_check; }       /* now have a table to reuse */
1211     }
1212 
1213     if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
1214         entropy->hufCTable_repeatMode = HUF_repeat_none;
1215         return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
1216     }
1217     if (cLitSize==1) {
1218         entropy->hufCTable_repeatMode = HUF_repeat_none;
1219         return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
1220     }
1221 
1222     /* Build header */
1223     switch(lhSize)
1224     {
1225     case 3: /* 2 - 2 - 10 - 10 */
1226         {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
1227             MEM_writeLE24(ostart, lhc);
1228             break;
1229         }
1230     case 4: /* 2 - 2 - 14 - 14 */
1231         {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
1232             MEM_writeLE32(ostart, lhc);
1233             break;
1234         }
1235     case 5: /* 2 - 2 - 18 - 18 */
1236         {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
1237             MEM_writeLE32(ostart, lhc);
1238             ostart[4] = (BYTE)(cLitSize >> 10);
1239             break;
1240         }
1241     default:   /* not possible : lhSize is {3,4,5} */
1242         assert(0);
1243     }
1244     return lhSize+cLitSize;
1245 }
1246 
1247 
1248 void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
1249 {
1250     BYTE const LL_deltaCode = 19;
1251     BYTE const ML_deltaCode = 36;
1252     const seqDef* const sequences = seqStorePtr->sequencesStart;
1253     BYTE* const llCodeTable = seqStorePtr->llCode;
1254     BYTE* const ofCodeTable = seqStorePtr->ofCode;
1255     BYTE* const mlCodeTable = seqStorePtr->mlCode;
1256     U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
1257     U32 u;
1258     for (u=0; u<nbSeq; u++) {
1259         U32 const llv = sequences[u].litLength;
1260         U32 const mlv = sequences[u].matchLength;
1261         llCodeTable[u] = (llv> 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv];
1262         ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
1263         mlCodeTable[u] = (mlv>127) ? (BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv];
1264     }
1265     if (seqStorePtr->longLengthID==1)
1266         llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
1267     if (seqStorePtr->longLengthID==2)
1268         mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
1269 }
1270 
1271 typedef enum {
1272     ZSTD_defaultDisallowed = 0,
1273     ZSTD_defaultAllowed = 1
1274 } ZSTD_defaultPolicy_e;
1275 
1276 MEM_STATIC symbolEncodingType_e ZSTD_selectEncodingType(
1277         FSE_repeat* repeatMode, size_t const mostFrequent, size_t nbSeq,
1278         U32 defaultNormLog, ZSTD_defaultPolicy_e const isDefaultAllowed)
1279 {
1280 #define MIN_SEQ_FOR_DYNAMIC_FSE   64
1281 #define MAX_SEQ_FOR_STATIC_FSE  1000
1282     ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
1283     if ((mostFrequent == nbSeq) && (!isDefaultAllowed || nbSeq > 2)) {
1284         /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
1285          * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
1286          * If basic encoding isn't possible, always choose RLE.
1287          */
1288         *repeatMode = FSE_repeat_check;
1289         return set_rle;
1290     }
1291     if (isDefaultAllowed && (*repeatMode == FSE_repeat_valid) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
1292         return set_repeat;
1293     }
1294     if (isDefaultAllowed && ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (defaultNormLog-1))))) {
1295         *repeatMode = FSE_repeat_valid;
1296         return set_basic;
1297     }
1298     *repeatMode = FSE_repeat_check;
1299     return set_compressed;
1300 }
1301 
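/*! ZSTD_buildCTable() :
 *  Build the FSE compression table for one symbol type, and write its description into dst when needed.
 * @return : number of header bytes written into dst :
 *           0 for set_repeat and set_basic (nothing needs to be transmitted),
 *           1 for set_rle (the repeated symbol value),
 *           the size of the serialized NCount for set_compressed,
 *           or an error code. */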
1302 MEM_STATIC size_t ZSTD_buildCTable(void* dst, size_t dstCapacity,
1303         FSE_CTable* CTable, U32 FSELog, symbolEncodingType_e type,
1304         U32* count, U32 max,
1305         BYTE const* codeTable, size_t nbSeq,
1306         S16 const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
1307         void* workspace, size_t workspaceSize)
1308 {
1309     BYTE* op = (BYTE*)dst;
1310     BYTE const* const oend = op + dstCapacity;
1311 
1312     switch (type) {
1313     case set_rle:
1314         *op = codeTable[0];
1315         CHECK_F(FSE_buildCTable_rle(CTable, (BYTE)max));
1316         return 1;
1317     case set_repeat:
1318         return 0;
1319     case set_basic:
1320         CHECK_F(FSE_buildCTable_wksp(CTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize));
1321         return 0;
1322     case set_compressed: {
1323         S16 norm[MaxSeq + 1];
1324         size_t nbSeq_1 = nbSeq;
1325         const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
1326         if (count[codeTable[nbSeq-1]] > 1) {
1327             count[codeTable[nbSeq-1]]--;
1328             nbSeq_1--;
1329         }
1330         assert(nbSeq_1 > 1);
1331         CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
1332         {   size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog);   /* overflow protected */
1333             if (FSE_isError(NCountSize)) return NCountSize;
1334             CHECK_F(FSE_buildCTable_wksp(CTable, norm, max, tableLog, workspace, workspaceSize));
1335             return NCountSize;
1336         }
1337     }
1338     default: return assert(0), ERROR(GENERIC);
1339     }
1340 }
1341 
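/*! ZSTD_encodeSequences() :
 *  Flush all sequences into a single bitstream :
 *  sequences are encoded in reverse order (an FSE requirement, so the decoder can read them forward),
 *  interleaving the three FSE states (offsets, match lengths, literal lengths)
 *  with the raw extra bits of each field.
 *  The inline bit counts in the loop below track worst-case accumulator usage on 32-bit / 64-bit targets. */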
1342 MEM_STATIC size_t ZSTD_encodeSequences(void* dst, size_t dstCapacity,
1343     FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
1344     FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
1345     FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
1346     seqDef const* sequences, size_t nbSeq, int longOffsets)
1347 {
1348     BIT_CStream_t blockStream;
1349     FSE_CState_t  stateMatchLength;
1350     FSE_CState_t  stateOffsetBits;
1351     FSE_CState_t  stateLitLength;
1352 
1353     CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */
1354 
1355     /* first symbols */
1356     FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
1357     FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);
1358     FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);
1359     BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
1360     if (MEM_32bits()) BIT_flushBits(&blockStream);
1361     BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
1362     if (MEM_32bits()) BIT_flushBits(&blockStream);
1363     if (longOffsets) {
1364         U32 const ofBits = ofCodeTable[nbSeq-1];
1365         int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
1366         if (extraBits) {
1367             BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
1368             BIT_flushBits(&blockStream);
1369         }
1370         BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
1371                     ofBits - extraBits);
1372     } else {
1373         BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
1374     }
1375     BIT_flushBits(&blockStream);
1376 
1377     {   size_t n;
1378         for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
1379             BYTE const llCode = llCodeTable[n];
1380             BYTE const ofCode = ofCodeTable[n];
1381             BYTE const mlCode = mlCodeTable[n];
1382             U32  const llBits = LL_bits[llCode];
1383             U32  const ofBits = ofCode;                                     /* 32b*/  /* 64b*/
1384             U32  const mlBits = ML_bits[mlCode];
1385                                                                             /* (7)*/  /* (7)*/
1386             FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
1387             FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
1388             if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
1389             FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
1390             if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
1391                 BIT_flushBits(&blockStream);                                /* (7)*/
1392             BIT_addBits(&blockStream, sequences[n].litLength, llBits);
1393             if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
1394             BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
1395             if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
1396             if (longOffsets) {
1397                 int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
1398                 if (extraBits) {
1399                     BIT_addBits(&blockStream, sequences[n].offset, extraBits);
1400                     BIT_flushBits(&blockStream);                            /* (7)*/
1401                 }
1402                 BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
1403                             ofBits - extraBits);                            /* 31 */
1404             } else {
1405                 BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
1406             }
1407             BIT_flushBits(&blockStream);                                    /* (7)*/
1408     }   }
1409 
1410     FSE_flushCState(&blockStream, &stateMatchLength);
1411     FSE_flushCState(&blockStream, &stateOffsetBits);
1412     FSE_flushCState(&blockStream, &stateLitLength);
1413 
1414     {   size_t const streamSize = BIT_closeCStream(&blockStream);
1415         if (streamSize==0) return ERROR(dstSize_tooSmall);   /* not enough space */
1416         return streamSize;
1417     }
1418 }
1419 
1420 MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
1421                               ZSTD_entropyCTables_t* entropy,
1422                               ZSTD_compressionParameters const* cParams,
1423                               void* dst, size_t dstCapacity)
1424 {
1425     const int longOffsets = cParams->windowLog > STREAM_ACCUMULATOR_MIN;
1426     U32 count[MaxSeq+1];
1427     FSE_CTable* CTable_LitLength = entropy->litlengthCTable;
1428     FSE_CTable* CTable_OffsetBits = entropy->offcodeCTable;
1429     FSE_CTable* CTable_MatchLength = entropy->matchlengthCTable;
1430     U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
1431     const seqDef* const sequences = seqStorePtr->sequencesStart;
1432     const BYTE* const ofCodeTable = seqStorePtr->ofCode;
1433     const BYTE* const llCodeTable = seqStorePtr->llCode;
1434     const BYTE* const mlCodeTable = seqStorePtr->mlCode;
1435     BYTE* const ostart = (BYTE*)dst;
1436     BYTE* const oend = ostart + dstCapacity;
1437     BYTE* op = ostart;
1438     size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
1439     BYTE* seqHead;
1440 
1441     ZSTD_STATIC_ASSERT(sizeof(entropy->workspace) >= (1<<MAX(MLFSELog,LLFSELog)));
1442 
1443     /* Compress literals */
1444     {   const BYTE* const literals = seqStorePtr->litStart;
1445         size_t const litSize = seqStorePtr->lit - literals;
1446         size_t const cSize = ZSTD_compressLiterals(
1447                 entropy, cParams->strategy, op, dstCapacity, literals, litSize);
1448         if (ZSTD_isError(cSize))
1449           return cSize;
1450         op += cSize;
1451     }
1452 
1453     /* Sequences Header */
1454     if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */) return ERROR(dstSize_tooSmall);
1455     if (nbSeq < 0x7F) *op++ = (BYTE)nbSeq;
1456     else if (nbSeq < LONGNBSEQ) op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
1457     else op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
1458     if (nbSeq==0) return op - ostart;
1459 
1460     /* seqHead : flags for FSE encoding type */
1461     seqHead = op++;
1462 
1463     /* convert length/distances into codes */
1464     ZSTD_seqToCodes(seqStorePtr);
1465     /* CTable for Literal Lengths */
1466     {   U32 max = MaxLL;
1467         size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, entropy->workspace);
1468         LLtype = ZSTD_selectEncodingType(&entropy->litlength_repeatMode, mostFrequent, nbSeq, LL_defaultNormLog, ZSTD_defaultAllowed);
1469         {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
1470                     count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
1471                     entropy->workspace, sizeof(entropy->workspace));
1472             if (ZSTD_isError(countSize)) return countSize;
1473             op += countSize;
1474     }   }
1475     /* CTable for Offsets */
1476     {   U32 max = MaxOff;
1477         size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, entropy->workspace);
1478         /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
1479         ZSTD_defaultPolicy_e const defaultPolicy = max <= DefaultMaxOff ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
1480         Offtype = ZSTD_selectEncodingType(&entropy->offcode_repeatMode, mostFrequent, nbSeq, OF_defaultNormLog, defaultPolicy);
1481         {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
1482                     count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
1483                     entropy->workspace, sizeof(entropy->workspace));
1484             if (ZSTD_isError(countSize)) return countSize;
1485             op += countSize;
1486     }   }
1487     /* CTable for MatchLengths */
1488     {   U32 max = MaxML;
1489         size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, entropy->workspace);
1490         MLtype = ZSTD_selectEncodingType(&entropy->matchlength_repeatMode, mostFrequent, nbSeq, ML_defaultNormLog, ZSTD_defaultAllowed);
1491         {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
1492                     count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
1493                     entropy->workspace, sizeof(entropy->workspace));
1494             if (ZSTD_isError(countSize)) return countSize;
1495             op += countSize;
1496     }   }
1497 
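    /* Symbol compression modes : 2 bits per table, packed into a single byte
     * (literal lengths in bits 6-7, offsets in bits 4-5, match lengths in bits 2-3, bits 0-1 reserved). */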
1498     *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
1499 
1500     {   size_t const streamSize = ZSTD_encodeSequences(op, oend - op,
1501                 CTable_MatchLength, mlCodeTable,
1502                 CTable_OffsetBits, ofCodeTable,
1503                 CTable_LitLength, llCodeTable,
1504                 sequences, nbSeq, longOffsets);
1505         if (ZSTD_isError(streamSize)) return streamSize;
1506         op += streamSize;
1507     }
1508 
1509     return op - ostart;
1510 }
1511 
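/*! ZSTD_compressSequences() :
 *  Entropy-compress the whole seqStore content (literals + sequences).
 * @return : compressed size of the block body,
 *           0 when the result does not beat srcSize - minGain, telling the caller to emit
 *           a raw uncompressed block instead (all repeat modes are reset in that case,
 *           since no fresh entropy tables get transmitted),
 *           or an error code. */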
1512 MEM_STATIC size_t ZSTD_compressSequences(seqStore_t* seqStorePtr,
1513                               ZSTD_entropyCTables_t* entropy,
1514                               ZSTD_compressionParameters const* cParams,
1515                               void* dst, size_t dstCapacity,
1516                               size_t srcSize)
1517 {
1518     size_t const cSize = ZSTD_compressSequences_internal(seqStorePtr, entropy, cParams,
1519                                                          dst, dstCapacity);
1520     size_t const minGain = ZSTD_minGain(srcSize);
1521     size_t const maxCSize = srcSize - minGain;
1522     /* When srcSize <= dstCapacity, there is always enough space to emit a raw
1523      * uncompressed block instead. So if compression ran out of space in that case,
1524      * the block is simply not compressible : fall back to a raw uncompressed block.
1525      */
1526     int const uncompressibleError = cSize == ERROR(dstSize_tooSmall) && srcSize <= dstCapacity;
1527 
1528     if (ZSTD_isError(cSize) && !uncompressibleError)
1529         return cSize;
1530     /* Check compressibility */
1531     if (cSize >= maxCSize || uncompressibleError) {
1532         entropy->hufCTable_repeatMode = HUF_repeat_none;
1533         entropy->offcode_repeatMode = FSE_repeat_none;
1534         entropy->matchlength_repeatMode = FSE_repeat_none;
1535         entropy->litlength_repeatMode = FSE_repeat_none;
1536         return 0;
1537     }
1538     assert(!ZSTD_isError(cSize));
1539 
1540     /* confirm repcodes */
1541     { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqStorePtr->rep[i] = seqStorePtr->repToConfirm[i]; }
1542     return cSize;
1543 }
1544 
1545 /* ZSTD_selectBlockCompressor() :
1546  * Not static, but internal use only (used by long distance matcher)
1547  * assumption : strat is a valid strategy */
1548 typedef size_t (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize);
1549 ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
1550 {
1551     static const ZSTD_blockCompressor blockCompressor[2][(unsigned)ZSTD_btultra+1] = {
1552         { ZSTD_compressBlock_fast  /* default for 0 */,
1553           ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy,
1554           ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2,
1555           ZSTD_compressBlock_btopt, ZSTD_compressBlock_btultra },
1556         { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
1557           ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict,
1558           ZSTD_compressBlock_lazy_extDict,ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict,
1559           ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btultra_extDict }
1560     };
1561     ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
1562     assert((U32)strat >= (U32)ZSTD_fast);
1563     assert((U32)strat <= (U32)ZSTD_btultra);
1564 
1565     return blockCompressor[extDict!=0][(U32)strat];
1566 }
1567 
1568 static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
1569                                    const BYTE* anchor, size_t lastLLSize)
1570 {
1571     memcpy(seqStorePtr->lit, anchor, lastLLSize);
1572     seqStorePtr->lit += lastLLSize;
1573 }
1574 
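/*! ZSTD_compressBlock_internal() :
 *  Compress one block : select the block compressor matching the applied strategy
 *  (or the long distance matcher when enabled), run it to fill the seqStore with sequences,
 *  store the trailing literals, then entropy-compress the whole seqStore.
 * @return : compressed size, 0 if the block is not compressible (or too small to attempt compression),
 *           or an error code. */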
1575 static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
1576 {
1577     const BYTE* const base = zc->base;
1578     const BYTE* const istart = (const BYTE*)src;
1579     const U32 current = (U32)(istart-base);
1580     size_t lastLLSize;
1581     const BYTE* anchor;
1582     U32 const extDict = zc->lowLimit < zc->dictLimit;
1583     const ZSTD_blockCompressor blockCompressor =
1584         zc->appliedParams.ldmParams.enableLdm
1585             ? (extDict ? ZSTD_compressBlock_ldm_extDict : ZSTD_compressBlock_ldm)
1586             : ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, extDict);
1587 
1588     if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) return 0;   /* don't even attempt compression below a certain srcSize */
1589     ZSTD_resetSeqStore(&(zc->seqStore));
1590     if (current > zc->nextToUpdate + 384)
1591         zc->nextToUpdate = current - MIN(192, (U32)(current - zc->nextToUpdate - 384));   /* limited update after finding a very long match */
1592 
1593     lastLLSize = blockCompressor(zc, src, srcSize);
1594 
1595     /* Last literals */
1596     anchor = (const BYTE*)src + srcSize - lastLLSize;
1597     ZSTD_storeLastLiterals(&zc->seqStore, anchor, lastLLSize);
1598 
1599     return ZSTD_compressSequences(&zc->seqStore, zc->entropy, &zc->appliedParams.cParams, dst, dstCapacity, srcSize);
1600 }
1601 
1602 
1603 /*! ZSTD_compress_frameChunk() :
1604 *   Compress a chunk of data into one or multiple blocks.
1605 *   All blocks will be terminated, all input will be consumed.
1606 *   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
1607 *   The frame is assumed to have already been started (header already produced).
1608 *   @return : compressed size, or an error code
1609 */
1610 static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
1611                                      void* dst, size_t dstCapacity,
1612                                const void* src, size_t srcSize,
1613                                      U32 lastFrameChunk)
1614 {
1615     size_t blockSize = cctx->blockSize;
1616     size_t remaining = srcSize;
1617     const BYTE* ip = (const BYTE*)src;
1618     BYTE* const ostart = (BYTE*)dst;
1619     BYTE* op = ostart;
1620     U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
1621 
1622     if (cctx->appliedParams.fParams.checksumFlag && srcSize)
1623         XXH64_update(&cctx->xxhState, src, srcSize);
1624 
1625     while (remaining) {
1626         U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
1627         size_t cSize;
1628 
1629         if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
1630             return ERROR(dstSize_tooSmall);   /* not enough space to store compressed block */
1631         if (remaining < blockSize) blockSize = remaining;
1632 
1633         /* preemptive overflow correction:
1634          * 1. correction is large enough:
1635          *    lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog - blockSize
1636          *    1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
1637          *
1638          *    current - newCurrent
1639          *    > (3<<29 + 1<<windowLog - blockSize) - (1<<windowLog + 1<<chainLog)
1640          *    > (3<<29 - blockSize) - (1<<chainLog)
1641          *    > (3<<29 - blockSize) - (1<<30)             (NOTE: chainLog <= 30)
1642          *    > 1<<29 - 1<<17
1643          *
1644          * 2. (ip+blockSize - cctx->base) doesn't overflow:
1645          *    In 32 bit mode we limit windowLog to 30 so we don't get
1646          *    differences larger than 1<<31-1.
1647          * 3. cctx->lowLimit < 1<<32:
1648          *    windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
1649          */
1650         if (cctx->lowLimit > (3U<<29)) {
1651             U32 const cycleMask = ((U32)1 << ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy)) - 1;
1652             U32 const current = (U32)(ip - cctx->base);
1653             U32 const newCurrent = (current & cycleMask) + ((U32)1 << cctx->appliedParams.cParams.windowLog);
1654             U32 const correction = current - newCurrent;
1655             ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
1656             ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
1657             ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
1658             assert(current > newCurrent);
1659             assert(correction > 1<<28); /* Loose bound, should be about 1<<29 */
1660             ZSTD_reduceIndex(cctx, correction);
1661             cctx->base += correction;
1662             cctx->dictBase += correction;
1663             cctx->lowLimit -= correction;
1664             cctx->dictLimit -= correction;
1665             if (cctx->nextToUpdate < correction) cctx->nextToUpdate = 0;
1666             else cctx->nextToUpdate -= correction;
1667             DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x\n", correction, cctx->lowLimit);
1668         }
1669 
1670         if ((U32)(ip+blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) {
1671             /* enforce maxDist */
1672             U32 const newLowLimit = (U32)(ip+blockSize - cctx->base) - maxDist;
1673             if (cctx->lowLimit < newLowLimit) cctx->lowLimit = newLowLimit;
1674             if (cctx->dictLimit < cctx->lowLimit) cctx->dictLimit = cctx->lowLimit;
1675         }
1676 
1677         cSize = ZSTD_compressBlock_internal(cctx, op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize, ip, blockSize);
1678         if (ZSTD_isError(cSize)) return cSize;
1679 
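        /* Block_Header is 3 bytes, little-endian :
         * bit 0 = lastBlock flag, bits 1-2 = block type (bt_raw / bt_compressed here),
         * bits 3-23 = block size (content size for raw blocks, compressed size otherwise). */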
1680         if (cSize == 0) {  /* block is not compressible */
1681             U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(blockSize << 3);
1682             if (blockSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
1683             MEM_writeLE32(op, cBlockHeader24);   /* 4th byte will be overwritten by the memcpy just below : not an issue */
1684             memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
1685             cSize = ZSTD_blockHeaderSize+blockSize;
1686         } else {
1687             U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
1688             MEM_writeLE24(op, cBlockHeader24);
1689             cSize += ZSTD_blockHeaderSize;
1690         }
1691 
1692         remaining -= blockSize;
1693         dstCapacity -= cSize;
1694         ip += blockSize;
1695         op += cSize;
1696     }
1697 
1698     if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
1699     return op-ostart;
1700 }
1701 
1702 
1703 static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
1704                                     ZSTD_CCtx_params params, U64 pledgedSrcSize, U32 dictID)
1705 {   BYTE* const op = (BYTE*)dst;
1706     U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
1707     U32   const dictIDSizeCode = params.fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
1708     U32   const checksumFlag = params.fParams.checksumFlag>0;
1709     U32   const windowSize = (U32)1 << params.cParams.windowLog;
1710     U32   const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
1711     BYTE  const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
1712     U32   const fcsCode = params.fParams.contentSizeFlag ?
1713                      (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
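    /* frame header descriptor byte layout :
     * bits 0-1 : dictIDSizeCode, bit 2 : checksumFlag,
     * bit 5 : singleSegment, bits 6-7 : fcsCode (frame content size field width). */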
1714     BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
1715     size_t pos=0;
1716 
1717     if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall);
1718     DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
1719                 !params.fParams.noDictIDFlag, dictID,  dictIDSizeCode);
1720 
1721     if (params.format == ZSTD_f_zstd1) {
1722         DEBUGLOG(4, "writing zstd magic number");
1723         MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
1724         pos = 4;
1725     }
1726     op[pos++] = frameHeaderDescriptionByte;
1727     if (!singleSegment) op[pos++] = windowLogByte;
1728     switch(dictIDSizeCode)
1729     {
1730         default:  assert(0); /* impossible */
1731         case 0 : break;
1732         case 1 : op[pos] = (BYTE)(dictID); pos++; break;
1733         case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
1734         case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
1735     }
1736     switch(fcsCode)
1737     {
1738         default:  assert(0); /* impossible */
1739         case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
1740         case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
1741         case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
1742         case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
1743     }
1744     return pos;
1745 }
1746 
1747 
1748 static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
1749                               void* dst, size_t dstCapacity,
1750                         const void* src, size_t srcSize,
1751                                U32 frame, U32 lastFrameChunk)
1752 {
1753     const BYTE* const ip = (const BYTE*) src;
1754     size_t fhSize = 0;
1755 
1756     DEBUGLOG(5, "ZSTD_compressContinue_internal");
1757     DEBUGLOG(5, "stage: %u", cctx->stage);
1758     if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong);   /* missing init (ZSTD_compressBegin) */
1759 
1760     if (frame && (cctx->stage==ZSTDcs_init)) {
1761         fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
1762                                        cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
1763         if (ZSTD_isError(fhSize)) return fhSize;
1764         dstCapacity -= fhSize;
1765         dst = (char*)dst + fhSize;
1766         cctx->stage = ZSTDcs_ongoing;
1767     }
1768 
1769     /* Check if blocks follow each other */
1770     if (src != cctx->nextSrc) {
1771         /* not contiguous */
1772         ptrdiff_t const delta = cctx->nextSrc - ip;
1773         cctx->lowLimit = cctx->dictLimit;
1774         cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base);
1775         cctx->dictBase = cctx->base;
1776         cctx->base -= delta;
1777         cctx->nextToUpdate = cctx->dictLimit;
1778         if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE) cctx->lowLimit = cctx->dictLimit;   /* too small extDict */
1779     }
1780 
1781     /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
1782     if ((ip+srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) {
1783         ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase;
1784         U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx;
1785         cctx->lowLimit = lowLimitMax;
1786     }
1787 
1788     cctx->nextSrc = ip + srcSize;
1789 
1790     if (srcSize) {
1791         size_t const cSize = frame ?
1792                              ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
1793                              ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
1794         if (ZSTD_isError(cSize)) return cSize;
1795         cctx->consumedSrcSize += srcSize;
1796         return cSize + fhSize;
1797     } else
1798         return fhSize;
1799 }
1800 
1801 size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
1802                               void* dst, size_t dstCapacity,
1803                         const void* src, size_t srcSize)
1804 {
1805     return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
1806 }
1807 
1808 
1809 size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
1810 {
1811     ZSTD_compressionParameters const cParams =
1812             ZSTD_getCParamsFromCCtxParams(cctx->appliedParams, 0, 0);
1813     return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
1814 }
1815 
1816 size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
1817 {
1818     size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
1819     if (srcSize > blockSizeMax) return ERROR(srcSize_wrong);
1820     return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
1821 }
1822 
1823 /*! ZSTD_loadDictionaryContent() :
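 *  Fill the match-finder tables (hash table, chain table or binary tree, depending on the strategy)
 *  with the dictionary content, so that following blocks can reference it as history.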
1824  *  @return : 0, or an error code
1825  */
1826 static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx* zc, const void* src, size_t srcSize)
1827 {
1828     const BYTE* const ip = (const BYTE*) src;
1829     const BYTE* const iend = ip + srcSize;
1830 
1831     /* input becomes current prefix */
1832     zc->lowLimit = zc->dictLimit;
1833     zc->dictLimit = (U32)(zc->nextSrc - zc->base);
1834     zc->dictBase = zc->base;
1835     zc->base += ip - zc->nextSrc;
1836     zc->nextToUpdate = zc->dictLimit;
1837     zc->loadedDictEnd = zc->appliedParams.forceWindow ? 0 : (U32)(iend - zc->base);
1838 
1839     zc->nextSrc = iend;
1840     if (srcSize <= HASH_READ_SIZE) return 0;
1841 
1842     switch(zc->appliedParams.cParams.strategy)
1843     {
1844     case ZSTD_fast:
1845         ZSTD_fillHashTable (zc, iend, zc->appliedParams.cParams.searchLength);
1846         break;
1847     case ZSTD_dfast:
1848         ZSTD_fillDoubleHashTable (zc, iend, zc->appliedParams.cParams.searchLength);
1849         break;
1850 
1851     case ZSTD_greedy:
1852     case ZSTD_lazy:
1853     case ZSTD_lazy2:
1854         if (srcSize >= HASH_READ_SIZE)
1855             ZSTD_insertAndFindFirstIndex(zc, iend-HASH_READ_SIZE, zc->appliedParams.cParams.searchLength);
1856         break;
1857 
1858     case ZSTD_btlazy2:
1859     case ZSTD_btopt:
1860     case ZSTD_btultra:
1861         if (srcSize >= HASH_READ_SIZE)
1862             ZSTD_updateTree(zc, iend-HASH_READ_SIZE, iend, (U32)1 << zc->appliedParams.cParams.searchLog, zc->appliedParams.cParams.searchLength);
1863         break;
1864 
1865     default:
1866         assert(0);  /* not possible : not a valid strategy id */
1867     }
1868 
1869     zc->nextToUpdate = (U32)(iend - zc->base);
1870     return 0;
1871 }
1872 
1873 
1874 /* Dictionaries that assign zero probability to symbols that do show up cause problems
1875    during FSE encoding.  Refuse any dictionary that assigns zero probability to a symbol
1876    that we may encounter during compression.
1877    NOTE: This behavior is not standard and could be improved in the future. */
1878 static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
1879     U32 s;
1880     if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted);
1881     for (s = 0; s <= maxSymbolValue; ++s) {
1882         if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted);
1883     }
1884     return 0;
1885 }
1886 
1887 
1888 /* Dictionary format :
1889  * See :
1890  * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
1891  */
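/* As parsed by ZSTD_loadZstdDictionary() below, a zstd dictionary contains, in order :
 * a 4-byte magic number, a 4-byte dictionary ID,
 * a Huffman table for literals, FSE tables for offset codes, match lengths and literal lengths,
 * three starting repcodes, and finally the raw content used as prefix history. */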
1892 /*! ZSTD_loadZstdDictionary() :
1893  * @return : 0, or an error code
1894  *  assumptions : the magic number is assumed to have been checked already,
1895  *                and dictSize is assumed to be > 8.
1896  */
1897 static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
1898 {
1899     const BYTE* dictPtr = (const BYTE*)dict;
1900     const BYTE* const dictEnd = dictPtr + dictSize;
1901     short offcodeNCount[MaxOff+1];
1902     unsigned offcodeMaxValue = MaxOff;
1903 
1904     ZSTD_STATIC_ASSERT(sizeof(cctx->entropy->workspace) >= (1<<MAX(MLFSELog,LLFSELog)));
1905 
1906     dictPtr += 4;   /* skip magic number */
1907     cctx->dictID = cctx->appliedParams.fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr);
1908     dictPtr += 4;
1909 
1910     {   unsigned maxSymbolValue = 255;
1911         size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)cctx->entropy->hufCTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
1912         if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
1913         if (maxSymbolValue < 255) return ERROR(dictionary_corrupted);
1914         dictPtr += hufHeaderSize;
1915     }
1916 
1917     {   unsigned offcodeLog;
1918         size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
1919         if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
1920         if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
1921         /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
1922         CHECK_E( FSE_buildCTable_wksp(cctx->entropy->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->entropy->workspace, sizeof(cctx->entropy->workspace)),
1923                  dictionary_corrupted);
1924         dictPtr += offcodeHeaderSize;
1925     }
1926 
1927     {   short matchlengthNCount[MaxML+1];
1928         unsigned matchlengthMaxValue = MaxML, matchlengthLog;
1929         size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
1930         if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
1931         if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
1932         /* Every match length code must have non-zero probability */
1933         CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
1934         CHECK_E( FSE_buildCTable_wksp(cctx->entropy->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->entropy->workspace, sizeof(cctx->entropy->workspace)),
1935                  dictionary_corrupted);
1936         dictPtr += matchlengthHeaderSize;
1937     }
1938 
1939     {   short litlengthNCount[MaxLL+1];
1940         unsigned litlengthMaxValue = MaxLL, litlengthLog;
1941         size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
1942         if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
1943         if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
1944         /* Every literal length code must have non-zero probability */
1945         CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
1946         CHECK_E( FSE_buildCTable_wksp(cctx->entropy->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->entropy->workspace, sizeof(cctx->entropy->workspace)),
1947                  dictionary_corrupted);
1948         dictPtr += litlengthHeaderSize;
1949     }
1950 
1951     if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
1952     cctx->seqStore.rep[0] = MEM_readLE32(dictPtr+0);
1953     cctx->seqStore.rep[1] = MEM_readLE32(dictPtr+4);
1954     cctx->seqStore.rep[2] = MEM_readLE32(dictPtr+8);
1955     dictPtr += 12;
1956 
1957     {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
1958         U32 offcodeMax = MaxOff;
1959         if (dictContentSize <= ((U32)-1) - 128 KB) {
1960             U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
1961             offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
1962         }
1963         /* All offset values <= dictContentSize + 128 KB must be representable */
1964         CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
1965         /* All repCodes must be <= dictContentSize and != 0*/
1966         {   U32 u;
1967             for (u=0; u<3; u++) {
1968                 if (cctx->seqStore.rep[u] == 0) return ERROR(dictionary_corrupted);
1969                 if (cctx->seqStore.rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
1970         }   }
1971 
1972         cctx->entropy->hufCTable_repeatMode = HUF_repeat_valid;
1973         cctx->entropy->offcode_repeatMode = FSE_repeat_valid;
1974         cctx->entropy->matchlength_repeatMode = FSE_repeat_valid;
1975         cctx->entropy->litlength_repeatMode = FSE_repeat_valid;
1976         return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize);
1977     }
1978 }
1979 
1980 /** ZSTD_compress_insertDictionary() :
1981 *   @return : 0, or an error code */
1982 static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx* cctx,
1983                                        const void* dict, size_t dictSize,
1984                                              ZSTD_dictMode_e dictMode)
1985 {
1986     DEBUGLOG(5, "ZSTD_compress_insertDictionary");
1987     if ((dict==NULL) || (dictSize<=8)) return 0;
1988 
1989     /* dict restricted modes */
1990     if (dictMode==ZSTD_dm_rawContent)
1991         return ZSTD_loadDictionaryContent(cctx, dict, dictSize);
1992 
1993     if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
1994         if (dictMode == ZSTD_dm_auto) {
1995             DEBUGLOG(5, "raw content dictionary detected");
1996             return ZSTD_loadDictionaryContent(cctx, dict, dictSize);
1997         }
1998         if (dictMode == ZSTD_dm_fullDict)
1999             return ERROR(dictionary_wrong);
2000         assert(0);   /* impossible */
2001     }
2002 
2003     /* dict as full zstd dictionary */
2004     return ZSTD_loadZstdDictionary(cctx, dict, dictSize);
2005 }
2006 
2007 /*! ZSTD_compressBegin_internal() :
2008  * @return : 0, or an error code */
2009 static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
2010                              const void* dict, size_t dictSize,
2011                              ZSTD_dictMode_e dictMode,
2012                              const ZSTD_CDict* cdict,
2013                                    ZSTD_CCtx_params params, U64 pledgedSrcSize,
2014                                    ZSTD_buffered_policy_e zbuff)
2015 {
2016     DEBUGLOG(4, "ZSTD_compressBegin_internal");
2017     /* params are supposed to be fully validated at this point */
2018     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
2019     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
2020 
2021     if (cdict && cdict->dictContentSize>0) {
2022         return ZSTD_copyCCtx_internal(cctx, cdict->refContext,
2023                                       params.fParams, pledgedSrcSize,
2024                                       zbuff);
2025     }
2026 
2027     CHECK_F( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
2028                                      ZSTDcrp_continue, zbuff) );
2029     return ZSTD_compress_insertDictionary(cctx, dict, dictSize, dictMode);
2030 }
2031 
2032 size_t ZSTD_compressBegin_advanced_internal(
2033                                     ZSTD_CCtx* cctx,
2034                                     const void* dict, size_t dictSize,
2035                                     ZSTD_dictMode_e dictMode,
2036                                     ZSTD_CCtx_params params,
2037                                     unsigned long long pledgedSrcSize)
2038 {
2039     /* compression parameters verification and optimization */
2040     CHECK_F( ZSTD_checkCParams(params.cParams) );
2041     return ZSTD_compressBegin_internal(cctx, dict, dictSize, dictMode, NULL,
2042                                        params, pledgedSrcSize,
2043                                        ZSTDb_not_buffered);
2044 }
2045 
2046 /*! ZSTD_compressBegin_advanced() :
2047 *   @return : 0, or an error code */
2048 size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
2049                              const void* dict, size_t dictSize,
2050                                    ZSTD_parameters params, unsigned long long pledgedSrcSize)
2051 {
2052     ZSTD_CCtx_params const cctxParams =
2053             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
2054     return ZSTD_compressBegin_advanced_internal(cctx, dict, dictSize, ZSTD_dm_auto,
2055                                                 cctxParams,
2056                                                 pledgedSrcSize);
2057 }
2058 
2059 size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
2060 {
2061     ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
2062     ZSTD_CCtx_params const cctxParams =
2063             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
2064     return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dm_auto, NULL,
2065                                        cctxParams, 0, ZSTDb_not_buffered);
2066 }
2067 
2068 size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
2069 {
2070     return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
2071 }
2072 
2073 
2074 /*! ZSTD_writeEpilogue() :
2075 *   Ends a frame.
2076 *   @return : nb of bytes written into dst (or an error code) */
2077 static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
2078 {
2079     BYTE* const ostart = (BYTE*)dst;
2080     BYTE* op = ostart;
2081     size_t fhSize = 0;
2082 
2083     DEBUGLOG(5, "ZSTD_writeEpilogue");
2084     if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong);  /* init missing */
2085 
2086     /* special case : empty frame */
2087     if (cctx->stage == ZSTDcs_init) {
2088         fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0);
2089         if (ZSTD_isError(fhSize)) return fhSize;
2090         dstCapacity -= fhSize;
2091         op += fhSize;
2092         cctx->stage = ZSTDcs_ongoing;
2093     }
2094 
2095     if (cctx->stage != ZSTDcs_ending) {
2096         /* write one last empty block, make it the "last" block */
2097         U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
2098         if (dstCapacity<4) return ERROR(dstSize_tooSmall);
2099         MEM_writeLE32(op, cBlockHeader24);
2100         op += ZSTD_blockHeaderSize;
2101         dstCapacity -= ZSTD_blockHeaderSize;
2102     }
2103 
2104     if (cctx->appliedParams.fParams.checksumFlag) {
2105         U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
2106         if (dstCapacity<4) return ERROR(dstSize_tooSmall);
2107         MEM_writeLE32(op, checksum);
2108         op += 4;
2109     }
2110 
2111     cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
2112     return op-ostart;
2113 }
2114 
2115 
2116 size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
2117                          void* dst, size_t dstCapacity,
2118                    const void* src, size_t srcSize)
2119 {
2120     size_t endResult;
2121     size_t const cSize = ZSTD_compressContinue_internal(cctx,
2122                                 dst, dstCapacity, src, srcSize,
2123                                 1 /* frame mode */, 1 /* last chunk */);
2124     if (ZSTD_isError(cSize)) return cSize;
2125     endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
2126     if (ZSTD_isError(endResult)) return endResult;
2127     if (cctx->appliedParams.fParams.contentSizeFlag) {  /* control src size */
2128         DEBUGLOG(4, "end of frame : controlling src size");
2129         if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) {
2130             DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u",
2131                 (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
2132             return ERROR(srcSize_wrong);
2133     }   }
2134     return cSize + endResult;
2135 }
2136 
2137 
2138 static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
2139                                void* dst, size_t dstCapacity,
2140                          const void* src, size_t srcSize,
2141                          const void* dict,size_t dictSize,
2142                                ZSTD_parameters params)
2143 {
2144     ZSTD_CCtx_params const cctxParams =
2145             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
2146     return ZSTD_compress_advanced_internal(cctx,
2147                                           dst, dstCapacity,
2148                                           src, srcSize,
2149                                           dict, dictSize,
2150                                           cctxParams);
2151 }
2152 
2153 size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
2154                                void* dst, size_t dstCapacity,
2155                          const void* src, size_t srcSize,
2156                          const void* dict,size_t dictSize,
2157                                ZSTD_parameters params)
2158 {
2159     CHECK_F(ZSTD_checkCParams(params.cParams));
2160     return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
2161 }
2162 
2163 /* Internal */
2164 size_t ZSTD_compress_advanced_internal(
2165         ZSTD_CCtx* cctx,
2166         void* dst, size_t dstCapacity,
2167         const void* src, size_t srcSize,
2168         const void* dict,size_t dictSize,
2169         ZSTD_CCtx_params params)
2170 {
2171     CHECK_F( ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dm_auto, NULL,
2172                                          params, srcSize, ZSTDb_not_buffered) );
2173     return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
2174 }
2175 
2176 size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize,
2177                                const void* dict, size_t dictSize, int compressionLevel)
2178 {
2179     ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, dict ? dictSize : 0);
2180     params.fParams.contentSizeFlag = 1;
2181     return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
2182 }
2183 
2184 size_t ZSTD_compressCCtx (ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
2185 {
2186     return ZSTD_compress_usingDict(ctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
2187 }
2188 
2189 size_t ZSTD_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
2190 {
2191     size_t result;
2192     ZSTD_CCtx ctxBody;
2193     memset(&ctxBody, 0, sizeof(ctxBody));
2194     ctxBody.customMem = ZSTD_defaultCMem;
2195     result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
2196     ZSTD_free(ctxBody.workSpace, ZSTD_defaultCMem);  /* can't free ctxBody itself, as it's on stack; free only heap content */
2197     return result;
2198 }
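/* Example usage of the one-shot API above (illustrative sketch only ; error handling elided) :
 *
 *     size_t const bound = ZSTD_compressBound(srcSize);
 *     void* const dstBuf = malloc(bound);
 *     size_t const cSize = ZSTD_compress(dstBuf, bound, src, srcSize, ZSTD_CLEVEL_DEFAULT);
 *     if (ZSTD_isError(cSize)) { ... }
 *
 * Sizing the destination with ZSTD_compressBound() guarantees enough room for the worst case,
 * so this pattern cannot fail with dstSize_tooSmall. */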
2199 
2200 
2201 /* =====  Dictionary API  ===== */
2202 
2203 /*! ZSTD_estimateCDictSize_advanced() :
2204  *  Estimate the amount of memory needed to create a dictionary with the following arguments */
2205 size_t ZSTD_estimateCDictSize_advanced(
2206         size_t dictSize, ZSTD_compressionParameters cParams,
2207         ZSTD_dictLoadMethod_e dictLoadMethod)
2208 {
2209     DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (U32)sizeof(ZSTD_CDict));
2210     DEBUGLOG(5, "CCtx estimate : %u",
2211              (U32)ZSTD_estimateCCtxSize_usingCParams(cParams));
2212     return sizeof(ZSTD_CDict) + ZSTD_estimateCCtxSize_usingCParams(cParams)
2213            + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
2214 }
2215 
2216 size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
2217 {
2218     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
2219     return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
2220 }
2221 
2222 size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
2223 {
2224     if (cdict==NULL) return 0;   /* support sizeof on NULL */
2225     DEBUGLOG(5, "sizeof(*cdict) : %u", (U32)sizeof(*cdict));
2226     DEBUGLOG(5, "ZSTD_sizeof_CCtx : %u", (U32)ZSTD_sizeof_CCtx(cdict->refContext));
2227     return ZSTD_sizeof_CCtx(cdict->refContext) + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
2228 }
2229 
2230 static size_t ZSTD_initCDict_internal(
2231                     ZSTD_CDict* cdict,
2232               const void* dictBuffer, size_t dictSize,
2233                     ZSTD_dictLoadMethod_e dictLoadMethod,
2234                     ZSTD_dictMode_e dictMode,
2235                     ZSTD_compressionParameters cParams)
2236 {
2237     DEBUGLOG(5, "ZSTD_initCDict_internal, mode %u", (U32)dictMode);
2238     if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
2239         cdict->dictBuffer = NULL;
2240         cdict->dictContent = dictBuffer;
2241     } else {
2242         void* const internalBuffer = ZSTD_malloc(dictSize, cdict->refContext->customMem);
2243         cdict->dictBuffer = internalBuffer;
2244         cdict->dictContent = internalBuffer;
2245         if (!internalBuffer) return ERROR(memory_allocation);
2246         memcpy(internalBuffer, dictBuffer, dictSize);
2247     }
2248     cdict->dictContentSize = dictSize;
2249 
2250     {   ZSTD_CCtx_params cctxParams = cdict->refContext->requestedParams;
2251         cctxParams.cParams = cParams;
2252         CHECK_F( ZSTD_compressBegin_internal(cdict->refContext,
2253                                         cdict->dictContent, dictSize, dictMode,
2254                                         NULL,
2255                                         cctxParams, ZSTD_CONTENTSIZE_UNKNOWN,
2256                                         ZSTDb_not_buffered) );
2257     }
2258 
2259     return 0;
2260 }
2261 
2262 ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
2263                                       ZSTD_dictLoadMethod_e dictLoadMethod,
2264                                       ZSTD_dictMode_e dictMode,
2265                                       ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
2266 {
2267     DEBUGLOG(5, "ZSTD_createCDict_advanced, mode %u", (U32)dictMode);
2268     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
2269 
2270     {   ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
2271         ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(customMem);
2272 
2273         if (!cdict || !cctx) {
2274             ZSTD_free(cdict, customMem);
2275             ZSTD_freeCCtx(cctx);
2276             return NULL;
2277         }
2278         cdict->refContext = cctx;
2279         if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
2280                                         dictBuffer, dictSize,
2281                                         dictLoadMethod, dictMode,
2282                                         cParams) )) {
2283             ZSTD_freeCDict(cdict);
2284             return NULL;
2285         }
2286 
2287         return cdict;
2288     }
2289 }
2290 
2291 ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
2292 {
2293     ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
2294     return ZSTD_createCDict_advanced(dict, dictSize,
2295                                      ZSTD_dlm_byCopy, ZSTD_dm_auto,
2296                                      cParams, ZSTD_defaultCMem);
2297 }
2298 
2299 ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
2300 {
2301     ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
2302     return ZSTD_createCDict_advanced(dict, dictSize,
2303                                      ZSTD_dlm_byRef, ZSTD_dm_auto,
2304                                      cParams, ZSTD_defaultCMem);
2305 }
2306 
2307 size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
2308 {
2309     if (cdict==NULL) return 0;   /* support free on NULL */
2310     {   ZSTD_customMem const cMem = cdict->refContext->customMem;
2311         ZSTD_freeCCtx(cdict->refContext);
2312         ZSTD_free(cdict->dictBuffer, cMem);
2313         ZSTD_free(cdict, cMem);
2314         return 0;
2315     }
2316 }
2317 
2318 /*! ZSTD_initStaticCDict() :
2319  *  Generate a digested dictionary in the provided memory area.
2320  *  workspace: The memory area to emplace the dictionary into.
2321  *             The provided pointer must be 8-byte aligned.
2322  *             It must outlive dictionary usage.
2323  *  workspaceSize: Use ZSTD_estimateCDictSize()
2324  *                 to determine how large the workspace must be.
2325  *  cParams : use ZSTD_getCParams() to transform a compression level
2326  *            into its relevant cParams.
2327  * @return : pointer to ZSTD_CDict, or NULL if error (workspace too small)
2328  *  Note : there is no corresponding "free" function.
2329  *         Since workspace was allocated externally, it must be freed externally.
2330  */
2331 ZSTD_CDict* ZSTD_initStaticCDict(void* workspace, size_t workspaceSize,
2332                            const void* dict, size_t dictSize,
2333                                  ZSTD_dictLoadMethod_e dictLoadMethod,
2334                                  ZSTD_dictMode_e dictMode,
2335                                  ZSTD_compressionParameters cParams)
2336 {
2337     size_t const cctxSize = ZSTD_estimateCCtxSize_usingCParams(cParams);
2338     size_t const neededSize = sizeof(ZSTD_CDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize)
2339                             + cctxSize;
2340     ZSTD_CDict* const cdict = (ZSTD_CDict*) workspace;
2341     void* ptr;
2342     DEBUGLOG(5, "(size_t)workspace & 7 : %u", (U32)(size_t)workspace & 7);
2343     if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
2344     DEBUGLOG(5, "(workspaceSize < neededSize) : (%u < %u) => %u",
2345         (U32)workspaceSize, (U32)neededSize, (U32)(workspaceSize < neededSize));
2346     if (workspaceSize < neededSize) return NULL;
2347 
2348     if (dictLoadMethod == ZSTD_dlm_byCopy) {
2349         memcpy(cdict+1, dict, dictSize);
2350         dict = cdict+1;
2351         ptr = (char*)workspace + sizeof(ZSTD_CDict) + dictSize;
2352     } else {
2353         ptr = cdict+1;
2354     }
2355     cdict->refContext = ZSTD_initStaticCCtx(ptr, cctxSize);
2356 
2357     if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
2358                                               dict, dictSize,
2359                                               ZSTD_dlm_byRef, dictMode,
2360                                               cParams) ))
2361         return NULL;
2362 
2363     return cdict;
2364 }
2365 
2366 ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict) {
2367     return cdict->refContext->appliedParams.cParams;
2368 }
2369 
2370 /* ZSTD_compressBegin_usingCDict_advanced() :
2371  * cdict must be != NULL */
2372 size_t ZSTD_compressBegin_usingCDict_advanced(
2373     ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
2374     ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
2375 {
2376     if (cdict==NULL) return ERROR(dictionary_wrong);
2377     {   ZSTD_CCtx_params params = cctx->requestedParams;
2378         params.cParams = ZSTD_getCParamsFromCDict(cdict);
2379         params.fParams = fParams;
2380         DEBUGLOG(5, "ZSTD_compressBegin_usingCDict_advanced");
2381         return ZSTD_compressBegin_internal(cctx,
2382                                            NULL, 0, ZSTD_dm_auto,
2383                                            cdict,
2384                                            params, pledgedSrcSize,
2385                                            ZSTDb_not_buffered);
2386     }
2387 }
2388 
2389 /* ZSTD_compressBegin_usingCDict() :
2390  * pledgedSrcSize=0 means "unknown"
2391  * if pledgedSrcSize>0, it will enable contentSizeFlag */
2392 size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
2393 {
2394     ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
2395     DEBUGLOG(5, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
2396     return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, 0);
2397 }
2398 
2399 size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
2400                                 void* dst, size_t dstCapacity,
2401                                 const void* src, size_t srcSize,
2402                                 const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
2403 {
2404     CHECK_F (ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize));   /* will check if cdict != NULL */
2405     return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
2406 }
2407 
2408 /*! ZSTD_compress_usingCDict() :
2409  *  Compression using a digested Dictionary.
2410  *  Faster startup than ZSTD_compress_usingDict(), recommended when the same dictionary is used multiple times.
2411  *  Note that compression parameters are decided at CDict creation time
2412  *  while frame parameters are hardcoded */
2413 size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
2414                                 void* dst, size_t dstCapacity,
2415                                 const void* src, size_t srcSize,
2416                                 const ZSTD_CDict* cdict)
2417 {
2418     ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
2419     return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
2420 }
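/* Example usage of the CDict API (illustrative sketch only ; error handling elided) :
 *
 *     ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer, dictSize, compressionLevel);
 *     size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
 *     ...   the same cdict can be reused across many compression jobs   ...
 *     ZSTD_freeCDict(cdict);
 *
 * Creating the CDict once amortizes the cost of digesting the dictionary. */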
2421 
2422 
2423 
2424 /* ******************************************************************
2425 *  Streaming
2426 ********************************************************************/
2427 
2428 ZSTD_CStream* ZSTD_createCStream(void)
2429 {
2430     return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
2431 }
2432 
2433 ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
2434 {
2435     return ZSTD_initStaticCCtx(workspace, workspaceSize);
2436 }
2437 
2438 ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
2439 {   /* CStream and CCtx are now the same object */
2440     return ZSTD_createCCtx_advanced(customMem);
2441 }
2442 
2443 size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
2444 {
2445     return ZSTD_freeCCtx(zcs);   /* same object */
2446 }
2447 
2448 
2449 
2450 /*======   Initialization   ======*/
2451 
2452 size_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX; }
2453 
2454 size_t ZSTD_CStreamOutSize(void)
2455 {
2456     return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
2457 }
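
/* [Editorial illustration, not part of the library source]
 * ZSTD_CStreamInSize() and ZSTD_CStreamOutSize() are recommended buffer
 * sizes : one full block of input, and enough room to hold a compressed
 * block even when the data is incompressible (plus block header and
 * optional checksum). A hedged sketch of how a caller might size its
 * staging buffers (malloc from <stdlib.h>; names are placeholders) :
 *
 *     size_t const inSize  = ZSTD_CStreamInSize();
 *     size_t const outSize = ZSTD_CStreamOutSize();
 *     void* const inBuf  = malloc(inSize);    // refilled from the data source
 *     void* const outBuf = malloc(outSize);   // drained to the data sink
 *     // NULL checks omitted in this sketch
 */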
2458 
2459 static size_t ZSTD_resetCStream_internal(ZSTD_CStream* zcs,
2460                     const void* dict, size_t dictSize, ZSTD_dictMode_e dictMode,
2461                     const ZSTD_CDict* cdict,
2462                     const ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
2463 {
2464     DEBUGLOG(4, "ZSTD_resetCStream_internal");
2465     /* params are supposed to be fully validated at this point */
2466     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
2467     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
2468 
2469     CHECK_F( ZSTD_compressBegin_internal(zcs,
2470                                         dict, dictSize, dictMode,
2471                                         cdict,
2472                                         params, pledgedSrcSize,
2473                                         ZSTDb_buffered) );
2474 
2475     zcs->inToCompress = 0;
2476     zcs->inBuffPos = 0;
2477     zcs->inBuffTarget = zcs->blockSize;
2478     zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
2479     zcs->streamStage = zcss_load;
2480     zcs->frameEnded = 0;
2481     return 0;   /* ready to go */
2482 }
2483 
2484 size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
2485 {
2486     ZSTD_CCtx_params params = zcs->requestedParams;
2487     params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
2488     params.cParams = ZSTD_getCParamsFromCCtxParams(params, pledgedSrcSize, 0);
2489     DEBUGLOG(4, "ZSTD_resetCStream");
2490     return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dm_auto, zcs->cdict, params, pledgedSrcSize);
2491 }
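
/* [Editorial illustration, not part of the library source]
 * ZSTD_resetCStream() starts a new frame on an already-initialized stream,
 * keeping the dictionary and parameters selected at init time; passing the
 * exact source size, when known, lets the frame header advertise it.
 * A hedged sketch, assuming zcs was set up once with ZSTD_initCStream()
 * and fileSize is known by the caller (0 meaning "unknown"), inside a
 * function returning size_t :
 *
 *     size_t const err = ZSTD_resetCStream(zcs, fileSize);
 *     if (ZSTD_isError(err)) return err;
 *     // then feed data with ZSTD_compressStream() and close with ZSTD_endStream()
 */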
2492 
2493 /*! ZSTD_initCStream_internal() :
2494  *  Note : not static, but hidden (not exposed). Used by zstdmt_compress.c
2495  *  Assumption 1 : params are valid
2496  *  Assumption 2 : dict and cdict are never both defined (at most one of them) */
2497 size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
2498                     const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
2499                     ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
2500 {
2501     DEBUGLOG(4, "ZSTD_initCStream_internal");
2502     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
2503     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
2504 
2505     if (dict && dictSize >= 8) {
2506         DEBUGLOG(5, "loading dictionary of size %u", (U32)dictSize);
2507         if (zcs->staticSize) {   /* static CCtx : never uses malloc */
2508             /* incompatible with internal cdict creation */
2509             return ERROR(memory_allocation);
2510         }
2511         ZSTD_freeCDict(zcs->cdictLocal);
2512         zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
2513                                             ZSTD_dlm_byCopy, ZSTD_dm_auto,
2514                                             params.cParams, zcs->customMem);
2515         zcs->cdict = zcs->cdictLocal;
2516         if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
2517     } else {
2518         if (cdict) {
2519             params.cParams = ZSTD_getCParamsFromCDict(cdict);  /* cParams are enforced from cdict */
2520         }
2521         ZSTD_freeCDict(zcs->cdictLocal);
2522         zcs->cdictLocal = NULL;
2523         zcs->cdict = cdict;
2524     }
2525 
2526     params.compressionLevel = ZSTD_CLEVEL_CUSTOM;
2527     zcs->requestedParams = params;
2528 
2529     return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dm_auto, zcs->cdict, params, pledgedSrcSize);
2530 }
2531 
2532 /* ZSTD_initCStream_usingCDict_advanced() :
2533  * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
2534 size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
2535                                             const ZSTD_CDict* cdict,
2536                                             ZSTD_frameParameters fParams,
2537                                             unsigned long long pledgedSrcSize)
2538 {   /* cannot handle NULL cdict (does not know what to do) */
2539     if (!cdict) return ERROR(dictionary_wrong);
2540     {   ZSTD_CCtx_params params = zcs->requestedParams;
2541         params.cParams = ZSTD_getCParamsFromCDict(cdict);
2542         params.fParams = fParams;
2543         return ZSTD_initCStream_internal(zcs,
2544                                 NULL, 0, cdict,
2545                                 params, pledgedSrcSize);
2546     }
2547 }
2548 
2549 /* note : cdict must outlive compression session */
2550 size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
2551 {
2552     ZSTD_frameParameters const fParams = { 0 /* contentSize */, 0 /* checksum */, 0 /* noDictID */ };
2553     return ZSTD_initCStream_usingCDict_advanced(zcs, cdict, fParams, 0);  /* note : will check that cdict != NULL */
2554 }
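
/* [Editorial illustration, not part of the library source]
 * The _advanced variant above additionally lets the caller pick frame
 * parameters. A hedged sketch enabling a content checksum while reusing a
 * digested dictionary (zcs, cdict and srcSize are assumed to exist; this
 * entry point belongs to the advanced / static-linking-only API), inside a
 * function returning size_t :
 *
 *     ZSTD_frameParameters const fParams = { 1, 1, 0 };  // contentSizeFlag, checksumFlag, noDictIDFlag
 *     size_t const err = ZSTD_initCStream_usingCDict_advanced(zcs, cdict, fParams, srcSize);
 *     if (ZSTD_isError(err)) return err;
 */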
2555 
2556 size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
2557                                  const void* dict, size_t dictSize,
2558                                  ZSTD_parameters params, unsigned long long pledgedSrcSize)
2559 {
2560     ZSTD_CCtx_params const cctxParams =
2561             ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
2562     CHECK_F( ZSTD_checkCParams(params.cParams) );
2563     return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, cctxParams, pledgedSrcSize);
2564 }
2565 
2566 size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
2567 {
2568     ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
2569     ZSTD_CCtx_params const cctxParams =
2570             ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
2571     return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, cctxParams, 0);
2572 }
2573 
2574 size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize)
2575 {
2576     ZSTD_CCtx_params cctxParams;
2577     ZSTD_parameters const params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
2578     cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
2579     cctxParams.fParams.contentSizeFlag = (pledgedSrcSize>0);
2580     return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, cctxParams, pledgedSrcSize);
2581 }
2582 
2583 size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
2584 {
2585     return ZSTD_initCStream_srcSize(zcs, compressionLevel, 0);
2586 }
2587 
2588 /*======   Compression   ======*/
2589 
2590 MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
2591                            const void* src, size_t srcSize)
2592 {
2593     size_t const length = MIN(dstCapacity, srcSize);
2594     if (length) memcpy(dst, src, length);
2595     return length;
2596 }
2597 
2598 /** ZSTD_compressStream_generic():
2599  *  internal function for all *compressStream*() variants and *compress_generic()
2600  *  non-static, because it can be called from zstdmt_compress.c
2601  * @return : hint size for next input */
2602 size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
2603                                    ZSTD_outBuffer* output,
2604                                    ZSTD_inBuffer* input,
2605                                    ZSTD_EndDirective const flushMode)
2606 {
2607     const char* const istart = (const char*)input->src;
2608     const char* const iend = istart + input->size;
2609     const char* ip = istart + input->pos;
2610     char* const ostart = (char*)output->dst;
2611     char* const oend = ostart + output->size;
2612     char* op = ostart + output->pos;
2613     U32 someMoreWork = 1;
2614 
2615     /* check expectations */
2616     DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (U32)flushMode);
2617     assert(zcs->inBuff != NULL);
2618     assert(zcs->inBuffSize>0);
2619     assert(zcs->outBuff!= NULL);
2620     assert(zcs->outBuffSize>0);
2621     assert(output->pos <= output->size);
2622     assert(input->pos <= input->size);
2623 
2624     while (someMoreWork) {
2625         switch(zcs->streamStage)
2626         {
2627         case zcss_init:
2628             /* call ZSTD_initCStream() first ! */
2629             return ERROR(init_missing);
2630 
2631         case zcss_load:
2632             if ( (flushMode == ZSTD_e_end)
2633               && ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip))  /* enough dstCapacity */
2634               && (zcs->inBuffPos == 0) ) {
2635                 /* shortcut to compression pass directly into output buffer */
2636                 size_t const cSize = ZSTD_compressEnd(zcs,
2637                                                 op, oend-op, ip, iend-ip);
2638                 DEBUGLOG(4, "ZSTD_compressEnd : %u", (U32)cSize);
2639                 if (ZSTD_isError(cSize)) return cSize;
2640                 ip = iend;
2641                 op += cSize;
2642                 zcs->frameEnded = 1;
2643                 ZSTD_startNewCompression(zcs);
2644                 someMoreWork = 0; break;
2645             }
2646             /* complete loading into inBuffer */
2647             {   size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
2648                 size_t const loaded = ZSTD_limitCopy(
2649                                         zcs->inBuff + zcs->inBuffPos, toLoad,
2650                                         ip, iend-ip);
2651                 zcs->inBuffPos += loaded;
2652                 ip += loaded;
2653                 if ( (flushMode == ZSTD_e_continue)
2654                   && (zcs->inBuffPos < zcs->inBuffTarget) ) {
2655                     /* not enough input to fill a full block : stop here */
2656                     someMoreWork = 0; break;
2657                 }
2658                 if ( (flushMode == ZSTD_e_flush)
2659                   && (zcs->inBuffPos == zcs->inToCompress) ) {
2660                     /* nothing new to compress : stop here */
2661                     someMoreWork = 0; break;
2662                 }
2663             }
2664             /* compress current block (note : this stage cannot be stopped in the middle) */
2665             DEBUGLOG(5, "stream compression stage (flushMode==%u)", (U32)flushMode);
2666             {   void* cDst;
2667                 size_t cSize;
2668                 size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
2669                 size_t oSize = oend-op;
2670                 unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
2671                 if (oSize >= ZSTD_compressBound(iSize))
2672                     cDst = op;   /* compress into output buffer, to skip flush stage */
2673                 else
2674                     cDst = zcs->outBuff, oSize = zcs->outBuffSize;
2675                 cSize = lastBlock ?
2676                         ZSTD_compressEnd(zcs, cDst, oSize,
2677                                     zcs->inBuff + zcs->inToCompress, iSize) :
2678                         ZSTD_compressContinue(zcs, cDst, oSize,
2679                                     zcs->inBuff + zcs->inToCompress, iSize);
2680                 if (ZSTD_isError(cSize)) return cSize;
2681                 zcs->frameEnded = lastBlock;
2682                 /* prepare next block */
2683                 zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
2684                 if (zcs->inBuffTarget > zcs->inBuffSize)
2685                     zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
2686                 DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
2687                          (U32)zcs->inBuffTarget, (U32)zcs->inBuffSize);
2688                 if (!lastBlock)
2689                     assert(zcs->inBuffTarget <= zcs->inBuffSize);
2690                 zcs->inToCompress = zcs->inBuffPos;
2691                 if (cDst == op) {  /* no need to flush */
2692                     op += cSize;
2693                     if (zcs->frameEnded) {
2694                         DEBUGLOG(5, "Frame completed directly in outBuffer");
2695                         someMoreWork = 0;
2696                         ZSTD_startNewCompression(zcs);
2697                     }
2698                     break;
2699                 }
2700                 zcs->outBuffContentSize = cSize;
2701                 zcs->outBuffFlushedSize = 0;
2702                 zcs->streamStage = zcss_flush; /* pass-through to flush stage */
2703             }
2704         /* fall-through */
2705         case zcss_flush:
2706             DEBUGLOG(5, "flush stage");
2707             {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
2708                 size_t const flushed = ZSTD_limitCopy(op, oend-op,
2709                             zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
2710                 DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
2711                             (U32)toFlush, (U32)(oend-op), (U32)flushed);
2712                 op += flushed;
2713                 zcs->outBuffFlushedSize += flushed;
2714                 if (toFlush!=flushed) {
2715                     /* flush not fully completed, presumably because dst is too small */
2716                     assert(op==oend);
2717                     someMoreWork = 0;
2718                     break;
2719                 }
2720                 zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
2721                 if (zcs->frameEnded) {
2722                     DEBUGLOG(5, "Frame completed on flush");
2723                     someMoreWork = 0;
2724                     ZSTD_startNewCompression(zcs);
2725                     break;
2726                 }
2727                 zcs->streamStage = zcss_load;
2728                 break;
2729             }
2730 
2731         default: /* impossible */
2732             assert(0);
2733         }
2734     }
2735 
2736     input->pos = ip - istart;
2737     output->pos = op - ostart;
2738     if (zcs->frameEnded) return 0;
2739     {   size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
2740         if (hintInSize==0) hintInSize = zcs->blockSize;
2741         return hintInSize;
2742     }
2743 }
2744 
2745 size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
2746 {
2747     /* check conditions */
2748     if (output->pos > output->size) return ERROR(GENERIC);
2749     if (input->pos  > input->size)  return ERROR(GENERIC);
2750 
2751     return ZSTD_compressStream_generic(zcs, output, input, ZSTD_e_continue);
2752 }
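
/* [Editorial illustration, not part of the library source]
 * A hedged sketch of the classic streaming loop built on the public API :
 * initialize once, push input with ZSTD_compressStream() until each chunk
 * is consumed, then drain the epilogue with ZSTD_endStream() until it
 * returns 0. compressFile, fin, fout and level are placeholders for this
 * sketch, and fwrite() results are not checked to keep the example short.
 *
 *     #include <stdio.h>
 *     #include <stdlib.h>
 *     #include "zstd.h"
 *
 *     static int compressFile(FILE* fin, FILE* fout, int level)
 *     {
 *         size_t const inSize  = ZSTD_CStreamInSize();
 *         size_t const outSize = ZSTD_CStreamOutSize();
 *         void*  const inBuf   = malloc(inSize);
 *         void*  const outBuf  = malloc(outSize);
 *         ZSTD_CStream* const zcs = ZSTD_createCStream();
 *         int result = -1;
 *         if (inBuf && outBuf && zcs
 *             && !ZSTD_isError(ZSTD_initCStream(zcs, level))) {
 *             size_t readSize;
 *             size_t remaining;
 *             while ((readSize = fread(inBuf, 1, inSize, fin)) > 0) {
 *                 ZSTD_inBuffer input = { inBuf, readSize, 0 };
 *                 while (input.pos < input.size) {     // loop until this chunk is consumed
 *                     ZSTD_outBuffer output = { outBuf, outSize, 0 };
 *                     size_t const hint = ZSTD_compressStream(zcs, &output, &input);
 *                     if (ZSTD_isError(hint)) goto _cleanup;
 *                     fwrite(outBuf, 1, output.pos, fout);
 *                 }
 *             }
 *             do {                                     // finish the frame
 *                 ZSTD_outBuffer output = { outBuf, outSize, 0 };
 *                 remaining = ZSTD_endStream(zcs, &output);
 *                 if (ZSTD_isError(remaining)) goto _cleanup;
 *                 fwrite(outBuf, 1, output.pos, fout);
 *             } while (remaining > 0);                 // 0 means fully flushed
 *             result = 0;
 *         }
 *     _cleanup:
 *         ZSTD_freeCStream(zcs); free(inBuf); free(outBuf);
 *         return result;
 *     }
 */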
2753 
2754 
2755 size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
2756                               ZSTD_outBuffer* output,
2757                               ZSTD_inBuffer* input,
2758                               ZSTD_EndDirective endOp)
2759 {
2760     DEBUGLOG(5, "ZSTD_compress_generic");
2761     /* check conditions */
2762     if (output->pos > output->size) return ERROR(GENERIC);
2763     if (input->pos  > input->size)  return ERROR(GENERIC);
2764     assert(cctx!=NULL);
2765 
2766     /* transparent initialization stage */
2767     if (cctx->streamStage == zcss_init) {
2768         ZSTD_prefixDict const prefixDict = cctx->prefixDict;
2769         ZSTD_CCtx_params params = cctx->requestedParams;
2770         params.cParams = ZSTD_getCParamsFromCCtxParams(
2771                 cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);
2772         memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));  /* single usage */
2773         assert(prefixDict.dict==NULL || cctx->cdict==NULL);   /* only one can be set */
2774         DEBUGLOG(4, "ZSTD_compress_generic : transparent init stage");
2775 
2776 #ifdef ZSTD_MULTITHREAD
2777         if (params.nbThreads > 1) {
2778             if (cctx->mtctx == NULL || cctx->appliedParams.nbThreads != params.nbThreads) {
2779                 ZSTDMT_freeCCtx(cctx->mtctx);
2780                 cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbThreads, cctx->customMem);
2781                 if (cctx->mtctx == NULL) return ERROR(memory_allocation);
2782             }
2783             DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbThreads=%u", params.nbThreads);
2784             CHECK_F( ZSTDMT_initCStream_internal(
2785                              cctx->mtctx,
2786                              prefixDict.dict, prefixDict.dictSize, ZSTD_dm_rawContent,
2787                              cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
2788             cctx->streamStage = zcss_load;
2789             cctx->appliedParams.nbThreads = params.nbThreads;
2790         } else
2791 #endif
2792         {
2793             CHECK_F( ZSTD_resetCStream_internal(
2794                              cctx, prefixDict.dict, prefixDict.dictSize,
2795                              prefixDict.dictMode, cctx->cdict, params,
2796                              cctx->pledgedSrcSizePlusOne-1) );
2797     }   }
2798 
2799     /* compression stage */
2800 #ifdef ZSTD_MULTITHREAD
2801     if (cctx->appliedParams.nbThreads > 1) {
2802         size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
2803         DEBUGLOG(5, "ZSTDMT_compressStream_generic result : %u", (U32)flushMin);
2804         if ( ZSTD_isError(flushMin)
2805           || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
2806             ZSTD_startNewCompression(cctx);
2807         }
2808         return flushMin;
2809     }
2810 #endif
2811     CHECK_F( ZSTD_compressStream_generic(cctx, output, input, endOp) );
2812     DEBUGLOG(5, "completed ZSTD_compress_generic");
2813     return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
2814 }
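
/* [Editorial illustration, not part of the library source]
 * A hedged sketch of single-shot compression through this generic entry
 * point : parameters are set on the context beforehand (the parameter names
 * ZSTD_p_compressionLevel / ZSTD_p_checksumFlag are the ones exposed by the
 * advanced API in this version of zstd.h), then all input is pushed with
 * ZSTD_e_end. cctx, dst, dstCapacity, src and srcSize are assumed, and the
 * snippet sits in a function returning size_t. With a single thread and
 * dstCapacity >= ZSTD_compressBound(srcSize), one call completes the frame;
 * the loop covers the general case.
 *
 *     ZSTD_outBuffer output = { dst, dstCapacity, 0 };
 *     ZSTD_inBuffer  input  = { src, srcSize, 0 };
 *     size_t err;
 *     err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 19);
 *     if (ZSTD_isError(err)) return err;
 *     err = ZSTD_CCtx_setParameter(cctx, ZSTD_p_checksumFlag, 1);
 *     if (ZSTD_isError(err)) return err;
 *     do {
 *         err = ZSTD_compress_generic(cctx, &output, &input, ZSTD_e_end);
 *         if (ZSTD_isError(err)) return err;
 *     } while (err != 0);          // 0 means the frame is complete
 *     // output.pos now holds the compressed size
 */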
2815 
2816 size_t ZSTD_compress_generic_simpleArgs (
2817                             ZSTD_CCtx* cctx,
2818                             void* dst, size_t dstCapacity, size_t* dstPos,
2819                       const void* src, size_t srcSize, size_t* srcPos,
2820                             ZSTD_EndDirective endOp)
2821 {
2822     ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
2823     ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
2824     /* ZSTD_compress_generic() will check validity of dstPos and srcPos */
2825     size_t const cErr = ZSTD_compress_generic(cctx, &output, &input, endOp);
2826     *dstPos = output.pos;
2827     *srcPos = input.pos;
2828     return cErr;
2829 }
2830 
2831 
2832 /*======   Finalize   ======*/
2833 
2834 /*! ZSTD_flushStream() :
2835 *   @return : amount of data remaining to flush */
2836 size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
2837 {
2838     ZSTD_inBuffer input = { NULL, 0, 0 };
2839     if (output->pos > output->size) return ERROR(GENERIC);
2840     CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_flush) );
2841     return zcs->outBuffContentSize - zcs->outBuffFlushedSize;  /* remaining to flush */
2842 }
2843 
2844 
2845 size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
2846 {
2847     ZSTD_inBuffer input = { NULL, 0, 0 };
2848     if (output->pos > output->size) return ERROR(GENERIC);
2849     CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_end) );
2850     {   size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
2851         size_t const checksumSize = zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4;
2852         size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize + lastBlockSize + checksumSize;
2853         DEBUGLOG(5, "ZSTD_endStream : remaining to flush : %u",
2854                 (unsigned)toFlush);
2855         return toFlush;
2856     }
2857 }
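
/* [Editorial illustration, not part of the library source]
 * Both ZSTD_flushStream() and ZSTD_endStream() report how many bytes are
 * still waiting inside the internal output buffer, so a caller with a small
 * destination buffer simply repeats the call until it returns 0. A hedged
 * sketch, assuming zcs, outBuf and outSize from an earlier init, inside a
 * function returning size_t :
 *
 *     size_t remaining;
 *     do {
 *         ZSTD_outBuffer output = { outBuf, outSize, 0 };
 *         remaining = ZSTD_endStream(zcs, &output);   // ZSTD_flushStream() keeps the frame open instead
 *         if (ZSTD_isError(remaining)) return remaining;
 *         // write output.pos bytes to the destination here
 *     } while (remaining != 0);
 */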
2858 
2859 
2860 /*-=====  Pre-defined compression levels  =====-*/
2861 
2862 #define ZSTD_MAX_CLEVEL     22
2863 int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
2864 
2865 static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
2866 {   /* "default" - guarantees a monotonically increasing memory budget */
2867     /* W,  C,  H,  S,  L, TL, strat */
2868     { 18, 12, 12,  1,  7, 16, ZSTD_fast    },  /* level  0 - never used */
2869     { 19, 13, 14,  1,  7, 16, ZSTD_fast    },  /* level  1 */
2870     { 19, 15, 16,  1,  6, 16, ZSTD_fast    },  /* level  2 */
2871     { 20, 16, 17,  1,  5, 16, ZSTD_dfast   },  /* level  3 */
2872     { 20, 17, 18,  1,  5, 16, ZSTD_dfast   },  /* level  4 */
2873     { 20, 17, 18,  2,  5, 16, ZSTD_greedy  },  /* level  5 */
2874     { 21, 17, 19,  2,  5, 16, ZSTD_lazy    },  /* level  6 */
2875     { 21, 18, 19,  3,  5, 16, ZSTD_lazy    },  /* level  7 */
2876     { 21, 18, 20,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
2877     { 21, 19, 20,  3,  5, 16, ZSTD_lazy2   },  /* level  9 */
2878     { 21, 19, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
2879     { 22, 20, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
2880     { 22, 20, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
2881     { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 13 */
2882     { 22, 21, 22,  6,  5, 16, ZSTD_lazy2   },  /* level 14 */
2883     { 22, 21, 22,  5,  5, 16, ZSTD_btlazy2 },  /* level 15 */
2884     { 23, 22, 22,  5,  5, 16, ZSTD_btlazy2 },  /* level 16 */
2885     { 23, 22, 22,  4,  5, 24, ZSTD_btopt   },  /* level 17 */
2886     { 23, 22, 22,  5,  4, 32, ZSTD_btopt   },  /* level 18 */
2887     { 23, 23, 22,  6,  3, 48, ZSTD_btopt   },  /* level 19 */
2888     { 25, 25, 23,  7,  3, 64, ZSTD_btultra },  /* level 20 */
2889     { 26, 26, 24,  7,  3,256, ZSTD_btultra },  /* level 21 */
2890     { 27, 27, 25,  9,  3,512, ZSTD_btultra },  /* level 22 */
2891 },
2892 {   /* for srcSize <= 256 KB */
2893     /* W,  C,  H,  S,  L, TL, strat */
2894     {  0,  0,  0,  0,  0,  0, ZSTD_fast    },  /* level  0 - not used */
2895     { 18, 13, 14,  1,  6,  8, ZSTD_fast    },  /* level  1 */
2896     { 18, 14, 13,  1,  5,  8, ZSTD_dfast   },  /* level  2 */
2897     { 18, 16, 15,  1,  5,  8, ZSTD_dfast   },  /* level  3 */
2898     { 18, 15, 17,  1,  5,  8, ZSTD_greedy  },  /* level  4.*/
2899     { 18, 16, 17,  4,  5,  8, ZSTD_greedy  },  /* level  5.*/
2900     { 18, 16, 17,  3,  5,  8, ZSTD_lazy    },  /* level  6.*/
2901     { 18, 17, 17,  4,  4,  8, ZSTD_lazy    },  /* level  7 */
2902     { 18, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
2903     { 18, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
2904     { 18, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
2905     { 18, 18, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 11.*/
2906     { 18, 18, 17,  7,  4,  8, ZSTD_lazy2   },  /* level 12.*/
2907     { 18, 19, 17,  6,  4,  8, ZSTD_btlazy2 },  /* level 13 */
2908     { 18, 18, 18,  4,  4, 16, ZSTD_btopt   },  /* level 14.*/
2909     { 18, 18, 18,  4,  3, 16, ZSTD_btopt   },  /* level 15.*/
2910     { 18, 19, 18,  6,  3, 32, ZSTD_btopt   },  /* level 16.*/
2911     { 18, 19, 18,  8,  3, 64, ZSTD_btopt   },  /* level 17.*/
2912     { 18, 19, 18,  9,  3,128, ZSTD_btopt   },  /* level 18.*/
2913     { 18, 19, 18, 10,  3,256, ZSTD_btopt   },  /* level 19.*/
2914     { 18, 19, 18, 11,  3,512, ZSTD_btultra },  /* level 20.*/
2915     { 18, 19, 18, 12,  3,512, ZSTD_btultra },  /* level 21.*/
2916     { 18, 19, 18, 13,  3,512, ZSTD_btultra },  /* level 22.*/
2917 },
2918 {   /* for srcSize <= 128 KB */
2919     /* W,  C,  H,  S,  L, TL, strat */
2920     { 17, 12, 12,  1,  7,  8, ZSTD_fast    },  /* level  0 - not used */
2921     { 17, 12, 13,  1,  6,  8, ZSTD_fast    },  /* level  1 */
2922     { 17, 13, 16,  1,  5,  8, ZSTD_fast    },  /* level  2 */
2923     { 17, 16, 16,  2,  5,  8, ZSTD_dfast   },  /* level  3 */
2924     { 17, 13, 15,  3,  4,  8, ZSTD_greedy  },  /* level  4 */
2925     { 17, 15, 17,  4,  4,  8, ZSTD_greedy  },  /* level  5 */
2926     { 17, 16, 17,  3,  4,  8, ZSTD_lazy    },  /* level  6 */
2927     { 17, 15, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  7 */
2928     { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
2929     { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
2930     { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
2931     { 17, 17, 17,  7,  4,  8, ZSTD_lazy2   },  /* level 11 */
2932     { 17, 17, 17,  8,  4,  8, ZSTD_lazy2   },  /* level 12 */
2933     { 17, 18, 17,  6,  4,  8, ZSTD_btlazy2 },  /* level 13.*/
2934     { 17, 17, 17,  7,  3,  8, ZSTD_btopt   },  /* level 14.*/
2935     { 17, 17, 17,  7,  3, 16, ZSTD_btopt   },  /* level 15.*/
2936     { 17, 18, 17,  7,  3, 32, ZSTD_btopt   },  /* level 16.*/
2937     { 17, 18, 17,  7,  3, 64, ZSTD_btopt   },  /* level 17.*/
2938     { 17, 18, 17,  7,  3,256, ZSTD_btopt   },  /* level 18.*/
2939     { 17, 18, 17,  8,  3,256, ZSTD_btopt   },  /* level 19.*/
2940     { 17, 18, 17,  9,  3,256, ZSTD_btultra },  /* level 20.*/
2941     { 17, 18, 17, 10,  3,256, ZSTD_btultra },  /* level 21.*/
2942     { 17, 18, 17, 11,  3,512, ZSTD_btultra },  /* level 22.*/
2943 },
2944 {   /* for srcSize <= 16 KB */
2945     /* W,  C,  H,  S,  L, TL, strat */
2946     { 14, 12, 12,  1,  7,  6, ZSTD_fast    },  /* level  0 - not used */
2947     { 14, 14, 14,  1,  6,  6, ZSTD_fast    },  /* level  1 */
2948     { 14, 14, 14,  1,  4,  6, ZSTD_fast    },  /* level  2 */
2949     { 14, 14, 14,  1,  4,  6, ZSTD_dfast   },  /* level  3.*/
2950     { 14, 14, 14,  4,  4,  6, ZSTD_greedy  },  /* level  4.*/
2951     { 14, 14, 14,  3,  4,  6, ZSTD_lazy    },  /* level  5.*/
2952     { 14, 14, 14,  4,  4,  6, ZSTD_lazy2   },  /* level  6 */
2953     { 14, 14, 14,  5,  4,  6, ZSTD_lazy2   },  /* level  7 */
2954     { 14, 14, 14,  6,  4,  6, ZSTD_lazy2   },  /* level  8.*/
2955     { 14, 15, 14,  6,  4,  6, ZSTD_btlazy2 },  /* level  9.*/
2956     { 14, 15, 14,  3,  3,  6, ZSTD_btopt   },  /* level 10.*/
2957     { 14, 15, 14,  6,  3,  8, ZSTD_btopt   },  /* level 11.*/
2958     { 14, 15, 14,  6,  3, 16, ZSTD_btopt   },  /* level 12.*/
2959     { 14, 15, 14,  6,  3, 24, ZSTD_btopt   },  /* level 13.*/
2960     { 14, 15, 15,  6,  3, 48, ZSTD_btopt   },  /* level 14.*/
2961     { 14, 15, 15,  6,  3, 64, ZSTD_btopt   },  /* level 15.*/
2962     { 14, 15, 15,  6,  3, 96, ZSTD_btopt   },  /* level 16.*/
2963     { 14, 15, 15,  6,  3,128, ZSTD_btopt   },  /* level 17.*/
2964     { 14, 15, 15,  6,  3,256, ZSTD_btopt   },  /* level 18.*/
2965     { 14, 15, 15,  7,  3,256, ZSTD_btopt   },  /* level 19.*/
2966     { 14, 15, 15,  8,  3,256, ZSTD_btultra },  /* level 20.*/
2967     { 14, 15, 15,  9,  3,256, ZSTD_btultra },  /* level 21.*/
2968     { 14, 15, 15, 10,  3,256, ZSTD_btultra },  /* level 22.*/
2969 },
2970 };
2971 
2972 #if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1)
2973 /* This function checks that the memory budget of ZSTD_defaultCParameters[0]
2974  * increases monotonically with the compression level.
2975  * It runs once, on first ZSTD_getCParams() usage, if ZSTD_DEBUG is enabled
2976  */
2977 MEM_STATIC void ZSTD_check_compressionLevel_monotonicIncrease_memoryBudget(void)
2978 {
2979     int level;
2980     for (level=1; level<ZSTD_maxCLevel(); level++) {
2981         ZSTD_compressionParameters const c1 = ZSTD_defaultCParameters[0][level];
2982         ZSTD_compressionParameters const c2 = ZSTD_defaultCParameters[0][level+1];
2983         assert(c1.windowLog <= c2.windowLog);
2984 #       define ZSTD_TABLECOST(h,c) ((1<<(h)) + (1<<(c)))
2985         assert(ZSTD_TABLECOST(c1.hashLog, c1.chainLog) <= ZSTD_TABLECOST(c2.hashLog, c2.chainLog));
2986     }
2987 }
2988 #endif
2989 
2990 /*! ZSTD_getCParams() :
2991 *   @return ZSTD_compressionParameters structure for a selected compression level, `srcSizeHint` and `dictSize`.
2992 *   Both size values are optional : provide 0 if not known or not applicable. */
2993 ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
2994 {
2995     size_t const addedSize = srcSizeHint ? 0 : 500;
2996     U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : (U64)-1;
2997     U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);   /* intentional underflow for srcSizeHint == 0 */
2998 
2999 #if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1)
3000     static int g_monotonicTest = 1;
3001     if (g_monotonicTest) {
3002         ZSTD_check_compressionLevel_monotonicIncrease_memoryBudget();
3003         g_monotonicTest=0;
3004     }
3005 #endif
3006 
3007     if (compressionLevel <= 0) compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* 0 == default; no negative compressionLevel yet */
3008     if (compressionLevel > ZSTD_MAX_CLEVEL) compressionLevel = ZSTD_MAX_CLEVEL;
3009     { ZSTD_compressionParameters const cp = ZSTD_defaultCParameters[tableID][compressionLevel];
3010       return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize); }
3011 
3012 }
3013 
3014 /*! ZSTD_getParams() :
3015 *   same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
3016 *   All fields of `ZSTD_frameParameters` are set to default (0) */
3017 ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
3018     ZSTD_parameters params;
3019     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSizeHint, dictSize);
3020     memset(&params, 0, sizeof(params));
3021     params.cParams = cParams;
3022     return params;
3023 }
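
/* [Editorial illustration, not part of the library source]
 * A hedged sketch of using ZSTD_getParams() as a starting point and then
 * overriding individual fields before an _advanced init (zcs, dict,
 * dictSize and srcSize are assumed; windowLog 24 is just an example value
 * within the accepted range) :
 *
 *     ZSTD_parameters params = ZSTD_getParams(19, srcSize, dictSize);
 *     params.fParams.checksumFlag = 1;        // append a 32-bit content checksum
 *     params.cParams.windowLog = 24;          // override the level-19 default
 *     {   size_t const err = ZSTD_initCStream_advanced(zcs, dict, dictSize, params, srcSize);
 *         if (ZSTD_isError(err)) return err;  // inside a function returning size_t
 *     }
 */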
3024