xref: /freebsd/sys/contrib/zstd/lib/compress/zstd_compress.c (revision 0bd5d367989b3d2de0e8d8ceaa2e31d3f0d96536)
1 /*
2  * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
3  * All rights reserved.
4  *
5  * This source code is licensed under both the BSD-style license (found in the
6  * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7  * in the COPYING file in the root directory of this source tree).
8  * You may select, at your option, one of the above-listed licenses.
9  */
10 
11 /*-*************************************
12 *  Dependencies
13 ***************************************/
14 #include <limits.h>         /* INT_MAX */
15 #include <string.h>         /* memset */
16 #include "cpu.h"
17 #include "mem.h"
18 #include "hist.h"           /* HIST_countFast_wksp */
19 #define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
20 #include "fse.h"
21 #define HUF_STATIC_LINKING_ONLY
22 #include "huf.h"
23 #include "zstd_compress_internal.h"
24 #include "zstd_compress_sequences.h"
25 #include "zstd_compress_literals.h"
26 #include "zstd_fast.h"
27 #include "zstd_double_fast.h"
28 #include "zstd_lazy.h"
29 #include "zstd_opt.h"
30 #include "zstd_ldm.h"
31 
32 
33 /*-*************************************
34 *  Helper functions
35 ***************************************/
36 size_t ZSTD_compressBound(size_t srcSize) {
37     return ZSTD_COMPRESSBOUND(srcSize);
38 }
39 
40 
41 /*-*************************************
42 *  Context memory management
43 ***************************************/
struct ZSTD_CDict_s {
    void* dictBuffer;                        /* internal copy of the dictionary content; NULL when content is referenced externally */
    const void* dictContent;                 /* read-only view on the dictionary content actually in use */
    size_t dictContentSize;
    void* workspace;                         /* backing storage for matchState tables */
    size_t workspaceSize;
    ZSTD_matchState_t matchState;            /* digested (pre-indexed) dictionary state */
    ZSTD_compressedBlockState_t cBlockState; /* entropy tables derived from the dictionary */
    ZSTD_customMem customMem;                /* allocator used to create (and later free) this CDict */
    U32 dictID;                              /* dictionary ID, 0 when not applicable */
};  /* typedef'd to ZSTD_CDict within "zstd.h" */
55 
56 ZSTD_CCtx* ZSTD_createCCtx(void)
57 {
58     return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
59 }
60 
/* ZSTD_initCCtx() :
 * Brings a freshly allocated CCtx to a clean, reset state.
 * `memManager` is stored for all subsequent (de)allocations tied to this cctx. */
static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
{
    assert(cctx != NULL);
    memset(cctx, 0, sizeof(*cctx));
    cctx->customMem = memManager;
    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());   /* detect BMI2 support once, at creation time */
    {   size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
        assert(!ZSTD_isError(err));   /* reset of a zeroed context is expected to always succeed */
        (void)err;
    }
}
72 
73 ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
74 {
75     ZSTD_STATIC_ASSERT(zcss_init==0);
76     ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
77     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
78     {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
79         if (!cctx) return NULL;
80         ZSTD_initCCtx(cctx, customMem);
81         return cctx;
82     }
83 }
84 
/*! ZSTD_initStaticCCtx() :
 *  Builds a CCtx inside a caller-provided memory area (`workspace`).
 *  The resulting cctx never allocates: freeing it is invalid (see ZSTD_freeCCtx()).
 * @return : the cctx, or NULL if workspace is too small or misaligned. */
ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)
{
    ZSTD_CCtx* const cctx = (ZSTD_CCtx*) workspace;
    if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
    if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
    memset(workspace, 0, workspaceSize);   /* may be a bit generous, could memset be smaller ? */
    cctx->staticSize = workspaceSize;      /* non-zero staticSize marks this cctx as static */
    cctx->workSpace = (void*)(cctx+1);     /* remaining space, directly after the cctx structure */
    cctx->workSpaceSize = workspaceSize - sizeof(ZSTD_CCtx);

    /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
    if (cctx->workSpaceSize < HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t)) return NULL;
    assert(((size_t)cctx->workSpace & (sizeof(void*)-1)) == 0);   /* ensure correct alignment */
    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)cctx->workSpace;
    cctx->blockState.nextCBlock = cctx->blockState.prevCBlock + 1;
    {
        /* entropy workspace sits right after the two block states */
        void* const ptr = cctx->blockState.nextCBlock + 1;
        cctx->entropyWorkspace = (U32*)ptr;
    }
    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
    return cctx;
}
107 
/**
 * Clears and frees all of the dictionaries in the CCtx:
 * local dictionary (owned buffer + digested cdict), prefix, and cdict reference.
 */
static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
{
    ZSTD_free(cctx->localDict.dictBuffer, cctx->customMem);  /* non-NULL only when the dict was loaded byCopy */
    ZSTD_freeCDict(cctx->localDict.cdict);
    memset(&cctx->localDict, 0, sizeof(cctx->localDict));
    memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
    cctx->cdict = NULL;   /* may have aliased localDict.cdict, which is now freed */
}
119 
120 static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
121 {
122     size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
123     size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
124     return bufferSize + cdictSize;
125 }
126 
/* ZSTD_freeCCtxContent() :
 * Releases everything the cctx owns (workspace, dictionaries, mt context),
 * but not the cctx structure itself.
 * Must only be called on dynamically allocated contexts (staticSize == 0). */
static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
{
    assert(cctx != NULL);
    assert(cctx->staticSize == 0);   /* static cctx owns no freeable memory */
    ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL;
    ZSTD_clearAllDicts(cctx);
#ifdef ZSTD_MULTITHREAD
    ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
#endif
}
137 
/*! ZSTD_freeCCtx() :
 *  Frees a cctx created through ZSTD_createCCtx*().
 * @return : 0 on success (NULL input included),
 *           or an error code when the cctx is statically allocated. */
size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
{
    if (cctx==NULL) return 0;   /* support free on NULL */
    RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
                    "not compatible with static CCtx");
    ZSTD_freeCCtxContent(cctx);
    ZSTD_free(cctx, cctx->customMem);
    return 0;
}
147 
148 
/* ZSTD_sizeof_mtctx() :
 * memory consumed by the multi-threading context,
 * or 0 when the library is built single-threaded */
static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    return ZSTDMT_sizeof_CCtx(cctx->mtctx);
#else
    (void)cctx;
    return 0;
#endif
}
158 
159 
160 size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
161 {
162     if (cctx==NULL) return 0;   /* support sizeof on NULL */
163     return sizeof(*cctx) + cctx->workSpaceSize
164            + ZSTD_sizeof_localDict(cctx->localDict)
165            + ZSTD_sizeof_mtctx(cctx);
166 }
167 
168 size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
169 {
170     return ZSTD_sizeof_CCtx(zcs);  /* same object */
171 }
172 
/* private API call, for dictBuilder only :
 * @return : pointer to the cctx's internal sequence store */
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
175 
176 static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
177         ZSTD_compressionParameters cParams)
178 {
179     ZSTD_CCtx_params cctxParams;
180     memset(&cctxParams, 0, sizeof(cctxParams));
181     cctxParams.cParams = cParams;
182     cctxParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;  /* should not matter, as all cParams are presumed properly defined */
183     assert(!ZSTD_checkCParams(cParams));
184     cctxParams.fParams.contentSizeFlag = 1;
185     return cctxParams;
186 }
187 
188 static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
189         ZSTD_customMem customMem)
190 {
191     ZSTD_CCtx_params* params;
192     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
193     params = (ZSTD_CCtx_params*)ZSTD_calloc(
194             sizeof(ZSTD_CCtx_params), customMem);
195     if (!params) { return NULL; }
196     params->customMem = customMem;
197     params->compressionLevel = ZSTD_CLEVEL_DEFAULT;
198     params->fParams.contentSizeFlag = 1;
199     return params;
200 }
201 
202 ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
203 {
204     return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
205 }
206 
207 size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
208 {
209     if (params == NULL) { return 0; }
210     ZSTD_free(params, params->customMem);
211     return 0;
212 }
213 
214 size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
215 {
216     return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
217 }
218 
219 size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
220     RETURN_ERROR_IF(!cctxParams, GENERIC);
221     memset(cctxParams, 0, sizeof(*cctxParams));
222     cctxParams->compressionLevel = compressionLevel;
223     cctxParams->fParams.contentSizeFlag = 1;
224     return 0;
225 }
226 
/*! ZSTD_CCtxParams_init_advanced() :
 *  Initializes cctxParams from a complete, explicit set of parameters.
 * @return : 0, or an error code if cctxParams is NULL or cParams are out of bounds. */
size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
{
    RETURN_ERROR_IF(!cctxParams, GENERIC);
    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
    memset(cctxParams, 0, sizeof(*cctxParams));
    cctxParams->cParams = params.cParams;
    cctxParams->fParams = params.fParams;
    cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* should not matter, as all cParams are presumed properly defined */
    assert(!ZSTD_checkCParams(params.cParams));
    return 0;
}
238 
239 /* ZSTD_assignParamsToCCtxParams() :
240  * params is presumed valid at this stage */
241 static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(
242         ZSTD_CCtx_params cctxParams, ZSTD_parameters params)
243 {
244     ZSTD_CCtx_params ret = cctxParams;
245     ret.cParams = params.cParams;
246     ret.fParams = params.fParams;
247     ret.compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* should not matter, as all cParams are presumed properly defined */
248     assert(!ZSTD_checkCParams(params.cParams));
249     return ret;
250 }
251 
/*! ZSTD_cParam_getBounds() :
 *  Reports the valid [lowerBound, upperBound] range for `param`.
 *  On unsupported parameters, the error is carried in bounds.error
 *  (checkable with ZSTD_isError()). */
ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
{
    ZSTD_bounds bounds = { 0, 0, 0 };

    switch(param)
    {
    case ZSTD_c_compressionLevel:
        bounds.lowerBound = ZSTD_minCLevel();
        bounds.upperBound = ZSTD_maxCLevel();
        return bounds;

    case ZSTD_c_windowLog:
        bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
        bounds.upperBound = ZSTD_WINDOWLOG_MAX;
        return bounds;

    case ZSTD_c_hashLog:
        bounds.lowerBound = ZSTD_HASHLOG_MIN;
        bounds.upperBound = ZSTD_HASHLOG_MAX;
        return bounds;

    case ZSTD_c_chainLog:
        bounds.lowerBound = ZSTD_CHAINLOG_MIN;
        bounds.upperBound = ZSTD_CHAINLOG_MAX;
        return bounds;

    case ZSTD_c_searchLog:
        bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
        bounds.upperBound = ZSTD_SEARCHLOG_MAX;
        return bounds;

    case ZSTD_c_minMatch:
        bounds.lowerBound = ZSTD_MINMATCH_MIN;
        bounds.upperBound = ZSTD_MINMATCH_MAX;
        return bounds;

    case ZSTD_c_targetLength:
        bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
        bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
        return bounds;

    case ZSTD_c_strategy:
        bounds.lowerBound = ZSTD_STRATEGY_MIN;
        bounds.upperBound = ZSTD_STRATEGY_MAX;
        return bounds;

    case ZSTD_c_contentSizeFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_checksumFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_dictIDFlag:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_nbWorkers:
        bounds.lowerBound = 0;
#ifdef ZSTD_MULTITHREAD
        bounds.upperBound = ZSTDMT_NBWORKERS_MAX;
#else
        bounds.upperBound = 0;   /* single-threaded build : only 0 is valid */
#endif
        return bounds;

    case ZSTD_c_jobSize:
        bounds.lowerBound = 0;
#ifdef ZSTD_MULTITHREAD
        bounds.upperBound = ZSTDMT_JOBSIZE_MAX;
#else
        bounds.upperBound = 0;   /* single-threaded build : only 0 is valid */
#endif
        return bounds;

    case ZSTD_c_overlapLog:
        bounds.lowerBound = ZSTD_OVERLAPLOG_MIN;
        bounds.upperBound = ZSTD_OVERLAPLOG_MAX;
        return bounds;

    case ZSTD_c_enableLongDistanceMatching:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_ldmHashLog:
        bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
        bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
        return bounds;

    case ZSTD_c_ldmMinMatch:
        bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
        bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
        return bounds;

    case ZSTD_c_ldmBucketSizeLog:
        bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
        bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
        return bounds;

    case ZSTD_c_ldmHashRateLog:
        bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
        bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
        return bounds;

    /* experimental parameters */
    case ZSTD_c_rsyncable:
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_forceMaxWindow :
        bounds.lowerBound = 0;
        bounds.upperBound = 1;
        return bounds;

    case ZSTD_c_format:
        ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
        bounds.lowerBound = ZSTD_f_zstd1;
        bounds.upperBound = ZSTD_f_zstd1_magicless;   /* note : how to ensure at compile time that this is the highest value enum ? */
        return bounds;

    case ZSTD_c_forceAttachDict:
        ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceCopy);
        bounds.lowerBound = ZSTD_dictDefaultAttach;
        bounds.upperBound = ZSTD_dictForceCopy;       /* note : how to ensure at compile time that this is the highest value enum ? */
        return bounds;

    case ZSTD_c_literalCompressionMode:
        ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);
        bounds.lowerBound = ZSTD_lcm_auto;
        bounds.upperBound = ZSTD_lcm_uncompressed;
        return bounds;

    case ZSTD_c_targetCBlockSize:
        bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
        bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
        return bounds;

    default:
        {   ZSTD_bounds const boundError = { ERROR(parameter_unsupported), 0, 0 };
            return boundError;
        }
    }
}
401 
402 /* ZSTD_cParam_clampBounds:
403  * Clamps the value into the bounded range.
404  */
405 static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
406 {
407     ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
408     if (ZSTD_isError(bounds.error)) return bounds.error;
409     if (*value < bounds.lowerBound) *value = bounds.lowerBound;
410     if (*value > bounds.upperBound) *value = bounds.upperBound;
411     return 0;
412 }
413 
/* BOUNDCHECK() :
 * Exits the enclosing function with an error code when `val` is outside
 * the authorized range for `cParam`.
 * Wrapped in do { } while (0) so the macro expands to a single statement,
 * safe after an unbraced `if` (all existing call sites end with `;`). */
#define BOUNDCHECK(cParam, val) do { \
    RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
                    parameter_outOfBound); \
} while (0)
418 
419 
/* ZSTD_isUpdateAuthorized() :
 * @return : 1 if `param` may still be changed after compression has started
 *           (see ZSTD_CCtx_setParameter()), 0 otherwise */
static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
{
    switch(param)
    {
    /* updatable mid-compression */
    case ZSTD_c_compressionLevel:
    case ZSTD_c_hashLog:
    case ZSTD_c_chainLog:
    case ZSTD_c_searchLog:
    case ZSTD_c_minMatch:
    case ZSTD_c_targetLength:
    case ZSTD_c_strategy:
        return 1;

    /* fixed once compression starts */
    case ZSTD_c_format:
    case ZSTD_c_windowLog:
    case ZSTD_c_contentSizeFlag:
    case ZSTD_c_checksumFlag:
    case ZSTD_c_dictIDFlag:
    case ZSTD_c_forceMaxWindow :
    case ZSTD_c_nbWorkers:
    case ZSTD_c_jobSize:
    case ZSTD_c_overlapLog:
    case ZSTD_c_rsyncable:
    case ZSTD_c_enableLongDistanceMatching:
    case ZSTD_c_ldmHashLog:
    case ZSTD_c_ldmMinMatch:
    case ZSTD_c_ldmBucketSizeLog:
    case ZSTD_c_ldmHashRateLog:
    case ZSTD_c_forceAttachDict:
    case ZSTD_c_literalCompressionMode:
    case ZSTD_c_targetCBlockSize:
    default:
        return 0;
    }
}
455 
/*! ZSTD_CCtx_setParameter() :
 *  Sets one compression parameter on the cctx.
 *  After compression has started, only parameters accepted by
 *  ZSTD_isUpdateAuthorized() may still be changed.
 * @return : forwarded from ZSTD_CCtxParams_setParameter() :
 *           a code representing the stored value, or an error code. */
size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
{
    DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
    if (cctx->streamStage != zcss_init) {
        if (ZSTD_isUpdateAuthorized(param)) {
            cctx->cParamsChanged = 1;   /* flag so the new value gets picked up */
        } else {
            RETURN_ERROR(stage_wrong);
    }   }

    switch(param)
    {
    case ZSTD_c_nbWorkers:
        RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
                        "MT not compatible with static alloc");
        break;

    /* all remaining supported parameters : no cctx-level restriction */
    case ZSTD_c_compressionLevel:
    case ZSTD_c_windowLog:
    case ZSTD_c_hashLog:
    case ZSTD_c_chainLog:
    case ZSTD_c_searchLog:
    case ZSTD_c_minMatch:
    case ZSTD_c_targetLength:
    case ZSTD_c_strategy:
    case ZSTD_c_ldmHashRateLog:
    case ZSTD_c_format:
    case ZSTD_c_contentSizeFlag:
    case ZSTD_c_checksumFlag:
    case ZSTD_c_dictIDFlag:
    case ZSTD_c_forceMaxWindow:
    case ZSTD_c_forceAttachDict:
    case ZSTD_c_literalCompressionMode:
    case ZSTD_c_jobSize:
    case ZSTD_c_overlapLog:
    case ZSTD_c_rsyncable:
    case ZSTD_c_enableLongDistanceMatching:
    case ZSTD_c_ldmHashLog:
    case ZSTD_c_ldmMinMatch:
    case ZSTD_c_ldmBucketSizeLog:
    case ZSTD_c_targetCBlockSize:
        break;

    default: RETURN_ERROR(parameter_unsupported);
    }
    return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
}
503 
504 size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
505                                     ZSTD_cParameter param, int value)
506 {
507     DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
508     switch(param)
509     {
510     case ZSTD_c_format :
511         BOUNDCHECK(ZSTD_c_format, value);
512         CCtxParams->format = (ZSTD_format_e)value;
513         return (size_t)CCtxParams->format;
514 
515     case ZSTD_c_compressionLevel : {
516         FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
517         if (value) {  /* 0 : does not change current level */
518             CCtxParams->compressionLevel = value;
519         }
520         if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel;
521         return 0;  /* return type (size_t) cannot represent negative values */
522     }
523 
524     case ZSTD_c_windowLog :
525         if (value!=0)   /* 0 => use default */
526             BOUNDCHECK(ZSTD_c_windowLog, value);
527         CCtxParams->cParams.windowLog = value;
528         return CCtxParams->cParams.windowLog;
529 
530     case ZSTD_c_hashLog :
531         if (value!=0)   /* 0 => use default */
532             BOUNDCHECK(ZSTD_c_hashLog, value);
533         CCtxParams->cParams.hashLog = value;
534         return CCtxParams->cParams.hashLog;
535 
536     case ZSTD_c_chainLog :
537         if (value!=0)   /* 0 => use default */
538             BOUNDCHECK(ZSTD_c_chainLog, value);
539         CCtxParams->cParams.chainLog = value;
540         return CCtxParams->cParams.chainLog;
541 
542     case ZSTD_c_searchLog :
543         if (value!=0)   /* 0 => use default */
544             BOUNDCHECK(ZSTD_c_searchLog, value);
545         CCtxParams->cParams.searchLog = value;
546         return value;
547 
548     case ZSTD_c_minMatch :
549         if (value!=0)   /* 0 => use default */
550             BOUNDCHECK(ZSTD_c_minMatch, value);
551         CCtxParams->cParams.minMatch = value;
552         return CCtxParams->cParams.minMatch;
553 
554     case ZSTD_c_targetLength :
555         BOUNDCHECK(ZSTD_c_targetLength, value);
556         CCtxParams->cParams.targetLength = value;
557         return CCtxParams->cParams.targetLength;
558 
559     case ZSTD_c_strategy :
560         if (value!=0)   /* 0 => use default */
561             BOUNDCHECK(ZSTD_c_strategy, value);
562         CCtxParams->cParams.strategy = (ZSTD_strategy)value;
563         return (size_t)CCtxParams->cParams.strategy;
564 
565     case ZSTD_c_contentSizeFlag :
566         /* Content size written in frame header _when known_ (default:1) */
567         DEBUGLOG(4, "set content size flag = %u", (value!=0));
568         CCtxParams->fParams.contentSizeFlag = value != 0;
569         return CCtxParams->fParams.contentSizeFlag;
570 
571     case ZSTD_c_checksumFlag :
572         /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
573         CCtxParams->fParams.checksumFlag = value != 0;
574         return CCtxParams->fParams.checksumFlag;
575 
576     case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
577         DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
578         CCtxParams->fParams.noDictIDFlag = !value;
579         return !CCtxParams->fParams.noDictIDFlag;
580 
581     case ZSTD_c_forceMaxWindow :
582         CCtxParams->forceWindow = (value != 0);
583         return CCtxParams->forceWindow;
584 
585     case ZSTD_c_forceAttachDict : {
586         const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
587         BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
588         CCtxParams->attachDictPref = pref;
589         return CCtxParams->attachDictPref;
590     }
591 
592     case ZSTD_c_literalCompressionMode : {
593         const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value;
594         BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
595         CCtxParams->literalCompressionMode = lcm;
596         return CCtxParams->literalCompressionMode;
597     }
598 
599     case ZSTD_c_nbWorkers :
600 #ifndef ZSTD_MULTITHREAD
601         RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
602         return 0;
603 #else
604         FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
605         CCtxParams->nbWorkers = value;
606         return CCtxParams->nbWorkers;
607 #endif
608 
609     case ZSTD_c_jobSize :
610 #ifndef ZSTD_MULTITHREAD
611         RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
612         return 0;
613 #else
614         /* Adjust to the minimum non-default value. */
615         if (value != 0 && value < ZSTDMT_JOBSIZE_MIN)
616             value = ZSTDMT_JOBSIZE_MIN;
617         FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value));
618         assert(value >= 0);
619         CCtxParams->jobSize = value;
620         return CCtxParams->jobSize;
621 #endif
622 
623     case ZSTD_c_overlapLog :
624 #ifndef ZSTD_MULTITHREAD
625         RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
626         return 0;
627 #else
628         FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value));
629         CCtxParams->overlapLog = value;
630         return CCtxParams->overlapLog;
631 #endif
632 
633     case ZSTD_c_rsyncable :
634 #ifndef ZSTD_MULTITHREAD
635         RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
636         return 0;
637 #else
638         FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value));
639         CCtxParams->rsyncable = value;
640         return CCtxParams->rsyncable;
641 #endif
642 
643     case ZSTD_c_enableLongDistanceMatching :
644         CCtxParams->ldmParams.enableLdm = (value!=0);
645         return CCtxParams->ldmParams.enableLdm;
646 
647     case ZSTD_c_ldmHashLog :
648         if (value!=0)   /* 0 ==> auto */
649             BOUNDCHECK(ZSTD_c_ldmHashLog, value);
650         CCtxParams->ldmParams.hashLog = value;
651         return CCtxParams->ldmParams.hashLog;
652 
653     case ZSTD_c_ldmMinMatch :
654         if (value!=0)   /* 0 ==> default */
655             BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
656         CCtxParams->ldmParams.minMatchLength = value;
657         return CCtxParams->ldmParams.minMatchLength;
658 
659     case ZSTD_c_ldmBucketSizeLog :
660         if (value!=0)   /* 0 ==> default */
661             BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
662         CCtxParams->ldmParams.bucketSizeLog = value;
663         return CCtxParams->ldmParams.bucketSizeLog;
664 
665     case ZSTD_c_ldmHashRateLog :
666         RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN,
667                         parameter_outOfBound);
668         CCtxParams->ldmParams.hashRateLog = value;
669         return CCtxParams->ldmParams.hashRateLog;
670 
671     case ZSTD_c_targetCBlockSize :
672         if (value!=0)   /* 0 ==> default */
673             BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
674         CCtxParams->targetCBlockSize = value;
675         return CCtxParams->targetCBlockSize;
676 
677     default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
678     }
679 }
680 
681 size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value)
682 {
683     return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
684 }
685 
/*! ZSTD_CCtxParams_getParameter() :
 *  Reads the current value of `param` from CCtxParams into *value.
 * @return : 0, or an error code when the parameter is unsupported
 *           (or requires multithreading support not compiled in). */
size_t ZSTD_CCtxParams_getParameter(
        ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int* value)
{
    switch(param)
    {
    case ZSTD_c_format :
        *value = CCtxParams->format;
        break;
    case ZSTD_c_compressionLevel :
        *value = CCtxParams->compressionLevel;
        break;
    case ZSTD_c_windowLog :
        *value = (int)CCtxParams->cParams.windowLog;
        break;
    case ZSTD_c_hashLog :
        *value = (int)CCtxParams->cParams.hashLog;
        break;
    case ZSTD_c_chainLog :
        *value = (int)CCtxParams->cParams.chainLog;
        break;
    case ZSTD_c_searchLog :
        *value = CCtxParams->cParams.searchLog;
        break;
    case ZSTD_c_minMatch :
        *value = CCtxParams->cParams.minMatch;
        break;
    case ZSTD_c_targetLength :
        *value = CCtxParams->cParams.targetLength;
        break;
    case ZSTD_c_strategy :
        *value = (unsigned)CCtxParams->cParams.strategy;
        break;
    case ZSTD_c_contentSizeFlag :
        *value = CCtxParams->fParams.contentSizeFlag;
        break;
    case ZSTD_c_checksumFlag :
        *value = CCtxParams->fParams.checksumFlag;
        break;
    case ZSTD_c_dictIDFlag :
        *value = !CCtxParams->fParams.noDictIDFlag;   /* stored inverted */
        break;
    case ZSTD_c_forceMaxWindow :
        *value = CCtxParams->forceWindow;
        break;
    case ZSTD_c_forceAttachDict :
        *value = CCtxParams->attachDictPref;
        break;
    case ZSTD_c_literalCompressionMode :
        *value = CCtxParams->literalCompressionMode;
        break;
    case ZSTD_c_nbWorkers :
#ifndef ZSTD_MULTITHREAD
        assert(CCtxParams->nbWorkers == 0);   /* cannot be set in single-threaded builds */
#endif
        *value = CCtxParams->nbWorkers;
        break;
    case ZSTD_c_jobSize :
#ifndef ZSTD_MULTITHREAD
        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
#else
        assert(CCtxParams->jobSize <= INT_MAX);
        *value = (int)CCtxParams->jobSize;
        break;
#endif
    case ZSTD_c_overlapLog :
#ifndef ZSTD_MULTITHREAD
        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
#else
        *value = CCtxParams->overlapLog;
        break;
#endif
    case ZSTD_c_rsyncable :
#ifndef ZSTD_MULTITHREAD
        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
#else
        *value = CCtxParams->rsyncable;
        break;
#endif
    case ZSTD_c_enableLongDistanceMatching :
        *value = CCtxParams->ldmParams.enableLdm;
        break;
    case ZSTD_c_ldmHashLog :
        *value = CCtxParams->ldmParams.hashLog;
        break;
    case ZSTD_c_ldmMinMatch :
        *value = CCtxParams->ldmParams.minMatchLength;
        break;
    case ZSTD_c_ldmBucketSizeLog :
        *value = CCtxParams->ldmParams.bucketSizeLog;
        break;
    case ZSTD_c_ldmHashRateLog :
        *value = CCtxParams->ldmParams.hashRateLog;
        break;
    case ZSTD_c_targetCBlockSize :
        *value = (int)CCtxParams->targetCBlockSize;
        break;
    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
    }
    return 0;
}
786 
787 /** ZSTD_CCtx_setParametersUsingCCtxParams() :
788  *  just applies `params` into `cctx`
789  *  no action is performed, parameters are merely stored.
790  *  If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
791  *    This is possible even if a compression is ongoing.
792  *    In which case, new parameters will be applied on the fly, starting with next compression job.
793  */
size_t ZSTD_CCtx_setParametersUsingCCtxParams(
        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
{
    DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);   /* only before a session starts */
    RETURN_ERROR_IF(cctx->cdict, stage_wrong);   /* refused while a cdict is referenced */

    cctx->requestedParams = *params;   /* whole-structure copy */
    return 0;
}
804 
/*! ZSTD_CCtx_setPledgedSrcSize() :
 *  Declares the total size of the content to be compressed.
 *  Stored internally as value+1 (0 is reserved to mean "not set").
 * @return : 0, or an error code if compression has already started. */
ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
    return 0;
}
812 
813 /**
814  * Initializes the local dict using the requested parameters.
815  * NOTE: This does not use the pledged src size, because it may be used for more
816  * than one compression.
817  */
static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
{
    ZSTD_localDict* const dl = &cctx->localDict;
    ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(
            &cctx->requestedParams, 0, dl->dictSize);   /* 0 == srcSize unknown */
    if (dl->dict == NULL) {
        /* No local dictionary. */
        assert(dl->dictBuffer == NULL);
        assert(dl->cdict == NULL);
        assert(dl->dictSize == 0);
        return 0;
    }
    if (dl->cdict != NULL) {
        assert(cctx->cdict == dl->cdict);
        /* Local dictionary already initialized. */
        return 0;
    }
    assert(dl->dictSize > 0);
    assert(cctx->cdict == NULL);
    assert(cctx->prefixDict.dict == NULL);

    /* digest the raw content into a reusable CDict; content is referenced, not copied */
    dl->cdict = ZSTD_createCDict_advanced(
            dl->dict,
            dl->dictSize,
            ZSTD_dlm_byRef,
            dl->dictContentType,
            cParams,
            cctx->customMem);
    RETURN_ERROR_IF(!dl->cdict, memory_allocation);
    cctx->cdict = dl->cdict;
    return 0;
}
850 
/*! ZSTD_CCtx_loadDictionary_advanced() :
 *  Registers `dict` to be used for all future compressed frames.
 *  byCopy : content is duplicated into an internal buffer;
 *  byRef  : caller's buffer is referenced and must outlive compression.
 *  Actual digestion into a CDict is deferred (see ZSTD_initLocalDict()).
 * @return : 0, or an error code (wrong stage, or static cctx which cannot allocate). */
size_t ZSTD_CCtx_loadDictionary_advanced(
        ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
        ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
{
    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
    RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
                    "no malloc for static CCtx");
    DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
    ZSTD_clearAllDicts(cctx);  /* in case one already exists */
    if (dict == NULL || dictSize == 0)  /* no dictionary mode */
        return 0;
    if (dictLoadMethod == ZSTD_dlm_byRef) {
        cctx->localDict.dict = dict;
    } else {
        /* byCopy : duplicate content, track ownership via dictBuffer */
        void* dictBuffer = ZSTD_malloc(dictSize, cctx->customMem);
        RETURN_ERROR_IF(!dictBuffer, memory_allocation);
        memcpy(dictBuffer, dict, dictSize);
        cctx->localDict.dictBuffer = dictBuffer;
        cctx->localDict.dict = dictBuffer;
    }
    cctx->localDict.dictSize = dictSize;
    cctx->localDict.dictContentType = dictContentType;
    return 0;
}
875 
876 ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
877       ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
878 {
879     return ZSTD_CCtx_loadDictionary_advanced(
880             cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
881 }
882 
883 ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
884 {
885     return ZSTD_CCtx_loadDictionary_advanced(
886             cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
887 }
888 
889 
890 size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
891 {
892     RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
893     /* Free the existing local cdict (if any) to save memory. */
894     ZSTD_clearAllDicts(cctx);
895     cctx->cdict = cdict;
896     return 0;
897 }
898 
899 size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
900 {
901     return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
902 }
903 
904 size_t ZSTD_CCtx_refPrefix_advanced(
905         ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
906 {
907     RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
908     ZSTD_clearAllDicts(cctx);
909     cctx->prefixDict.dict = prefix;
910     cctx->prefixDict.dictSize = prefixSize;
911     cctx->prefixDict.dictContentType = dictContentType;
912     return 0;
913 }
914 
915 /*! ZSTD_CCtx_reset() :
916  *  Also dumps dictionary */
917 size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
918 {
919     if ( (reset == ZSTD_reset_session_only)
920       || (reset == ZSTD_reset_session_and_parameters) ) {
921         cctx->streamStage = zcss_init;
922         cctx->pledgedSrcSizePlusOne = 0;
923     }
924     if ( (reset == ZSTD_reset_parameters)
925       || (reset == ZSTD_reset_session_and_parameters) ) {
926         RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong);
927         ZSTD_clearAllDicts(cctx);
928         return ZSTD_CCtxParams_reset(&cctx->requestedParams);
929     }
930     return 0;
931 }
932 
933 
/** ZSTD_checkCParams() :
    control CParam values remain within authorized range.
    @return : 0, or an error code if one value is beyond authorized range */
size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
{
    /* Each field is validated against the bounds published by
     * ZSTD_cParam_getBounds(); BOUNDCHECK presumably returns the error
     * code directly on violation (matching the @return contract) —
     * its definition is outside this view. */
    BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
    BOUNDCHECK(ZSTD_c_chainLog,  (int)cParams.chainLog);
    BOUNDCHECK(ZSTD_c_hashLog,   (int)cParams.hashLog);
    BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
    BOUNDCHECK(ZSTD_c_minMatch,  (int)cParams.minMatch);
    BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
    /* strategy is an enum : passed without the int cast */
    BOUNDCHECK(ZSTD_c_strategy,  cParams.strategy);
    return 0;
}
948 
/** ZSTD_clampCParams() :
 *  make CParam values within valid range.
 *  @return : valid CParams */
static ZSTD_compressionParameters
ZSTD_clampCParams(ZSTD_compressionParameters cParams)
{
    /* Clamp `val` into [lowerBound, upperBound] as advertised by
     * ZSTD_cParam_getBounds(). `type` is the field's declared type,
     * needed for the assignment casts. */
#   define CLAMP_TYPE(cParam, val, type) {                                \
        ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);         \
        if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound;      \
        else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
    }
    /* Most cParams fields are unsigned; strategy (an enum) uses CLAMP_TYPE directly. */
#   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
    CLAMP(ZSTD_c_windowLog, cParams.windowLog);
    CLAMP(ZSTD_c_chainLog,  cParams.chainLog);
    CLAMP(ZSTD_c_hashLog,   cParams.hashLog);
    CLAMP(ZSTD_c_searchLog, cParams.searchLog);
    CLAMP(ZSTD_c_minMatch,  cParams.minMatch);
    CLAMP(ZSTD_c_targetLength,cParams.targetLength);
    CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
    return cParams;
}
970 
971 /** ZSTD_cycleLog() :
972  *  condition for correct operation : hashLog > 1 */
973 static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
974 {
975     U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
976     return hashLog - btScale;
977 }
978 
/** ZSTD_adjustCParams_internal() :
 *  optimize `cPar` for a specified input (`srcSize` and `dictSize`).
 *  mostly downsize to reduce memory consumption and initialization latency.
 * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
 *  note : for the time being, `srcSize==0` means "unknown" too, for compatibility with older convention.
 *  condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
static ZSTD_compressionParameters
ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
                            unsigned long long srcSize,
                            size_t dictSize)
{
    static const U64 minSrcSize = 513; /* (1<<9) + 1 */
    static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
    assert(ZSTD_checkCParams(cPar)==0);

    /* srcSize+1<2 catches both 0 and ZSTD_CONTENTSIZE_UNKNOWN (== (U64)-1)
     * in a single unsigned comparison */
    if (dictSize && (srcSize+1<2) /* ZSTD_CONTENTSIZE_UNKNOWN and 0 mean "unknown" */ )
        srcSize = minSrcSize;  /* presumed small when there is a dictionary */
    else if (srcSize == 0)
        srcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* 0 == unknown : presumed large */

    /* resize windowLog if input is small enough, to use less memory */
    if ( (srcSize < maxWindowResize)
      && (dictSize < maxWindowResize) )  {
        U32 const tSize = (U32)(srcSize + dictSize);
        static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
        /* smallest windowLog that still covers src + dict in one window */
        U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
                            ZSTD_highbit32(tSize-1) + 1;
        if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
    }
    /* keep the hash table no larger than useful for the (possibly shrunk) window */
    if (cPar.hashLog > cPar.windowLog+1) cPar.hashLog = cPar.windowLog+1;
    {   U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
        /* shrink chainLog so the cyclic update interval fits inside the window */
        if (cycleLog > cPar.windowLog)
            cPar.chainLog -= (cycleLog - cPar.windowLog);
    }

    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* minimum wlog required for valid frame header */

    return cPar;
}
1019 
1020 ZSTD_compressionParameters
1021 ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
1022                    unsigned long long srcSize,
1023                    size_t dictSize)
1024 {
1025     cPar = ZSTD_clampCParams(cPar);   /* resulting cPar is necessarily valid (all parameters within range) */
1026     return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
1027 }
1028 
1029 ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
1030         const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize)
1031 {
1032     ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize);
1033     if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
1034     if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog;
1035     if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog;
1036     if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog;
1037     if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog;
1038     if (CCtxParams->cParams.minMatch) cParams.minMatch = CCtxParams->cParams.minMatch;
1039     if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength;
1040     if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy;
1041     assert(!ZSTD_checkCParams(cParams));
1042     return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize);
1043 }
1044 
/* ZSTD_sizeof_matchState() :
 * Workspace bytes needed by a match state for `cParams`.
 * `forCCtx` : 1 when sizing for a CCtx, 0 for a CDict; only a CCtx
 * reserves hashLog3 and opt-parser space (mirrors ZSTD_reset_matchState()). */
static size_t
ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
                       const U32 forCCtx)
{
    /* ZSTD_fast uses no chain table */
    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
    size_t const hSize = ((size_t)1) << cParams->hashLog;
    /* hashTable3 only exists for CCtx with minMatch==3 */
    U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
    size_t const h3Size = ((size_t)1) << hashLog3;
    size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
    /* frequency tables + match/price tables for the optimal parser */
    size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32)
                          + (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t));
    size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
                                ? optPotentialSpace
                                : 0;
    DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
                (U32)chainSize, (U32)hSize, (U32)h3Size);
    return tableSpace + optSpace;
}
1063 
/* Estimate total memory for a one-shot CCtx using these parameters.
 * The breakdown mirrors the workspace layout in ZSTD_resetCCtx_internal(). */
size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
    {   ZSTD_compressionParameters const cParams =
                ZSTD_getCParamsFromCCtxParams(params, 0, 0);
        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
        /* minMatch==3 allows up to one sequence per 3 bytes, otherwise per 4 */
        U32    const divider = (cParams.minMatch==3) ? 3 : 4;
        size_t const maxNbSeq = blockSize / divider;
        /* literals buffer + 11 bytes of seqStore bookkeeping per sequence
         * (must match the tokenSpace formula in ZSTD_resetCCtx_internal) */
        size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
        size_t const entropySpace = HUF_WORKSPACE_SIZE;
        size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);  /* prev + next */
        size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1);

        /* long-distance-matching tables and sequence buffer (0 when LDM disabled) */
        size_t const ldmSpace = ZSTD_ldm_getTableSize(params->ldmParams);
        size_t const ldmSeqSpace = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq);

        size_t const neededSpace = entropySpace + blockStateSpace + tokenSpace +
                                   matchStateSize + ldmSpace + ldmSeqSpace;

        DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx));
        DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace);
        return sizeof(ZSTD_CCtx) + neededSpace;
    }
}
1088 
1089 size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
1090 {
1091     ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
1092     return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
1093 }
1094 
1095 static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
1096 {
1097     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
1098     return ZSTD_estimateCCtxSize_usingCParams(cParams);
1099 }
1100 
1101 size_t ZSTD_estimateCCtxSize(int compressionLevel)
1102 {
1103     int level;
1104     size_t memBudget = 0;
1105     for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
1106         size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
1107         if (newMB > memBudget) memBudget = newMB;
1108     }
1109     return memBudget;
1110 }
1111 
/* Estimate total memory for a streaming CCtx :
 * one-shot CCtx estimate + the streaming input/output buffers. */
size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
    {   ZSTD_compressionParameters const cParams =
                ZSTD_getCParamsFromCCtxParams(params, 0, 0);
        size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
        /* input buffer holds a full window plus one block in flight */
        size_t const inBuffSize = ((size_t)1 << cParams.windowLog) + blockSize;
        /* +1 : room for a flush marker past the worst-case compressed block */
        size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
        size_t const streamingSize = inBuffSize + outBuffSize;

        return CCtxSize + streamingSize;
    }
}
1126 
1127 size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
1128 {
1129     ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
1130     return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
1131 }
1132 
1133 static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
1134 {
1135     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
1136     return ZSTD_estimateCStreamSize_usingCParams(cParams);
1137 }
1138 
1139 size_t ZSTD_estimateCStreamSize(int compressionLevel)
1140 {
1141     int level;
1142     size_t memBudget = 0;
1143     for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
1144         size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
1145         if (newMB > memBudget) memBudget = newMB;
1146     }
1147     return memBudget;
1148 }
1149 
/* ZSTD_getFrameProgression():
 * tells how much data has been consumed (input) and produced (output) for current frame.
 * able to count progression inside worker threads (non-blocking mode).
 */
ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    /* multi-threaded mode : delegate to the MT context, which tracks jobs */
    if (cctx->appliedParams.nbWorkers > 0) {
        return ZSTDMT_getFrameProgression(cctx->mtctx);
    }
#endif
    {   ZSTD_frameProgression fp;
        /* bytes ingested into the input buffer but not yet compressed */
        size_t const buffered = (cctx->inBuff == NULL) ? 0 :
                                cctx->inBuffPos - cctx->inToCompress;
        if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
        assert(buffered <= ZSTD_BLOCKSIZE_MAX);
        fp.ingested = cctx->consumedSrcSize + buffered;
        fp.consumed = cctx->consumedSrcSize;
        fp.produced = cctx->producedCSize;
        fp.flushed  = cctx->producedCSize;   /* simplified; some data might still be left within streaming output buffer */
        /* job-level fields only meaningful in multi-threaded mode */
        fp.currentJobID = 0;
        fp.nbActiveWorkers = 0;
        return fp;
}   }
1174 
/*! ZSTD_toFlushNow()
 *  Only useful for multithreading scenarios currently (nbWorkers >= 1).
 *  @return : nb of bytes ready to flush (0 in single-threaded mode). */
size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    if (cctx->appliedParams.nbWorkers > 0) {
        return ZSTDMT_toFlushNow(cctx->mtctx);
    }
#endif
    (void)cctx;  /* unused when single-threaded (or when ZSTD_MULTITHREAD is off) */
    return 0;   /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
}
1188 
1189 
1190 
1191 static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1,
1192                                   ZSTD_compressionParameters cParams2)
1193 {
1194     return (cParams1.hashLog  == cParams2.hashLog)
1195          & (cParams1.chainLog == cParams2.chainLog)
1196          & (cParams1.strategy == cParams2.strategy)   /* opt parser space */
1197          & ((cParams1.minMatch==3) == (cParams2.minMatch==3));  /* hashlog3 space */
1198 }
1199 
/* ZSTD_assertEqualCParams() :
 * Debug-only check that two parameter sets are identical field by field.
 * Compiles to nothing when asserts are disabled (hence the (void) casts
 * to silence unused-parameter warnings). */
static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
                                    ZSTD_compressionParameters cParams2)
{
    (void)cParams1;
    (void)cParams2;
    assert(cParams1.windowLog    == cParams2.windowLog);
    assert(cParams1.chainLog     == cParams2.chainLog);
    assert(cParams1.hashLog      == cParams2.hashLog);
    assert(cParams1.searchLog    == cParams2.searchLog);
    assert(cParams1.minMatch     == cParams2.minMatch);
    assert(cParams1.targetLength == cParams2.targetLength);
    assert(cParams1.strategy     == cParams2.strategy);
}
1213 
1214 /** The parameters are equivalent if ldm is not enabled in both sets or
1215  *  all the parameters are equivalent. */
1216 static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1,
1217                                     ldmParams_t ldmParams2)
1218 {
1219     return (!ldmParams1.enableLdm && !ldmParams2.enableLdm) ||
1220            (ldmParams1.enableLdm == ldmParams2.enableLdm &&
1221             ldmParams1.hashLog == ldmParams2.hashLog &&
1222             ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog &&
1223             ldmParams1.minMatchLength == ldmParams2.minMatchLength &&
1224             ldmParams1.hashRateLog == ldmParams2.hashRateLog);
1225 }
1226 
/* Whether the CCtx owns internal input/output buffers (streaming mode) or not. */
typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e;
1228 
/* ZSTD_sufficientBuff() :
 * check internal buffers exist for streaming if buffPol == ZSTDb_buffered .
 * Note : they are assumed to be correctly sized if ZSTD_equivalentCParams()==1 */
static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t maxNbSeq1,
                            size_t maxNbLit1,
                            ZSTD_buffered_policy_e buffPol2,
                            ZSTD_compressionParameters cParams2,
                            U64 pledgedSrcSize)
{
    /* recompute the sizes the new parameters would need
     * (same formulas as ZSTD_resetCCtx_internal) */
    size_t const windowSize2 = MAX(1, (size_t)MIN(((U64)1 << cParams2.windowLog), pledgedSrcSize));
    size_t const blockSize2 = MIN(ZSTD_BLOCKSIZE_MAX, windowSize2);
    size_t const maxNbSeq2 = blockSize2 / ((cParams2.minMatch == 3) ? 3 : 4);
    size_t const maxNbLit2 = blockSize2;
    /* only streaming mode requires the input buffer (window + one block) */
    size_t const neededBufferSize2 = (buffPol2==ZSTDb_buffered) ? windowSize2 + blockSize2 : 0;
    DEBUGLOG(4, "ZSTD_sufficientBuff: is neededBufferSize2=%u <= bufferSize1=%u",
                (U32)neededBufferSize2, (U32)bufferSize1);
    DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbSeq2=%u <= maxNbSeq1=%u",
                (U32)maxNbSeq2, (U32)maxNbSeq1);
    DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbLit2=%u <= maxNbLit1=%u",
                (U32)maxNbLit2, (U32)maxNbLit1);
    /* all three existing capacities must cover the new requirements */
    return (maxNbLit2 <= maxNbLit1)
         & (maxNbSeq2 <= maxNbSeq1)
         & (neededBufferSize2 <= bufferSize1);
}
1253 
/** Equivalence for resetCCtx purposes :
 *  1 when the existing context (params1 + current buffer/seqStore capacities)
 *  can be re-used as-is for params2, avoiding a full workspace reset. */
static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1,
                                 ZSTD_CCtx_params params2,
                                 size_t buffSize1,
                                 size_t maxNbSeq1, size_t maxNbLit1,
                                 ZSTD_buffered_policy_e buffPol2,
                                 U64 pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_equivalentParams: pledgedSrcSize=%u", (U32)pledgedSrcSize);
    /* match-state tables must be identically sized */
    if (!ZSTD_equivalentCParams(params1.cParams, params2.cParams)) {
      DEBUGLOG(4, "ZSTD_equivalentCParams() == 0");
      return 0;
    }
    /* LDM tables must be identically sized (or LDM off in both) */
    if (!ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams)) {
      DEBUGLOG(4, "ZSTD_equivalentLdmParams() == 0");
      return 0;
    }
    /* existing buffers and seqStore must be large enough for the new job */
    if (!ZSTD_sufficientBuff(buffSize1, maxNbSeq1, maxNbLit1, buffPol2,
                             params2.cParams, pledgedSrcSize)) {
      DEBUGLOG(4, "ZSTD_sufficientBuff() == 0");
      return 0;
    }
    return 1;
}
1278 
/* ZSTD_reset_compressedBlockState() :
 * Return a block state to its start-of-frame condition :
 * repcodes back to their standard initial values, and all entropy tables
 * marked non-reusable so the next block regenerates its statistics. */
static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
{
    int i;
    for (i = 0; i < ZSTD_REP_NUM; ++i)
        bs->rep[i] = repStartValue[i];
    bs->entropy.huf.repeatMode = HUF_repeat_none;
    bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
    bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
    bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
}
1289 
/*! ZSTD_invalidateMatchState()
 *  Invalidate all the matches in the match finder tables.
 *  Requires nextSrc and base to be set (can be NULL).
 */
static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
{
    /* must run first : nextToUpdate below reads the cleared window's dictLimit */
    ZSTD_window_clear(&ms->window);

    ms->nextToUpdate = ms->window.dictLimit;
    ms->loadedDictEnd = 0;
    ms->opt.litLengthSum = 0;  /* force reset of btopt stats */
    ms->dictMatchState = NULL;  /* detach any attached dictionary context */
}
1303 
/*! ZSTD_continueCCtx() :
 *  reuse CCtx without reset (note : requires no dictionary)
 *  Keeps the existing workspace and tables; only re-initializes the
 *  per-frame state (sizes, counters, hash, block/match/ldm state). */
static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pledgedSrcSize)
{
    /* window never exceeds the pledged source size, and is at least 1 */
    size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
    DEBUGLOG(4, "ZSTD_continueCCtx: re-use context in place");

    cctx->blockSize = blockSize;   /* previous block size could be different even for same windowLog, due to pledgedSrcSize */
    cctx->appliedParams = params;
    cctx->blockState.matchState.cParams = params.cParams;
    /* +1 so that 0 can mean "size not set" */
    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
    cctx->consumedSrcSize = 0;
    cctx->producedCSize = 0;
    if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
        cctx->appliedParams.fParams.contentSizeFlag = 0;  /* cannot write an unknown size into the frame header */
    DEBUGLOG(4, "pledged content size : %u ; flag : %u",
        (U32)pledgedSrcSize, cctx->appliedParams.fParams.contentSizeFlag);
    cctx->stage = ZSTDcs_init;
    cctx->dictID = 0;
    if (params.ldmParams.enableLdm)
        ZSTD_window_clear(&cctx->ldmState.window);
    ZSTD_referenceExternalSequences(cctx, NULL, 0);  /* drop any externally provided sequences */
    ZSTD_invalidateMatchState(&cctx->blockState.matchState);
    ZSTD_reset_compressedBlockState(cctx->blockState.prevCBlock);
    XXH64_reset(&cctx->xxhState, 0);  /* restart the content checksum */
    return 0;
}
1332 
/* Whether ZSTD_reset_matchState() must memset the tables (ZSTDcrp_continue)
 * or can skip that memset (ZSTDcrp_noMemset). */
typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e;

/* Target of a match-state reset : CCtx reserves extra space (hashLog3, opt
 * parser) that a CDict does not need. */
typedef enum { ZSTD_resetTarget_CDict, ZSTD_resetTarget_CCtx } ZSTD_resetTarget_e;
1336 
/* ZSTD_reset_matchState() :
 * Re-initialize `ms` and carve its tables out of the workspace at `ptr`.
 * `crp`   : whether tables must be zeroed (see ZSTD_compResetPolicy_e).
 * `forWho`: CCtx reserves hashLog3 + opt-parser space, CDict does not.
 * @return : first free byte of workspace after the match state's tables.
 * Layout and sizes must stay in sync with ZSTD_sizeof_matchState(). */
static void*
ZSTD_reset_matchState(ZSTD_matchState_t* ms,
                      void* ptr,
                const ZSTD_compressionParameters* cParams,
                      ZSTD_compResetPolicy_e const crp, ZSTD_resetTarget_e const forWho)
{
    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
    size_t const hSize = ((size_t)1) << cParams->hashLog;
    U32    const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
    size_t const h3Size = ((size_t)1) << hashLog3;
    size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);

    assert(((size_t)ptr & 3) == 0);  /* U32 alignment required for the tables */

    ms->hashLog3 = hashLog3;
    memset(&ms->window, 0, sizeof(ms->window));
    ms->window.dictLimit = 1;    /* start from 1, so that 1st position is valid */
    ms->window.lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
    ms->window.nextSrc = ms->window.base + 1;   /* see issue #1241 */
    ZSTD_invalidateMatchState(ms);

    /* opt parser space */
    if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
        DEBUGLOG(4, "reserving optimal parser space");
        /* frequency tables laid out back-to-back, then match/price tables */
        ms->opt.litFreq = (unsigned*)ptr;
        ms->opt.litLengthFreq = ms->opt.litFreq + (1<<Litbits);
        ms->opt.matchLengthFreq = ms->opt.litLengthFreq + (MaxLL+1);
        ms->opt.offCodeFreq = ms->opt.matchLengthFreq + (MaxML+1);
        ptr = ms->opt.offCodeFreq + (MaxOff+1);
        ms->opt.matchTable = (ZSTD_match_t*)ptr;
        ptr = ms->opt.matchTable + ZSTD_OPT_NUM+1;
        ms->opt.priceTable = (ZSTD_optimal_t*)ptr;
        ptr = ms->opt.priceTable + ZSTD_OPT_NUM+1;
    }

    /* table Space : hash, chain and hash3 tables, contiguous */
    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_noMemset);
    assert(((size_t)ptr & 3) == 0);  /* ensure ptr is properly aligned */
    if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace);   /* reset tables only */
    ms->hashTable = (U32*)(ptr);
    ms->chainTable = ms->hashTable + hSize;
    ms->hashTable3 = ms->chainTable + chainSize;
    ptr = ms->hashTable3 + h3Size;

    ms->cParams = *cParams;

    assert(((size_t)ptr & 3) == 0);
    return ptr;
}
1386 
/* ZSTD_indexTooCloseToMax() :
 * minor optimization : prefer memset() rather than reduceIndex()
 * which is measurably slow in some circumstances (reported for Visual Studio).
 * Works when re-using a context for a lot of smallish inputs :
 * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
 * memset() will be triggered before reduceIndex().
 */
#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
{
    /* nextSrc - base is the current absolute index into the window */
    return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
}
1399 
#define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* define "workspace is too large" as this number of times larger than needed */
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128  /* when workspace is continuously too large
                                         * during at least this number of times,
                                         * context's memory usage is considered wasteful,
                                         * because it's sized to handle a worst case scenario which rarely happens.
                                         * In which case, resize it down to free some memory */
1406 
1407 /*! ZSTD_resetCCtx_internal() :
1408     note : `params` are assumed fully validated at this stage */
1409 static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
1410                                       ZSTD_CCtx_params params,
1411                                       U64 const pledgedSrcSize,
1412                                       ZSTD_compResetPolicy_e const crp,
1413                                       ZSTD_buffered_policy_e const zbuff)
1414 {
1415     DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
1416                 (U32)pledgedSrcSize, params.cParams.windowLog);
1417     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
1418 
1419     if (crp == ZSTDcrp_continue) {
1420         if (ZSTD_equivalentParams(zc->appliedParams, params,
1421                                   zc->inBuffSize,
1422                                   zc->seqStore.maxNbSeq, zc->seqStore.maxNbLit,
1423                                   zbuff, pledgedSrcSize) ) {
1424             DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> consider continue mode");
1425             zc->workSpaceOversizedDuration += (zc->workSpaceOversizedDuration > 0);   /* if it was too large, it still is */
1426             if (zc->workSpaceOversizedDuration <= ZSTD_WORKSPACETOOLARGE_MAXDURATION) {
1427                 DEBUGLOG(4, "continue mode confirmed (wLog1=%u, blockSize1=%zu)",
1428                             zc->appliedParams.cParams.windowLog, zc->blockSize);
1429                 if (ZSTD_indexTooCloseToMax(zc->blockState.matchState.window)) {
1430                     /* prefer a reset, faster than a rescale */
1431                     ZSTD_reset_matchState(&zc->blockState.matchState,
1432                                            zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32,
1433                                           &params.cParams,
1434                                            crp, ZSTD_resetTarget_CCtx);
1435                 }
1436                 return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
1437     }   }   }
1438     DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx");
1439 
1440     if (params.ldmParams.enableLdm) {
1441         /* Adjust long distance matching parameters */
1442         ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
1443         assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
1444         assert(params.ldmParams.hashRateLog < 32);
1445         zc->ldmState.hashPower = ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
1446     }
1447 
1448     {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
1449         size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
1450         U32    const divider = (params.cParams.minMatch==3) ? 3 : 4;
1451         size_t const maxNbSeq = blockSize / divider;
1452         size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
1453         size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0;
1454         size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0;
1455         size_t const matchStateSize = ZSTD_sizeof_matchState(&params.cParams, /* forCCtx */ 1);
1456         size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
1457         void* ptr;   /* used to partition workSpace */
1458 
1459         /* Check if workSpace is large enough, alloc a new one if needed */
1460         {   size_t const entropySpace = HUF_WORKSPACE_SIZE;
1461             size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
1462             size_t const bufferSpace = buffInSize + buffOutSize;
1463             size_t const ldmSpace = ZSTD_ldm_getTableSize(params.ldmParams);
1464             size_t const ldmSeqSpace = maxNbLdmSeq * sizeof(rawSeq);
1465 
1466             size_t const neededSpace = entropySpace + blockStateSpace + ldmSpace +
1467                                        ldmSeqSpace + matchStateSize + tokenSpace +
1468                                        bufferSpace;
1469 
1470             int const workSpaceTooSmall = zc->workSpaceSize < neededSpace;
1471             int const workSpaceTooLarge = zc->workSpaceSize > ZSTD_WORKSPACETOOLARGE_FACTOR * neededSpace;
1472             int const workSpaceWasteful = workSpaceTooLarge && (zc->workSpaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION);
1473             zc->workSpaceOversizedDuration = workSpaceTooLarge ? zc->workSpaceOversizedDuration+1 : 0;
1474 
1475             DEBUGLOG(4, "Need %zuKB workspace, including %zuKB for match state, and %zuKB for buffers",
1476                         neededSpace>>10, matchStateSize>>10, bufferSpace>>10);
1477             DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
1478 
1479             if (workSpaceTooSmall || workSpaceWasteful) {
1480                 DEBUGLOG(4, "Resize workSpaceSize from %zuKB to %zuKB",
1481                             zc->workSpaceSize >> 10,
1482                             neededSpace >> 10);
1483 
1484                 RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
1485 
1486                 zc->workSpaceSize = 0;
1487                 ZSTD_free(zc->workSpace, zc->customMem);
1488                 zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
1489                 RETURN_ERROR_IF(zc->workSpace == NULL, memory_allocation);
1490                 zc->workSpaceSize = neededSpace;
1491                 zc->workSpaceOversizedDuration = 0;
1492 
1493                 /* Statically sized space.
1494                  * entropyWorkspace never moves,
1495                  * though prev/next block swap places */
1496                 assert(((size_t)zc->workSpace & 3) == 0);   /* ensure correct alignment */
1497                 assert(zc->workSpaceSize >= 2 * sizeof(ZSTD_compressedBlockState_t));
1498                 zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)zc->workSpace;
1499                 zc->blockState.nextCBlock = zc->blockState.prevCBlock + 1;
1500                 ptr = zc->blockState.nextCBlock + 1;
1501                 zc->entropyWorkspace = (U32*)ptr;
1502         }   }
1503 
1504         /* init params */
1505         zc->appliedParams = params;
1506         zc->blockState.matchState.cParams = params.cParams;
1507         zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
1508         zc->consumedSrcSize = 0;
1509         zc->producedCSize = 0;
1510         if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
1511             zc->appliedParams.fParams.contentSizeFlag = 0;
1512         DEBUGLOG(4, "pledged content size : %u ; flag : %u",
1513             (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
1514         zc->blockSize = blockSize;
1515 
1516         XXH64_reset(&zc->xxhState, 0);
1517         zc->stage = ZSTDcs_init;
1518         zc->dictID = 0;
1519 
1520         ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
1521 
1522         ptr = ZSTD_reset_matchState(&zc->blockState.matchState,
1523                                      zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32,
1524                                     &params.cParams,
1525                                      crp, ZSTD_resetTarget_CCtx);
1526 
1527         /* ldm hash table */
1528         /* initialize bucketOffsets table later for pointer alignment */
1529         if (params.ldmParams.enableLdm) {
1530             size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
1531             memset(ptr, 0, ldmHSize * sizeof(ldmEntry_t));
1532             assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
1533             zc->ldmState.hashTable = (ldmEntry_t*)ptr;
1534             ptr = zc->ldmState.hashTable + ldmHSize;
1535             zc->ldmSequences = (rawSeq*)ptr;
1536             ptr = zc->ldmSequences + maxNbLdmSeq;
1537             zc->maxNbLdmSequences = maxNbLdmSeq;
1538 
1539             memset(&zc->ldmState.window, 0, sizeof(zc->ldmState.window));
1540         }
1541         assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
1542 
1543         /* sequences storage */
1544         zc->seqStore.maxNbSeq = maxNbSeq;
1545         zc->seqStore.sequencesStart = (seqDef*)ptr;
1546         ptr = zc->seqStore.sequencesStart + maxNbSeq;
1547         zc->seqStore.llCode = (BYTE*) ptr;
1548         zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
1549         zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
1550         zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
1551         /* ZSTD_wildcopy() is used to copy into the literals buffer,
1552          * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
1553          */
1554         zc->seqStore.maxNbLit = blockSize;
1555         ptr = zc->seqStore.litStart + blockSize + WILDCOPY_OVERLENGTH;
1556 
1557         /* ldm bucketOffsets table */
1558         if (params.ldmParams.enableLdm) {
1559             size_t const ldmBucketSize =
1560                   ((size_t)1) << (params.ldmParams.hashLog -
1561                                   params.ldmParams.bucketSizeLog);
1562             memset(ptr, 0, ldmBucketSize);
1563             zc->ldmState.bucketOffsets = (BYTE*)ptr;
1564             ptr = zc->ldmState.bucketOffsets + ldmBucketSize;
1565             ZSTD_window_clear(&zc->ldmState.window);
1566         }
1567         ZSTD_referenceExternalSequences(zc, NULL, 0);
1568 
1569         /* buffers */
1570         zc->inBuffSize = buffInSize;
1571         zc->inBuff = (char*)ptr;
1572         zc->outBuffSize = buffOutSize;
1573         zc->outBuff = zc->inBuff + buffInSize;
1574 
1575         return 0;
1576     }
1577 }
1578 
1579 /* ZSTD_invalidateRepCodes() :
1580  * ensures next compression will not use repcodes from previous block.
1581  * Note : only works with regular variant;
1582  *        do not use with extDict variant ! */
1583 void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
1584     int i;
1585     for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
1586     assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
1587 }
1588 
1589 /* These are the approximate sizes for each strategy past which copying the
1590  * dictionary tables into the working context is faster than using them
1591  * in-place.
1592  */
/* Indexed by ZSTD_strategy (1..ZSTD_STRATEGY_MAX); slot 0 is a placeholder
 * so the strategy value can be used directly as the index. */
static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
    8 KB,  /* unused */
    8 KB,  /* ZSTD_fast */
    16 KB, /* ZSTD_dfast */
    32 KB, /* ZSTD_greedy */
    32 KB, /* ZSTD_lazy */
    32 KB, /* ZSTD_lazy2 */
    32 KB, /* ZSTD_btlazy2 */
    32 KB, /* ZSTD_btopt */
    8 KB,  /* ZSTD_btultra */
    8 KB   /* ZSTD_btultra2 */
};
1605 
1606 static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
1607                                  ZSTD_CCtx_params params,
1608                                  U64 pledgedSrcSize)
1609 {
1610     size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
1611     return ( pledgedSrcSize <= cutoff
1612           || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
1613           || params.attachDictPref == ZSTD_dictForceAttach )
1614         && params.attachDictPref != ZSTD_dictForceCopy
1615         && !params.forceWindow; /* dictMatchState isn't correctly
1616                                  * handled in _enforceMaxDist */
1617 }
1618 
/* ZSTD_resetCCtx_byAttachingCDict() :
 * Reference the cdict's match state in place (via dictMatchState) instead of
 * copying its tables. The working context keeps its own input-sized tables.
 * Stores a pointer into cdict, so the cdict must remain valid while this
 * context compresses.
 * @return : 0 */
static size_t
ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
                        const ZSTD_CDict* cdict,
                        ZSTD_CCtx_params params,
                        U64 pledgedSrcSize,
                        ZSTD_buffered_policy_e zbuff)
{
    {   const ZSTD_compressionParameters* const cdict_cParams = &cdict->matchState.cParams;
        unsigned const windowLog = params.cParams.windowLog;
        assert(windowLog != 0);
        /* Resize working context table params for input only, since the dict
         * has its own tables. */
        params.cParams = ZSTD_adjustCParams_internal(*cdict_cParams, pledgedSrcSize, 0);
        params.cParams.windowLog = windowLog;
        ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                ZSTDcrp_continue, zbuff);
        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
    }

    {   const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
                                  - cdict->matchState.window.base);
        const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
        if (cdictLen == 0) {
            /* don't even attach dictionaries with no contents */
            DEBUGLOG(4, "skipping attaching empty dictionary");
        } else {
            DEBUGLOG(4, "attaching dictionary into context");
            cctx->blockState.matchState.dictMatchState = &cdict->matchState;

            /* prep working match state so dict matches never have negative indices
             * when they are translated to the working context's index space. */
            if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
                cctx->blockState.matchState.window.nextSrc =
                    cctx->blockState.matchState.window.base + cdictEnd;
                ZSTD_window_clear(&cctx->blockState.matchState.window);
            }
            /* loadedDictEnd is expressed within the referential of the active context */
            cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
    }   }

    cctx->dictID = cdict->dictID;

    /* copy block state */
    memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));

    return 0;
}
1666 
/* ZSTD_resetCCtx_byCopyingCDict() :
 * Import the cdict's hash/chain tables, window offsets and entropy state into
 * the working context, so compression no longer depends on the cdict's tables.
 * @return : 0 */
static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
                            const ZSTD_CDict* cdict,
                            ZSTD_CCtx_params params,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;

    DEBUGLOG(4, "copying dictionary into context");

    {   unsigned const windowLog = params.cParams.windowLog;
        assert(windowLog != 0);
        /* Copy only compression parameters related to tables. */
        params.cParams = *cdict_cParams;
        params.cParams.windowLog = windowLog;
        ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                ZSTDcrp_noMemset, zbuff);
        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
        assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
        assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
    }

    /* copy tables : single memcpy relies on hashTable/chainTable/hashTable3
     * being laid out contiguously in that order (asserted below) */
    {   size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
        size_t const hSize =  (size_t)1 << cdict_cParams->hashLog;
        size_t const tableSpace = (chainSize + hSize) * sizeof(U32);
        assert((U32*)cctx->blockState.matchState.chainTable == (U32*)cctx->blockState.matchState.hashTable + hSize);  /* chainTable must follow hashTable */
        assert((U32*)cctx->blockState.matchState.hashTable3 == (U32*)cctx->blockState.matchState.chainTable + chainSize);
        assert((U32*)cdict->matchState.chainTable == (U32*)cdict->matchState.hashTable + hSize);  /* chainTable must follow hashTable */
        assert((U32*)cdict->matchState.hashTable3 == (U32*)cdict->matchState.chainTable + chainSize);
        memcpy(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, tableSpace);   /* presumes all tables follow each other */
    }

    /* Zero the hashTable3, since the cdict never fills it */
    {   size_t const h3Size = (size_t)1 << cctx->blockState.matchState.hashLog3;
        assert(cdict->matchState.hashLog3 == 0);
        memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
    }

    /* copy dictionary offsets */
    {   ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
        ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
        dstMatchState->window       = srcMatchState->window;
        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
    }

    cctx->dictID = cdict->dictID;

    /* copy block state */
    memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));

    return 0;
}
1721 
1722 /* We have a choice between copying the dictionary context into the working
1723  * context, or referencing the dictionary context from the working context
1724  * in-place. We decide here which strategy to use. */
1725 static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
1726                             const ZSTD_CDict* cdict,
1727                             ZSTD_CCtx_params params,
1728                             U64 pledgedSrcSize,
1729                             ZSTD_buffered_policy_e zbuff)
1730 {
1731 
1732     DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
1733                 (unsigned)pledgedSrcSize);
1734 
1735     if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
1736         return ZSTD_resetCCtx_byAttachingCDict(
1737             cctx, cdict, params, pledgedSrcSize, zbuff);
1738     } else {
1739         return ZSTD_resetCCtx_byCopyingCDict(
1740             cctx, cdict, params, pledgedSrcSize, zbuff);
1741     }
1742 }
1743 
1744 /*! ZSTD_copyCCtx_internal() :
1745  *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
1746  *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
1747  *  The "context", in this case, refers to the hash and chain tables,
1748  *  entropy tables, and dictionary references.
1749  * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
1750  * @return : 0, or an error code */
static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
                            const ZSTD_CCtx* srcCCtx,
                            ZSTD_frameParameters fParams,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    DEBUGLOG(5, "ZSTD_copyCCtx_internal");
    RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong);   /* duplication only allowed before any data is ingested */

    memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
        /* Copy only compression parameters related to tables. */
        params.cParams = srcCCtx->appliedParams.cParams;
        params.fParams = fParams;
        ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
                                ZSTDcrp_noMemset, zbuff);
        /* table geometry must match on both sides for the memcpy below */
        assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
        assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
        assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
        assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
        assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
    }

    /* copy tables : single memcpy relies on hash/chain/hash3 tables being contiguous */
    {   size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
        size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
        size_t const h3Size = (size_t)1 << srcCCtx->blockState.matchState.hashLog3;
        size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
        assert((U32*)dstCCtx->blockState.matchState.chainTable == (U32*)dstCCtx->blockState.matchState.hashTable + hSize);  /* chainTable must follow hashTable */
        assert((U32*)dstCCtx->blockState.matchState.hashTable3 == (U32*)dstCCtx->blockState.matchState.chainTable + chainSize);
        memcpy(dstCCtx->blockState.matchState.hashTable, srcCCtx->blockState.matchState.hashTable, tableSpace);   /* presumes all tables follow each other */
    }

    /* copy dictionary offsets */
    {
        const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
        ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
        dstMatchState->window       = srcMatchState->window;
        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
    }
    dstCCtx->dictID = srcCCtx->dictID;

    /* copy block state */
    memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));

    return 0;
}
1799 
1800 /*! ZSTD_copyCCtx() :
1801  *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
1802  *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
1803  *  pledgedSrcSize==0 means "unknown".
 *  @return : 0, or an error code */
1805 size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
1806 {
1807     ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
1808     ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0);
1809     ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
1810     if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
1811     fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
1812 
1813     return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
1814                                 fParams, pledgedSrcSize,
1815                                 zbuff);
1816 }
1817 
1818 
1819 #define ZSTD_ROWSIZE 16
1820 /*! ZSTD_reduceTable() :
1821  *  reduce table indexes by `reducerValue`, or squash to zero.
1822  *  PreserveMark preserves "unsorted mark" for btlazy2 strategy.
1823  *  It must be set to a clear 0/1 value, to remove branch during inlining.
1824  *  Presume table size is a multiple of ZSTD_ROWSIZE
1825  *  to help auto-vectorization */
1826 FORCE_INLINE_TEMPLATE void
1827 ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
1828 {
1829     int const nbRows = (int)size / ZSTD_ROWSIZE;
1830     int cellNb = 0;
1831     int rowNb;
1832     assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */
1833     assert(size < (1U<<31));   /* can be casted to int */
1834     for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
1835         int column;
1836         for (column=0; column<ZSTD_ROWSIZE; column++) {
1837             if (preserveMark) {
1838                 U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
1839                 table[cellNb] += adder;
1840             }
1841             if (table[cellNb] < reducerValue) table[cellNb] = 0;
1842             else table[cellNb] -= reducerValue;
1843             cellNb++;
1844     }   }
1845 }
1846 
1847 static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
1848 {
1849     ZSTD_reduceTable_internal(table, size, reducerValue, 0);
1850 }
1851 
1852 static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
1853 {
1854     ZSTD_reduceTable_internal(table, size, reducerValue, 1);
1855 }
1856 
1857 /*! ZSTD_reduceIndex() :
1858 *   rescale all indexes to avoid future overflow (indexes are U32) */
1859 static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
1860 {
1861     {   U32 const hSize = (U32)1 << params->cParams.hashLog;
1862         ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
1863     }
1864 
1865     if (params->cParams.strategy != ZSTD_fast) {
1866         U32 const chainSize = (U32)1 << params->cParams.chainLog;
1867         if (params->cParams.strategy == ZSTD_btlazy2)
1868             ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
1869         else
1870             ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
1871     }
1872 
1873     if (ms->hashLog3) {
1874         U32 const h3Size = (U32)1 << ms->hashLog3;
1875         ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
1876     }
1877 }
1878 
1879 
1880 /*-*******************************************************
1881 *  Block entropic compression
1882 *********************************************************/
1883 
1884 /* See doc/zstd_compression_format.md for detailed format description */
1885 
1886 static size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
1887 {
1888     U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
1889     RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
1890                     dstSize_tooSmall);
1891     MEM_writeLE24(dst, cBlockHeader24);
1892     memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
1893     return ZSTD_blockHeaderSize + srcSize;
1894 }
1895 
1896 void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
1897 {
1898     const seqDef* const sequences = seqStorePtr->sequencesStart;
1899     BYTE* const llCodeTable = seqStorePtr->llCode;
1900     BYTE* const ofCodeTable = seqStorePtr->ofCode;
1901     BYTE* const mlCodeTable = seqStorePtr->mlCode;
1902     U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
1903     U32 u;
1904     assert(nbSeq <= seqStorePtr->maxNbSeq);
1905     for (u=0; u<nbSeq; u++) {
1906         U32 const llv = sequences[u].litLength;
1907         U32 const mlv = sequences[u].matchLength;
1908         llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
1909         ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
1910         mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
1911     }
1912     if (seqStorePtr->longLengthID==1)
1913         llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
1914     if (seqStorePtr->longLengthID==2)
1915         mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
1916 }
1917 
1918 static int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
1919 {
1920     switch (cctxParams->literalCompressionMode) {
1921     case ZSTD_lcm_huffman:
1922         return 0;
1923     case ZSTD_lcm_uncompressed:
1924         return 1;
1925     default:
1926         assert(0 /* impossible: pre-validated */);
1927         /* fall-through */
1928     case ZSTD_lcm_auto:
1929         return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
1930     }
1931 }
1932 
1933 /* ZSTD_compressSequences_internal():
1934  * actually compresses both literals and sequences */
MEM_STATIC size_t
ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
                          const ZSTD_entropyCTables_t* prevEntropy,
                                ZSTD_entropyCTables_t* nextEntropy,
                          const ZSTD_CCtx_params* cctxParams,
                                void* dst, size_t dstCapacity,
                                void* workspace, size_t wkspSize,
                          const int bmi2)
{
    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
    unsigned count[MaxSeq+1];
    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
    const seqDef* const sequences = seqStorePtr->sequencesStart;
    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
    const BYTE* const llCodeTable = seqStorePtr->llCode;
    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
    BYTE* seqHead;
    BYTE* lastNCount = NULL;   /* position of the last FSE NCount header written, for the <4-byte workaround below */

    DEBUGLOG(5, "ZSTD_compressSequences_internal (nbSeq=%zu)", nbSeq);
    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));

    /* Compress literals */
    {   const BYTE* const literals = seqStorePtr->litStart;
        size_t const litSize = seqStorePtr->lit - literals;
        size_t const cSize = ZSTD_compressLiterals(
                                    &prevEntropy->huf, &nextEntropy->huf,
                                    cctxParams->cParams.strategy,
                                    ZSTD_disableLiteralsCompression(cctxParams),
                                    op, dstCapacity,
                                    literals, litSize,
                                    workspace, wkspSize,
                                    bmi2);
        FORWARD_IF_ERROR(cSize);
        assert(cSize <= dstCapacity);
        op += cSize;
    }

    /* Sequences Header : nbSeq encoded on 1, 2 or 3 bytes depending on magnitude */
    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
                    dstSize_tooSmall);
    if (nbSeq < 0x7F)
        *op++ = (BYTE)nbSeq;
    else if (nbSeq < LONGNBSEQ)
        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
    else
        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
    assert(op <= oend);
    if (nbSeq==0) {
        /* Copy the old tables over as if we repeated them */
        memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
        return op - ostart;
    }

    /* seqHead : flags for FSE encoding type */
    seqHead = op++;
    assert(op <= oend);

    /* convert length/distances into codes */
    ZSTD_seqToCodes(seqStorePtr);
    /* build CTable for Literal Lengths */
    {   unsigned max = MaxLL;
        size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace, wkspSize);   /* can't fail */
        DEBUGLOG(5, "Building LL table");
        nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
        LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
                                        count, max, mostFrequent, nbSeq,
                                        LLFSELog, prevEntropy->fse.litlengthCTable,
                                        LL_defaultNorm, LL_defaultNormLog,
                                        ZSTD_defaultAllowed, strategy);
        assert(set_basic < set_compressed && set_rle < set_compressed);
        assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
                                                    count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
                                                    prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable),
                                                    workspace, wkspSize);
            FORWARD_IF_ERROR(countSize);
            if (LLtype == set_compressed)
                lastNCount = op;
            op += countSize;
            assert(op <= oend);
    }   }
    /* build CTable for Offsets */
    {   unsigned max = MaxOff;
        size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */
        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
        DEBUGLOG(5, "Building OF table");
        nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
        Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,
                                        count, max, mostFrequent, nbSeq,
                                        OffFSELog, prevEntropy->fse.offcodeCTable,
                                        OF_defaultNorm, OF_defaultNormLog,
                                        defaultPolicy, strategy);
        assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
                                                    count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                                                    prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable),
                                                    workspace, wkspSize);
            FORWARD_IF_ERROR(countSize);
            if (Offtype == set_compressed)
                lastNCount = op;
            op += countSize;
            assert(op <= oend);
    }   }
    /* build CTable for MatchLengths */
    {   unsigned max = MaxML;
        size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace, wkspSize);   /* can't fail */
        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
        nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
        MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
                                        count, max, mostFrequent, nbSeq,
                                        MLFSELog, prevEntropy->fse.matchlengthCTable,
                                        ML_defaultNorm, ML_defaultNormLog,
                                        ZSTD_defaultAllowed, strategy);
        assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
                                                    count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
                                                    prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable),
                                                    workspace, wkspSize);
            FORWARD_IF_ERROR(countSize);
            if (MLtype == set_compressed)
                lastNCount = op;
            op += countSize;
            assert(op <= oend);
    }   }

    /* 2 bits per field: LL / OF / ML encoding types; low 2 bits reserved */
    *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));

    {   size_t const bitstreamSize = ZSTD_encodeSequences(
                                        op, oend - op,
                                        CTable_MatchLength, mlCodeTable,
                                        CTable_OffsetBits, ofCodeTable,
                                        CTable_LitLength, llCodeTable,
                                        sequences, nbSeq,
                                        longOffsets, bmi2);
        FORWARD_IF_ERROR(bitstreamSize);
        op += bitstreamSize;
        assert(op <= oend);
        /* zstd versions <= 1.3.4 mistakenly report corruption when
         * FSE_readNCount() receives a buffer < 4 bytes.
         * Fixed by https://github.com/facebook/zstd/pull/1146.
         * This can happen when the last set_compressed table present is 2
         * bytes and the bitstream is only one byte.
         * In this exceedingly rare case, we will simply emit an uncompressed
         * block, since it isn't worth optimizing.
         */
        if (lastNCount && (op - lastNCount) < 4) {
            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
            assert(op - lastNCount == 3);
            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
                        "emitting an uncompressed block.");
            return 0;
        }
    }

    DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
    return op - ostart;
}
2102 
2103 MEM_STATIC size_t
2104 ZSTD_compressSequences(seqStore_t* seqStorePtr,
2105                        const ZSTD_entropyCTables_t* prevEntropy,
2106                              ZSTD_entropyCTables_t* nextEntropy,
2107                        const ZSTD_CCtx_params* cctxParams,
2108                              void* dst, size_t dstCapacity,
2109                              size_t srcSize,
2110                              void* workspace, size_t wkspSize,
2111                              int bmi2)
2112 {
2113     size_t const cSize = ZSTD_compressSequences_internal(
2114                             seqStorePtr, prevEntropy, nextEntropy, cctxParams,
2115                             dst, dstCapacity,
2116                             workspace, wkspSize, bmi2);
2117     if (cSize == 0) return 0;
2118     /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
2119      * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block.
2120      */
2121     if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
2122         return 0;  /* block not compressed */
2123     FORWARD_IF_ERROR(cSize);
2124 
2125     /* Check compressibility */
2126     {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
2127         if (cSize >= maxCSize) return 0;  /* block not compressed */
2128     }
2129 
2130     return cSize;
2131 }
2132 
2133 /* ZSTD_selectBlockCompressor() :
2134  * Not static, but internal use only (used by long distance matcher)
2135  * assumption : strat is a valid strategy */
2136 ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
2137 {
2138     static const ZSTD_blockCompressor blockCompressor[3][ZSTD_STRATEGY_MAX+1] = {
2139         { ZSTD_compressBlock_fast  /* default for 0 */,
2140           ZSTD_compressBlock_fast,
2141           ZSTD_compressBlock_doubleFast,
2142           ZSTD_compressBlock_greedy,
2143           ZSTD_compressBlock_lazy,
2144           ZSTD_compressBlock_lazy2,
2145           ZSTD_compressBlock_btlazy2,
2146           ZSTD_compressBlock_btopt,
2147           ZSTD_compressBlock_btultra,
2148           ZSTD_compressBlock_btultra2 },
2149         { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
2150           ZSTD_compressBlock_fast_extDict,
2151           ZSTD_compressBlock_doubleFast_extDict,
2152           ZSTD_compressBlock_greedy_extDict,
2153           ZSTD_compressBlock_lazy_extDict,
2154           ZSTD_compressBlock_lazy2_extDict,
2155           ZSTD_compressBlock_btlazy2_extDict,
2156           ZSTD_compressBlock_btopt_extDict,
2157           ZSTD_compressBlock_btultra_extDict,
2158           ZSTD_compressBlock_btultra_extDict },
2159         { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,
2160           ZSTD_compressBlock_fast_dictMatchState,
2161           ZSTD_compressBlock_doubleFast_dictMatchState,
2162           ZSTD_compressBlock_greedy_dictMatchState,
2163           ZSTD_compressBlock_lazy_dictMatchState,
2164           ZSTD_compressBlock_lazy2_dictMatchState,
2165           ZSTD_compressBlock_btlazy2_dictMatchState,
2166           ZSTD_compressBlock_btopt_dictMatchState,
2167           ZSTD_compressBlock_btultra_dictMatchState,
2168           ZSTD_compressBlock_btultra_dictMatchState }
2169     };
2170     ZSTD_blockCompressor selectedCompressor;
2171     ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
2172 
2173     assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
2174     selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
2175     assert(selectedCompressor != NULL);
2176     return selectedCompressor;
2177 }
2178 
2179 static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
2180                                    const BYTE* anchor, size_t lastLLSize)
2181 {
2182     memcpy(seqStorePtr->lit, anchor, lastLLSize);
2183     seqStorePtr->lit += lastLLSize;
2184 }
2185 
2186 void ZSTD_resetSeqStore(seqStore_t* ssPtr)
2187 {
2188     ssPtr->lit = ssPtr->litStart;
2189     ssPtr->sequences = ssPtr->sequencesStart;
2190     ssPtr->longLengthID = 0;
2191 }
2192 
/* Outcome of ZSTD_buildSeqStore() : block content was analyzed into
 * sequences (compress), or deemed too small to be worth compressing (noCompress). */
typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
2194 
/* ZSTD_buildSeqStore() :
 * Run the match finder over `src` and fill zc->seqStore with the
 * resulting sequences. Sequences may come from an externally provided
 * sequence store, from the long-distance matcher, or from the regular
 * block compressor selected by strategy + dictionary mode.
 * @return : ZSTDbss_compress, ZSTDbss_noCompress, or an error code */
static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
{
    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
    DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
    /* Assert that we have correctly flushed the ctx params into the ms's copy */
    ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
        /* still consume the corresponding external/LDM sequences so state stays in sync */
        ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
        return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
    }
    ZSTD_resetSeqStore(&(zc->seqStore));
    /* required for optimal parser to read stats from dictionary */
    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
    /* tell the optimal parser how we expect to compress literals */
    ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
    /* a gap between an attached dict and the current window is not safe,
     * they must remain adjacent,
     * and when that stops being the case, the dict must be unset */
    assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);

    /* limited update after a very long match */
    {   const BYTE* const base = ms->window.base;
        const BYTE* const istart = (const BYTE*)src;
        const U32 current = (U32)(istart-base);
        if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1));   /* ensure no overflow */
        /* cap the amount of catch-up indexing to 192 positions */
        if (current > ms->nextToUpdate + 384)
            ms->nextToUpdate = current - MIN(192, (U32)(current - ms->nextToUpdate - 384));
    }

    /* select and store sequences */
    {   ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
        size_t lastLLSize;
        /* start next block's repcodes from previous block's confirmed values */
        {   int i;
            for (i = 0; i < ZSTD_REP_NUM; ++i)
                zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
        }
        if (zc->externSeqStore.pos < zc->externSeqStore.size) {
            /* caller provided pre-computed sequences (ZSTD_referenceExternalSequences) */
            assert(!zc->appliedParams.ldmParams.enableLdm);
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&zc->externSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       src, srcSize);
            assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
        } else if (zc->appliedParams.ldmParams.enableLdm) {
            /* long-distance matching : generate LDM sequences, then compress
             * the gaps between them with the regular match finder */
            rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0};

            ldmSeqStore.seq = zc->ldmSequences;
            ldmSeqStore.capacity = zc->maxNbLdmSequences;
            /* Updates ldmSeqStore.size */
            FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
                                               &zc->appliedParams.ldmParams,
                                               src, srcSize));
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&ldmSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       src, srcSize);
            assert(ldmSeqStore.pos == ldmSeqStore.size);
        } else {   /* not long range mode */
            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
        }
        /* copy the literals left over after the last sequence */
        {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
            ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
    }   }
    return ZSTDbss_compress;
}
2266 
/* ZSTD_compressBlock_internal() :
 * Compress one block : build the sequence store, then entropy-encode it.
 * On success with a non-empty result, swaps prev/next block state so the
 * new repcodes and entropy tables become the reference for the next block.
 * @return : compressed size, 0 if block should be stored uncompressed,
 *           or an error code */
static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
                                        void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize)
{
    size_t cSize;
    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate);

    {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
        FORWARD_IF_ERROR(bss);
        if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
    }

    /* encode sequences and literals */
    cSize = ZSTD_compressSequences(&zc->seqStore,
            &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
            &zc->appliedParams,
            dst, dstCapacity,
            srcSize,
            zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
            zc->bmi2);

out:
    if (!ZSTD_isError(cSize) && cSize != 0) {
        /* confirm repcodes and entropy tables when emitting a compressed block */
        ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
        zc->blockState.prevCBlock = zc->blockState.nextCBlock;
        zc->blockState.nextCBlock = tmp;
    }
    /* We check that dictionaries have offset codes available for the first
     * block. After the first block, the offcode table might not have large
     * enough codes to represent the offsets in the data.
     */
    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;

    return cSize;
}
2305 
2306 
2307 static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, void const* ip, void const* iend)
2308 {
2309     if (ZSTD_window_needOverflowCorrection(ms->window, iend)) {
2310         U32 const maxDist = (U32)1 << params->cParams.windowLog;
2311         U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
2312         U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
2313         ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
2314         ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
2315         ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
2316         ZSTD_reduceIndex(ms, params, correction);
2317         if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
2318         else ms->nextToUpdate -= correction;
2319         /* invalidate dictionaries on overflow correction */
2320         ms->loadedDictEnd = 0;
2321         ms->dictMatchState = NULL;
2322     }
2323 }
2324 
2325 
/*! ZSTD_compress_frameChunk() :
*   Compress a chunk of data into one or multiple blocks.
*   All blocks will be terminated, all input will be consumed.
*   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
*   Frame is supposed already started (header already produced)
*   @return : compressed size, or an error code
*/
static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
                                     void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                                     U32 lastFrameChunk)
{
    size_t blockSize = cctx->blockSize;
    size_t remaining = srcSize;
    const BYTE* ip = (const BYTE*)src;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
    assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);

    DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
    /* frame checksum covers the raw input, accumulated up front */
    if (cctx->appliedParams.fParams.checksumFlag && srcSize)
        XXH64_update(&cctx->xxhState, src, srcSize);

    while (remaining) {
        ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
        /* last block of the frame only if this chunk is last AND it fits in one block */
        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);

        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
                        dstSize_tooSmall,
                        "not enough space to store compressed block");
        if (remaining < blockSize) blockSize = remaining;

        ZSTD_overflowCorrectIfNeeded(ms, &cctx->appliedParams, ip, ip + blockSize);
        ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);

        /* Ensure hash/chain table insertion resumes no sooner than lowlimit */
        if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;

        {   size_t cSize = ZSTD_compressBlock_internal(cctx,
                                op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
                                ip, blockSize);
            FORWARD_IF_ERROR(cSize);

            if (cSize == 0) {  /* block is not compressible */
                cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
                FORWARD_IF_ERROR(cSize);
            } else {
                /* 3-byte block header : lastBlock bit, block type, then size */
                U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
                MEM_writeLE24(op, cBlockHeader24);
                cSize += ZSTD_blockHeaderSize;
            }

            ip += blockSize;
            assert(remaining >= blockSize);
            remaining -= blockSize;
            op += cSize;
            assert(dstCapacity >= cSize);
            dstCapacity -= cSize;
            DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
                        (unsigned)cSize);
    }   }

    if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
    return (size_t)(op-ostart);
}
2392 
2393 
/* ZSTD_writeFrameHeader() :
 * Write the zstd frame header into `dst` :
 * optional magic number (zstd1 format only), frame header descriptor byte,
 * optional window descriptor, optional dictID (0/1/2/4 bytes),
 * optional frame content size (0/1/2/4/8 bytes).
 * @return : number of bytes written, or an error code */
static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
                                    ZSTD_CCtx_params params, U64 pledgedSrcSize, U32 dictID)
{   BYTE* const op = (BYTE*)dst;
    U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
    U32   const dictIDSizeCode = params.fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
    U32   const checksumFlag = params.fParams.checksumFlag>0;
    U32   const windowSize = (U32)1 << params.cParams.windowLog;
    /* single-segment : whole content fits in the window, so the window descriptor byte is omitted */
    U32   const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
    BYTE  const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
    U32   const fcsCode = params.fParams.contentSizeFlag ?
                     (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
    BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
    size_t pos=0;

    assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
    RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall);
    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
                !params.fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);

    /* magicless format (ZSTD_f_zstd1_magicless) skips the magic number */
    if (params.format == ZSTD_f_zstd1) {
        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
        pos = 4;
    }
    op[pos++] = frameHeaderDescriptionByte;
    if (!singleSegment) op[pos++] = windowLogByte;
    switch(dictIDSizeCode)
    {
        default:  assert(0); /* impossible */
        case 0 : break;
        case 1 : op[pos] = (BYTE)(dictID); pos++; break;
        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
    }
    switch(fcsCode)
    {
        default:  assert(0); /* impossible */
        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
    }
    return pos;
}
2437 
2438 /* ZSTD_writeLastEmptyBlock() :
2439  * output an empty Block with end-of-frame mark to complete a frame
2440  * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
2441  *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
2442  */
2443 size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
2444 {
2445     RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall);
2446     {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
2447         MEM_writeLE24(dst, cBlockHeader24);
2448         return ZSTD_blockHeaderSize;
2449     }
2450 }
2451 
2452 size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
2453 {
2454     RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong);
2455     RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
2456                     parameter_unsupported);
2457     cctx->externSeqStore.seq = seq;
2458     cctx->externSeqStore.size = nbSeq;
2459     cctx->externSeqStore.capacity = nbSeq;
2460     cctx->externSeqStore.pos = 0;
2461     return 0;
2462 }
2463 
2464 
/* ZSTD_compressContinue_internal() :
 * Common driver for streaming (`frame`==1) and single-block (`frame`==0)
 * compression : writes the frame header on first call, keeps the match
 * state window in sync with the input, then compresses.
 * @return : number of bytes written into dst, or an error code */
static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
                              void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize,
                               U32 frame, U32 lastFrameChunk)
{
    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
    size_t fhSize = 0;

    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
                cctx->stage, (unsigned)srcSize);
    RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
                    "missing init (ZSTD_compressBegin)");

    /* first call in frame mode : emit the frame header */
    if (frame && (cctx->stage==ZSTDcs_init)) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
        FORWARD_IF_ERROR(fhSize);
        assert(fhSize <= dstCapacity);
        dstCapacity -= fhSize;
        dst = (char*)dst + fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */

    /* window rejected the input as non-contiguous : restart indexing at dictLimit */
    if (!ZSTD_window_update(&ms->window, src, srcSize)) {
        ms->nextToUpdate = ms->window.dictLimit;
    }
    if (cctx->appliedParams.ldmParams.enableLdm) {
        ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
    }

    if (!frame) {
        /* overflow check and correction for block mode */
        ZSTD_overflowCorrectIfNeeded(ms, &cctx->appliedParams, src, (BYTE const*)src + srcSize);
    }

    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
    {   size_t const cSize = frame ?
                             ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
                             ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
        FORWARD_IF_ERROR(cSize);
        cctx->consumedSrcSize += srcSize;
        cctx->producedCSize += (cSize + fhSize);
        assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
        if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
            ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
            RETURN_ERROR_IF(
                cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
                srcSize_wrong,
                "error : pledgedSrcSize = %u, while realSrcSize >= %u",
                (unsigned)cctx->pledgedSrcSizePlusOne-1,
                (unsigned)cctx->consumedSrcSize);
        }
        return cSize + fhSize;
    }
}
2522 
2523 size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
2524                               void* dst, size_t dstCapacity,
2525                         const void* src, size_t srcSize)
2526 {
2527     DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
2528     return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
2529 }
2530 
2531 
2532 size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
2533 {
2534     ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
2535     assert(!ZSTD_checkCParams(cParams));
2536     return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
2537 }
2538 
2539 size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
2540 {
2541     size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
2542     RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong);
2543 
2544     return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
2545 }
2546 
/*! ZSTD_loadDictionaryContent() :
 *  Index the dictionary content into the match state's tables, chunk by
 *  chunk, using the table-fill routine appropriate to the strategy.
 *  @return : 0, or an error code
 */
static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
                                         ZSTD_CCtx_params const* params,
                                         const void* src, size_t srcSize,
                                         ZSTD_dictTableLoadMethod_e dtlm)
{
    const BYTE* ip = (const BYTE*) src;
    const BYTE* const iend = ip + srcSize;

    ZSTD_window_update(&ms->window, src, srcSize);
    ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);

    /* Assert that the ms params match the params we're being given */
    ZSTD_assertEqualCParams(params->cParams, ms->cParams);

    /* too small to index : last HASH_READ_SIZE bytes can never be hashed anyway */
    if (srcSize <= HASH_READ_SIZE) return 0;

    while (iend - ip > HASH_READ_SIZE) {
        size_t const remaining = iend - ip;
        size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);
        const BYTE* const ichunk = ip + chunk;

        ZSTD_overflowCorrectIfNeeded(ms, params, ip, ichunk);

        switch(params->cParams.strategy)
        {
        case ZSTD_fast:
            ZSTD_fillHashTable(ms, ichunk, dtlm);
            break;
        case ZSTD_dfast:
            ZSTD_fillDoubleHashTable(ms, ichunk, dtlm);
            break;

        case ZSTD_greedy:
        case ZSTD_lazy:
        case ZSTD_lazy2:
            if (chunk >= HASH_READ_SIZE)
                ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE);
            break;

        case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
        case ZSTD_btopt:
        case ZSTD_btultra:
        case ZSTD_btultra2:
            if (chunk >= HASH_READ_SIZE)
                ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk);
            break;

        default:
            assert(0);  /* not possible : not a valid strategy id */
        }

        ip = ichunk;
    }

    /* resume normal indexing right after the dictionary */
    ms->nextToUpdate = (U32)(iend - ms->window.base);
    return 0;
}
2607 
2608 
2609 /* Dictionaries that assign zero probability to symbols that show up causes problems
2610    when FSE encoding.  Refuse dictionaries that assign zero probability to symbols
2611    that we may encounter during compression.
2612    NOTE: This behavior is not standard and could be improved in the future. */
2613 static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
2614     U32 s;
2615     RETURN_ERROR_IF(dictMaxSymbolValue < maxSymbolValue, dictionary_corrupted);
2616     for (s = 0; s <= maxSymbolValue; ++s) {
2617         RETURN_ERROR_IF(normalizedCounter[s] == 0, dictionary_corrupted);
2618     }
2619     return 0;
2620 }
2621 
2622 
/* Dictionary format :
 * See :
 * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
 */
/*! ZSTD_loadZstdDictionary() :
 * Parse a full zstd dictionary : entropy tables (Huffman literals table,
 * then FSE offcode / matchlength / litlength tables), starting repcodes,
 * and finally the raw content, which is indexed into the match state.
 * @return : dictID, or an error code
 *  assumptions : magic number supposed already checked
 *                dictSize supposed > 8
 */
static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
                                      ZSTD_matchState_t* ms,
                                      ZSTD_CCtx_params const* params,
                                      const void* dict, size_t dictSize,
                                      ZSTD_dictTableLoadMethod_e dtlm,
                                      void* workspace)
{
    const BYTE* dictPtr = (const BYTE*)dict;
    const BYTE* const dictEnd = dictPtr + dictSize;
    short offcodeNCount[MaxOff+1];
    unsigned offcodeMaxValue = MaxOff;
    size_t dictID;

    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
    assert(dictSize > 8);
    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);

    dictPtr += 4;   /* skip magic number */
    dictID = params->fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr);
    dictPtr += 4;

    /* literals : Huffman table, must cover all 256 byte values */
    {   unsigned maxSymbolValue = 255;
        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
        RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted);
        RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted);
        dictPtr += hufHeaderSize;
    }

    /* offset codes : FSE table */
    {   unsigned offcodeLog;
        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted);
        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted);
        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
        /* fill all offset symbols to avoid garbage at end of table */
        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
                bs->entropy.fse.offcodeCTable,
                offcodeNCount, MaxOff, offcodeLog,
                workspace, HUF_WORKSPACE_SIZE)),
            dictionary_corrupted);
        dictPtr += offcodeHeaderSize;
    }

    /* match lengths : FSE table */
    {   short matchlengthNCount[MaxML+1];
        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted);
        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted);
        /* Every match length code must have non-zero probability */
        FORWARD_IF_ERROR( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
                bs->entropy.fse.matchlengthCTable,
                matchlengthNCount, matchlengthMaxValue, matchlengthLog,
                workspace, HUF_WORKSPACE_SIZE)),
            dictionary_corrupted);
        dictPtr += matchlengthHeaderSize;
    }

    /* literal lengths : FSE table */
    {   short litlengthNCount[MaxLL+1];
        unsigned litlengthMaxValue = MaxLL, litlengthLog;
        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted);
        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted);
        /* Every literal length code must have non-zero probability */
        FORWARD_IF_ERROR( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
                bs->entropy.fse.litlengthCTable,
                litlengthNCount, litlengthMaxValue, litlengthLog,
                workspace, HUF_WORKSPACE_SIZE)),
            dictionary_corrupted);
        dictPtr += litlengthHeaderSize;
    }

    /* starting repcodes : three 4-byte little-endian values */
    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted);
    bs->rep[0] = MEM_readLE32(dictPtr+0);
    bs->rep[1] = MEM_readLE32(dictPtr+4);
    bs->rep[2] = MEM_readLE32(dictPtr+8);
    dictPtr += 12;

    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
        U32 offcodeMax = MaxOff;
        if (dictContentSize <= ((U32)-1) - 128 KB) {
            U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
            offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
        }
        /* All offset values <= dictContentSize + 128 KB must be representable */
        FORWARD_IF_ERROR(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
        /* All repCodes must be <= dictContentSize and != 0*/
        {   U32 u;
            for (u=0; u<3; u++) {
                RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted);
                RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted);
        }   }

        /* dictionary entropy tables are now trusted for reuse */
        bs->entropy.huf.repeatMode = HUF_repeat_valid;
        bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid;
        bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid;
        bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid;
        FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
        return dictID;
    }
}
2733 
/** ZSTD_compress_insertDictionary() :
*   Dispatch dictionary loading according to the declared content type :
*   raw content is only indexed, a full zstd dictionary also loads entropy
*   tables and repcodes; auto mode detects via the magic number.
*   @return : dictID, or an error code */
static size_t
ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
                               ZSTD_matchState_t* ms,
                         const ZSTD_CCtx_params* params,
                         const void* dict, size_t dictSize,
                               ZSTD_dictContentType_e dictContentType,
                               ZSTD_dictTableLoadMethod_e dtlm,
                               void* workspace)
{
    DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
    /* dictionaries of 8 bytes or fewer carry no usable content */
    if ((dict==NULL) || (dictSize<=8)) return 0;

    ZSTD_reset_compressedBlockState(bs);

    /* dict restricted modes */
    if (dictContentType == ZSTD_dct_rawContent)
        return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);

    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
        if (dictContentType == ZSTD_dct_auto) {
            DEBUGLOG(4, "raw content dictionary detected");
            return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
        }
        /* fullDict was requested but the magic number is missing */
        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong);
        assert(0);   /* impossible */
    }

    /* dict as full zstd dictionary */
    return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, dtlm, workspace);
}
2766 
2767 /*! ZSTD_compressBegin_internal() :
2768  * @return : 0, or an error code */
2769 static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
2770                                     const void* dict, size_t dictSize,
2771                                     ZSTD_dictContentType_e dictContentType,
2772                                     ZSTD_dictTableLoadMethod_e dtlm,
2773                                     const ZSTD_CDict* cdict,
2774                                     ZSTD_CCtx_params params, U64 pledgedSrcSize,
2775                                     ZSTD_buffered_policy_e zbuff)
2776 {
2777     DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params.cParams.windowLog);
2778     /* params are supposed to be fully validated at this point */
2779     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
2780     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
2781 
2782     if (cdict && cdict->dictContentSize>0) {
2783         return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
2784     }
2785 
2786     FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
2787                                      ZSTDcrp_continue, zbuff) );
2788     {   size_t const dictID = ZSTD_compress_insertDictionary(
2789                 cctx->blockState.prevCBlock, &cctx->blockState.matchState,
2790                 &params, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace);
2791         FORWARD_IF_ERROR(dictID);
2792         assert(dictID <= UINT_MAX);
2793         cctx->dictID = (U32)dictID;
2794     }
2795     return 0;
2796 }
2797 
2798 size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
2799                                     const void* dict, size_t dictSize,
2800                                     ZSTD_dictContentType_e dictContentType,
2801                                     ZSTD_dictTableLoadMethod_e dtlm,
2802                                     const ZSTD_CDict* cdict,
2803                                     ZSTD_CCtx_params params,
2804                                     unsigned long long pledgedSrcSize)
2805 {
2806     DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog);
2807     /* compression parameters verification and optimization */
2808     FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
2809     return ZSTD_compressBegin_internal(cctx,
2810                                        dict, dictSize, dictContentType, dtlm,
2811                                        cdict,
2812                                        params, pledgedSrcSize,
2813                                        ZSTDb_not_buffered);
2814 }
2815 
2816 /*! ZSTD_compressBegin_advanced() :
2817 *   @return : 0, or an error code */
2818 size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
2819                              const void* dict, size_t dictSize,
2820                                    ZSTD_parameters params, unsigned long long pledgedSrcSize)
2821 {
2822     ZSTD_CCtx_params const cctxParams =
2823             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
2824     return ZSTD_compressBegin_advanced_internal(cctx,
2825                                             dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
2826                                             NULL /*cdict*/,
2827                                             cctxParams, pledgedSrcSize);
2828 }
2829 
2830 size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
2831 {
2832     ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
2833     ZSTD_CCtx_params const cctxParams =
2834             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
2835     DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
2836     return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
2837                                        cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
2838 }
2839 
2840 size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
2841 {
2842     return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
2843 }
2844 
2845 
/*! ZSTD_writeEpilogue() :
*   Ends a frame.
*   Emits the frame header (for an empty frame), a last empty raw block when
*   the frame is not already ended, and the optional 32-bit checksum.
*   Resets cctx->stage back to ZSTDcs_created when done.
*   @return : nb of bytes written into dst (or an error code) */
static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
{
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    size_t fhSize = 0;

    DEBUGLOG(4, "ZSTD_writeEpilogue");
    RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");

    /* special case : empty frame */
    if (cctx->stage == ZSTDcs_init) {
        /* no data was ever submitted : emit the frame header now (content size 0) */
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0);
        FORWARD_IF_ERROR(fhSize);
        dstCapacity -= fhSize;
        op += fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (cctx->stage != ZSTDcs_ending) {
        /* write one last empty block, make it the "last" block */
        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
        /* note : MEM_writeLE32 stores 4 bytes, hence the check against 4,
         * even though only ZSTD_blockHeaderSize bytes advance `op` */
        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);
        MEM_writeLE32(op, cBlockHeader24);
        op += ZSTD_blockHeaderSize;
        dstCapacity -= ZSTD_blockHeaderSize;
    }

    if (cctx->appliedParams.fParams.checksumFlag) {
        /* frame checksum = lowest 32 bits of the accumulated XXH64 state */
        U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall);
        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
        MEM_writeLE32(op, checksum);
        op += 4;
    }

    cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
    return op-ostart;
}
2887 
/* ZSTD_compressEnd() :
 * compress the last chunk of a frame, then append the epilogue
 * (last-block marker + optional checksum).
 * When a source size was pledged at init, verifies the total consumed
 * matches it exactly.
 * @return : total nb of bytes written into dst, or an error code */
size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
                         void* dst, size_t dstCapacity,
                   const void* src, size_t srcSize)
{
    size_t endResult;
    /* compress final chunk in frame mode, flagged as the last one */
    size_t const cSize = ZSTD_compressContinue_internal(cctx,
                                dst, dstCapacity, src, srcSize,
                                1 /* frame mode */, 1 /* last chunk */);
    FORWARD_IF_ERROR(cSize);
    /* epilogue goes right after the compressed payload */
    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
    FORWARD_IF_ERROR(endResult);
    assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
    if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
        /* pledgedSrcSizePlusOne==0 means "unknown", hence the +1 encoding */
        ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
        DEBUGLOG(4, "end of frame : controlling src size");
        RETURN_ERROR_IF(
            cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
            srcSize_wrong,
             "error : pledgedSrcSize = %u, while realSrcSize = %u",
            (unsigned)cctx->pledgedSrcSizePlusOne-1,
            (unsigned)cctx->consumedSrcSize);
    }
    return cSize + endResult;
}
2912 
2913 
2914 static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
2915                                       void* dst, size_t dstCapacity,
2916                                 const void* src, size_t srcSize,
2917                                 const void* dict,size_t dictSize,
2918                                       ZSTD_parameters params)
2919 {
2920     ZSTD_CCtx_params const cctxParams =
2921             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
2922     DEBUGLOG(4, "ZSTD_compress_internal");
2923     return ZSTD_compress_advanced_internal(cctx,
2924                                            dst, dstCapacity,
2925                                            src, srcSize,
2926                                            dict, dictSize,
2927                                            cctxParams);
2928 }
2929 
2930 size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
2931                                void* dst, size_t dstCapacity,
2932                          const void* src, size_t srcSize,
2933                          const void* dict,size_t dictSize,
2934                                ZSTD_parameters params)
2935 {
2936     DEBUGLOG(4, "ZSTD_compress_advanced");
2937     FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams));
2938     return ZSTD_compress_internal(cctx,
2939                                   dst, dstCapacity,
2940                                   src, srcSize,
2941                                   dict, dictSize,
2942                                   params);
2943 }
2944 
2945 /* Internal */
2946 size_t ZSTD_compress_advanced_internal(
2947         ZSTD_CCtx* cctx,
2948         void* dst, size_t dstCapacity,
2949         const void* src, size_t srcSize,
2950         const void* dict,size_t dictSize,
2951         ZSTD_CCtx_params params)
2952 {
2953     DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
2954     FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
2955                          dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
2956                          params, srcSize, ZSTDb_not_buffered) );
2957     return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
2958 }
2959 
2960 size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
2961                                void* dst, size_t dstCapacity,
2962                          const void* src, size_t srcSize,
2963                          const void* dict, size_t dictSize,
2964                                int compressionLevel)
2965 {
2966     ZSTD_parameters const params = ZSTD_getParams(compressionLevel, srcSize + (!srcSize), dict ? dictSize : 0);
2967     ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
2968     assert(params.fParams.contentSizeFlag == 1);
2969     return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, cctxParams);
2970 }
2971 
2972 size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
2973                          void* dst, size_t dstCapacity,
2974                    const void* src, size_t srcSize,
2975                          int compressionLevel)
2976 {
2977     DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
2978     assert(cctx != NULL);
2979     return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
2980 }
2981 
2982 size_t ZSTD_compress(void* dst, size_t dstCapacity,
2983                const void* src, size_t srcSize,
2984                      int compressionLevel)
2985 {
2986     size_t result;
2987     ZSTD_CCtx ctxBody;
2988     ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);
2989     result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
2990     ZSTD_freeCCtxContent(&ctxBody);   /* can't free ctxBody itself, as it's on stack; free only heap content */
2991     return result;
2992 }
2993 
2994 
2995 /* =====  Dictionary API  ===== */
2996 
2997 /*! ZSTD_estimateCDictSize_advanced() :
2998  *  Estimate amount of memory that will be needed to create a dictionary with following arguments */
2999 size_t ZSTD_estimateCDictSize_advanced(
3000         size_t dictSize, ZSTD_compressionParameters cParams,
3001         ZSTD_dictLoadMethod_e dictLoadMethod)
3002 {
3003     DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
3004     return sizeof(ZSTD_CDict) + HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
3005            + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
3006 }
3007 
3008 size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
3009 {
3010     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
3011     return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
3012 }
3013 
3014 size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
3015 {
3016     if (cdict==NULL) return 0;   /* support sizeof on NULL */
3017     DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
3018     return cdict->workspaceSize + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
3019 }
3020 
/* ZSTD_initCDict_internal() :
 * fill an already-allocated cdict : reference or copy the dictionary content,
 * lay out the match state inside cdict->workspace (right after the HUF
 * workspace region), then digest the dictionary.
 * Requires cdict->customMem, cdict->workspace and cdict->workspaceSize
 * to be set by the caller.
 * @return : 0, or an error code */
static size_t ZSTD_initCDict_internal(
                    ZSTD_CDict* cdict,
              const void* dictBuffer, size_t dictSize,
                    ZSTD_dictLoadMethod_e dictLoadMethod,
                    ZSTD_dictContentType_e dictContentType,
                    ZSTD_compressionParameters cParams)
{
    DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
    assert(!ZSTD_checkCParams(cParams));
    cdict->matchState.cParams = cParams;
    /* byRef (or nothing to copy) : reference the caller's buffer directly */
    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
        cdict->dictBuffer = NULL;
        cdict->dictContent = dictBuffer;
    } else {
        /* byCopy : duplicate the dictionary into an internally-owned buffer */
        void* const internalBuffer = ZSTD_malloc(dictSize, cdict->customMem);
        cdict->dictBuffer = internalBuffer;
        cdict->dictContent = internalBuffer;
        RETURN_ERROR_IF(!internalBuffer, memory_allocation);
        memcpy(internalBuffer, dictBuffer, dictSize);
    }
    cdict->dictContentSize = dictSize;

    /* Reset the state to no dictionary */
    ZSTD_reset_compressedBlockState(&cdict->cBlockState);
    /* match state tables start after the HUF workspace region of cdict->workspace */
    {   void* const end = ZSTD_reset_matchState(&cdict->matchState,
                            (U32*)cdict->workspace + HUF_WORKSPACE_SIZE_U32,
                            &cParams,
                             ZSTDcrp_continue, ZSTD_resetTarget_CDict);
        /* the match state must consume exactly the remaining workspace */
        assert(end == (char*)cdict->workspace + cdict->workspaceSize);
        (void)end;
    }
    /* (Maybe) load the dictionary
     * Skips loading the dictionary if it is <= 8 bytes.
     */
    {   ZSTD_CCtx_params params;
        memset(&params, 0, sizeof(params));
        params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
        params.fParams.contentSizeFlag = 1;
        params.cParams = cParams;
        {   size_t const dictID = ZSTD_compress_insertDictionary(
                    &cdict->cBlockState, &cdict->matchState, &params,
                    cdict->dictContent, cdict->dictContentSize,
                    dictContentType, ZSTD_dtlm_full, cdict->workspace);
            FORWARD_IF_ERROR(dictID);
            assert(dictID <= (size_t)(U32)-1);
            cdict->dictID = (U32)dictID;
        }
    }

    return 0;
}
3072 
3073 ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
3074                                       ZSTD_dictLoadMethod_e dictLoadMethod,
3075                                       ZSTD_dictContentType_e dictContentType,
3076                                       ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
3077 {
3078     DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (unsigned)dictContentType);
3079     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
3080 
3081     {   ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
3082         size_t const workspaceSize = HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
3083         void* const workspace = ZSTD_malloc(workspaceSize, customMem);
3084 
3085         if (!cdict || !workspace) {
3086             ZSTD_free(cdict, customMem);
3087             ZSTD_free(workspace, customMem);
3088             return NULL;
3089         }
3090         cdict->customMem = customMem;
3091         cdict->workspace = workspace;
3092         cdict->workspaceSize = workspaceSize;
3093         if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
3094                                         dictBuffer, dictSize,
3095                                         dictLoadMethod, dictContentType,
3096                                         cParams) )) {
3097             ZSTD_freeCDict(cdict);
3098             return NULL;
3099         }
3100 
3101         return cdict;
3102     }
3103 }
3104 
3105 ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
3106 {
3107     ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
3108     return ZSTD_createCDict_advanced(dict, dictSize,
3109                                      ZSTD_dlm_byCopy, ZSTD_dct_auto,
3110                                      cParams, ZSTD_defaultCMem);
3111 }
3112 
3113 ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
3114 {
3115     ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
3116     return ZSTD_createCDict_advanced(dict, dictSize,
3117                                      ZSTD_dlm_byRef, ZSTD_dct_auto,
3118                                      cParams, ZSTD_defaultCMem);
3119 }
3120 
3121 size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
3122 {
3123     if (cdict==NULL) return 0;   /* support free on NULL */
3124     {   ZSTD_customMem const cMem = cdict->customMem;
3125         ZSTD_free(cdict->workspace, cMem);
3126         ZSTD_free(cdict->dictBuffer, cMem);
3127         ZSTD_free(cdict, cMem);
3128         return 0;
3129     }
3130 }
3131 
/*! ZSTD_initStaticCDict_advanced() :
 *  Generate a digested dictionary in provided memory area.
 *  workspace: The memory area to emplace the dictionary into.
 *             Provided pointer must 8-bytes aligned.
 *             It must outlive dictionary usage.
 *  workspaceSize: Use ZSTD_estimateCDictSize()
 *                 to determine how large workspace must be.
 *  cParams : use ZSTD_getCParams() to transform a compression level
 *            into its relevants cParams.
 *  Workspace layout : [ ZSTD_CDict | dict copy (byCopy only) | HUF wksp + match state ].
 * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
 *  Note : there is no corresponding "free" function.
 *         Since workspace was allocated externally, it must be freed externally.
 */
const ZSTD_CDict* ZSTD_initStaticCDict(
                                 void* workspace, size_t workspaceSize,
                           const void* dict, size_t dictSize,
                                 ZSTD_dictLoadMethod_e dictLoadMethod,
                                 ZSTD_dictContentType_e dictContentType,
                                 ZSTD_compressionParameters cParams)
{
    size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
    /* byRef skips the in-workspace dictionary copy */
    size_t const neededSize = sizeof(ZSTD_CDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize)
                            + HUF_WORKSPACE_SIZE + matchStateSize;
    ZSTD_CDict* const cdict = (ZSTD_CDict*) workspace;
    void* ptr;
    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
    DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
        (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
    if (workspaceSize < neededSize) return NULL;

    if (dictLoadMethod == ZSTD_dlm_byCopy) {
        /* place the dictionary copy immediately after the ZSTD_CDict struct */
        memcpy(cdict+1, dict, dictSize);
        dict = cdict+1;
        ptr = (char*)workspace + sizeof(ZSTD_CDict) + dictSize;
    } else {
        ptr = cdict+1;
    }
    /* remainder of the workspace serves as the cdict's internal workspace */
    cdict->workspace = ptr;
    cdict->workspaceSize = HUF_WORKSPACE_SIZE + matchStateSize;

    /* dict already resides in (or is referenced from) workspace : always byRef here */
    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                              dict, dictSize,
                                              ZSTD_dlm_byRef, dictContentType,
                                              cParams) ))
        return NULL;

    return cdict;
}
3180 
3181 ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
3182 {
3183     assert(cdict != NULL);
3184     return cdict->matchState.cParams;
3185 }
3186 
/* ZSTD_compressBegin_usingCDict_advanced() :
 * start a frame using a digested dictionary, with explicit frame parameters.
 * cdict must be != NULL */
size_t ZSTD_compressBegin_usingCDict_advanced(
    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
    RETURN_ERROR_IF(cdict==NULL, dictionary_wrong);
    {   ZSTD_CCtx_params params = cctx->requestedParams;
        /* compression parameters come from the cdict, not from the context */
        params.cParams = ZSTD_getCParamsFromCDict(cdict);
        /* Increase window log to fit the entire dictionary and source if the
         * source size is known. Limit the increase to 19, which is the
         * window log for compression level 1 with the largest source size.
         */
        if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
            U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
            /* smallest power-of-2 log covering limitedSrcSize (min 1) */
            U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
            params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog);
        }
        params.fParams = fParams;
        return ZSTD_compressBegin_internal(cctx,
                                           NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                           cdict,
                                           params, pledgedSrcSize,
                                           ZSTDb_not_buffered);
    }
}
3214 
3215 /* ZSTD_compressBegin_usingCDict() :
3216  * pledgedSrcSize=0 means "unknown"
3217  * if pledgedSrcSize>0, it will enable contentSizeFlag */
3218 size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
3219 {
3220     ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
3221     DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
3222     return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
3223 }
3224 
3225 size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
3226                                 void* dst, size_t dstCapacity,
3227                                 const void* src, size_t srcSize,
3228                                 const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
3229 {
3230     FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize));   /* will check if cdict != NULL */
3231     return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
3232 }
3233 
3234 /*! ZSTD_compress_usingCDict() :
3235  *  Compression using a digested Dictionary.
3236  *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
3237  *  Note that compression parameters are decided at CDict creation time
3238  *  while frame parameters are hardcoded */
3239 size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
3240                                 void* dst, size_t dstCapacity,
3241                                 const void* src, size_t srcSize,
3242                                 const ZSTD_CDict* cdict)
3243 {
3244     ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
3245     return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
3246 }
3247 
3248 
3249 
3250 /* ******************************************************************
3251 *  Streaming
3252 ********************************************************************/
3253 
3254 ZSTD_CStream* ZSTD_createCStream(void)
3255 {
3256     DEBUGLOG(3, "ZSTD_createCStream");
3257     return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
3258 }
3259 
3260 ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
3261 {
3262     return ZSTD_initStaticCCtx(workspace, workspaceSize);
3263 }
3264 
3265 ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
3266 {   /* CStream and CCtx are now same object */
3267     return ZSTD_createCCtx_advanced(customMem);
3268 }
3269 
3270 size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
3271 {
3272     return ZSTD_freeCCtx(zcs);   /* same object */
3273 }
3274 
3275 
3276 
3277 /*======   Initialization   ======*/
3278 
3279 size_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX; }
3280 
3281 size_t ZSTD_CStreamOutSize(void)
3282 {
3283     return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
3284 }
3285 
/* ZSTD_resetCStream_internal() :
 * finalize parameters, start a buffered frame, and reset all streaming
 * buffer positions.
 * @return : 0, or an error code */
static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
                    const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType,
                    const ZSTD_CDict* const cdict,
                    ZSTD_CCtx_params params, unsigned long long const pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_resetCStream_internal");
    /* Finalize the compression parameters */
    params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);
    /* params are supposed to be fully validated at this point */
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */

    FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
                                         dict, dictSize, dictContentType, ZSTD_dtlm_fast,
                                         cdict,
                                         params, pledgedSrcSize,
                                         ZSTDb_buffered) );

    /* restart input buffering from scratch */
    cctx->inToCompress = 0;
    cctx->inBuffPos = 0;
    cctx->inBuffTarget = cctx->blockSize
                      + (cctx->blockSize == pledgedSrcSize);   /* for small input: avoid automatic flush on reaching end of block, since it would require to add a 3-bytes null block to end frame */
    cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
    cctx->streamStage = zcss_load;
    cctx->frameEnded = 0;
    return 0;   /* ready to go */
}
3313 
3314 /* ZSTD_resetCStream():
3315  * pledgedSrcSize == 0 means "unknown" */
3316 size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
3317 {
3318     /* temporary : 0 interpreted as "unknown" during transition period.
3319      * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
3320      * 0 will be interpreted as "empty" in the future.
3321      */
3322     U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
3323     DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
3324     FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
3325     FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
3326     return 0;
3327 }
3328 
3329 /*! ZSTD_initCStream_internal() :
3330  *  Note : for lib/compress only. Used by zstdmt_compress.c.
3331  *  Assumption 1 : params are valid
3332  *  Assumption 2 : either dict, or cdict, is defined, not both */
3333 size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
3334                     const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
3335                     ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
3336 {
3337     DEBUGLOG(4, "ZSTD_initCStream_internal");
3338     FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
3339     FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
3340     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
3341     zcs->requestedParams = params;
3342     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
3343     if (dict) {
3344         FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
3345     } else {
3346         /* Dictionary is cleared if !cdict */
3347         FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
3348     }
3349     return 0;
3350 }
3351 
3352 /* ZSTD_initCStream_usingCDict_advanced() :
3353  * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
3354 size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
3355                                             const ZSTD_CDict* cdict,
3356                                             ZSTD_frameParameters fParams,
3357                                             unsigned long long pledgedSrcSize)
3358 {
3359     DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
3360     FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
3361     FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
3362     zcs->requestedParams.fParams = fParams;
3363     FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
3364     return 0;
3365 }
3366 
3367 /* note : cdict must outlive compression session */
3368 size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
3369 {
3370     DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
3371     FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
3372     FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) );
3373     return 0;
3374 }
3375 
3376 
3377 /* ZSTD_initCStream_advanced() :
3378  * pledgedSrcSize must be exact.
3379  * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
3380  * dict is loaded with default parameters ZSTD_dm_auto and ZSTD_dlm_byCopy. */
3381 size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
3382                                  const void* dict, size_t dictSize,
3383                                  ZSTD_parameters params, unsigned long long pss)
3384 {
3385     /* for compatibility with older programs relying on this behavior.
3386      * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
3387      * This line will be removed in the future.
3388      */
3389     U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
3390     DEBUGLOG(4, "ZSTD_initCStream_advanced");
3391     FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
3392     FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
3393     FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) );
3394     zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
3395     FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
3396     return 0;
3397 }
3398 
3399 size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
3400 {
3401     DEBUGLOG(4, "ZSTD_initCStream_usingDict");
3402     FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
3403     FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
3404     FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) );
3405     return 0;
3406 }
3407 
3408 size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
3409 {
3410     /* temporary : 0 interpreted as "unknown" during transition period.
3411      * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
3412      * 0 will be interpreted as "empty" in the future.
3413      */
3414     U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
3415     DEBUGLOG(4, "ZSTD_initCStream_srcSize");
3416     FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
3417     FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) );
3418     FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
3419     FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) );
3420     return 0;
3421 }
3422 
/* ZSTD_initCStream() :
 * Simplest initializer : resets the session, detaches any referenced CDict,
 * and applies the requested compression level.
 * @return : 0 on success, or an error code */
size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
{
    DEBUGLOG(4, "ZSTD_initCStream");
    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) );
    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) );
    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) );
    return 0;
}
3431 
3432 /*======   Compression   ======*/
3433 
3434 static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
3435 {
3436     size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
3437     if (hintInSize==0) hintInSize = cctx->blockSize;
3438     return hintInSize;
3439 }
3440 
/* ZSTD_limitCopy() :
 * Copy as much of `src` as fits into `dst`.
 * @return : number of bytes actually copied (min of both sizes). */
static size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
                       const void* src, size_t srcSize)
{
    size_t const toCopy = (dstCapacity < srcSize) ? dstCapacity : srcSize;
    if (toCopy > 0) memcpy(dst, src, toCopy);
    return toCopy;
}
3448 
3449 /** ZSTD_compressStream_generic():
3450  *  internal function for all *compressStream*() variants
3451  *  non-static, because can be called from zstdmt_compress.c
3452  * @return : hint size for next input */
3453 static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
3454                                           ZSTD_outBuffer* output,
3455                                           ZSTD_inBuffer* input,
3456                                           ZSTD_EndDirective const flushMode)
3457 {
3458     const char* const istart = (const char*)input->src;
3459     const char* const iend = istart + input->size;
3460     const char* ip = istart + input->pos;
3461     char* const ostart = (char*)output->dst;
3462     char* const oend = ostart + output->size;
3463     char* op = ostart + output->pos;
3464     U32 someMoreWork = 1;
3465 
3466     /* check expectations */
3467     DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
3468     assert(zcs->inBuff != NULL);
3469     assert(zcs->inBuffSize > 0);
3470     assert(zcs->outBuff !=  NULL);
3471     assert(zcs->outBuffSize > 0);
3472     assert(output->pos <= output->size);
3473     assert(input->pos <= input->size);
3474 
3475     while (someMoreWork) {
3476         switch(zcs->streamStage)
3477         {
3478         case zcss_init:
3479             RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
3480 
3481         case zcss_load:
3482             if ( (flushMode == ZSTD_e_end)
3483               && ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip))  /* enough dstCapacity */
3484               && (zcs->inBuffPos == 0) ) {
3485                 /* shortcut to compression pass directly into output buffer */
3486                 size_t const cSize = ZSTD_compressEnd(zcs,
3487                                                 op, oend-op, ip, iend-ip);
3488                 DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
3489                 FORWARD_IF_ERROR(cSize);
3490                 ip = iend;
3491                 op += cSize;
3492                 zcs->frameEnded = 1;
3493                 ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
3494                 someMoreWork = 0; break;
3495             }
3496             /* complete loading into inBuffer */
3497             {   size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
3498                 size_t const loaded = ZSTD_limitCopy(
3499                                         zcs->inBuff + zcs->inBuffPos, toLoad,
3500                                         ip, iend-ip);
3501                 zcs->inBuffPos += loaded;
3502                 ip += loaded;
3503                 if ( (flushMode == ZSTD_e_continue)
3504                   && (zcs->inBuffPos < zcs->inBuffTarget) ) {
3505                     /* not enough input to fill full block : stop here */
3506                     someMoreWork = 0; break;
3507                 }
3508                 if ( (flushMode == ZSTD_e_flush)
3509                   && (zcs->inBuffPos == zcs->inToCompress) ) {
3510                     /* empty */
3511                     someMoreWork = 0; break;
3512                 }
3513             }
3514             /* compress current block (note : this stage cannot be stopped in the middle) */
3515             DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
3516             {   void* cDst;
3517                 size_t cSize;
3518                 size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
3519                 size_t oSize = oend-op;
3520                 unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
3521                 if (oSize >= ZSTD_compressBound(iSize))
3522                     cDst = op;   /* compress into output buffer, to skip flush stage */
3523                 else
3524                     cDst = zcs->outBuff, oSize = zcs->outBuffSize;
3525                 cSize = lastBlock ?
3526                         ZSTD_compressEnd(zcs, cDst, oSize,
3527                                     zcs->inBuff + zcs->inToCompress, iSize) :
3528                         ZSTD_compressContinue(zcs, cDst, oSize,
3529                                     zcs->inBuff + zcs->inToCompress, iSize);
3530                 FORWARD_IF_ERROR(cSize);
3531                 zcs->frameEnded = lastBlock;
3532                 /* prepare next block */
3533                 zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
3534                 if (zcs->inBuffTarget > zcs->inBuffSize)
3535                     zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
3536                 DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
3537                          (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
3538                 if (!lastBlock)
3539                     assert(zcs->inBuffTarget <= zcs->inBuffSize);
3540                 zcs->inToCompress = zcs->inBuffPos;
3541                 if (cDst == op) {  /* no need to flush */
3542                     op += cSize;
3543                     if (zcs->frameEnded) {
3544                         DEBUGLOG(5, "Frame completed directly in outBuffer");
3545                         someMoreWork = 0;
3546                         ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
3547                     }
3548                     break;
3549                 }
3550                 zcs->outBuffContentSize = cSize;
3551                 zcs->outBuffFlushedSize = 0;
3552                 zcs->streamStage = zcss_flush; /* pass-through to flush stage */
3553             }
3554 	    /* fall-through */
3555         case zcss_flush:
3556             DEBUGLOG(5, "flush stage");
3557             {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
3558                 size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
3559                             zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
3560                 DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
3561                             (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
3562                 op += flushed;
3563                 zcs->outBuffFlushedSize += flushed;
3564                 if (toFlush!=flushed) {
3565                     /* flush not fully completed, presumably because dst is too small */
3566                     assert(op==oend);
3567                     someMoreWork = 0;
3568                     break;
3569                 }
3570                 zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
3571                 if (zcs->frameEnded) {
3572                     DEBUGLOG(5, "Frame completed on flush");
3573                     someMoreWork = 0;
3574                     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
3575                     break;
3576                 }
3577                 zcs->streamStage = zcss_load;
3578                 break;
3579             }
3580 
3581         default: /* impossible */
3582             assert(0);
3583         }
3584     }
3585 
3586     input->pos = ip - istart;
3587     output->pos = op - ostart;
3588     if (zcs->frameEnded) return 0;
3589     return ZSTD_nextInputSizeHint(zcs);
3590 }
3591 
/* ZSTD_nextInputSizeHint_MTorST() :
 * Dispatch the next-input-size hint : to the multi-threaded context when
 * nbWorkers >= 1 (multithread builds only), otherwise to the single-threaded
 * buffering logic. */
static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    if (cctx->appliedParams.nbWorkers >= 1) {
        assert(cctx->mtctx != NULL);
        return ZSTDMT_nextInputSizeHint(cctx->mtctx);
    }
#endif
    return ZSTD_nextInputSizeHint(cctx);

}
3603 
/* ZSTD_compressStream() :
 * Equivalent to ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
 * @return : a suggested size for the next input chunk, or an error code */
size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
    FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) );
    return ZSTD_nextInputSizeHint_MTorST(zcs);
}
3609 
3610 
/* ZSTD_compressStream2() :
 * Main entry point of the advanced streaming API.
 * On the first call of a session (streamStage == zcss_init), performs a
 * "transparent" initialization from the requested parameters, prefix
 * dictionary and/or cdict, possibly spinning up a multi-threaded context.
 * Then runs the compression itself, single- or multi-threaded.
 * @return : remaining bytes to flush (0 when fully flushed), or an error code */
size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
                             ZSTD_outBuffer* output,
                             ZSTD_inBuffer* input,
                             ZSTD_EndDirective endOp)
{
    DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
    /* check conditions */
    RETURN_ERROR_IF(output->pos > output->size, GENERIC);
    RETURN_ERROR_IF(input->pos  > input->size, GENERIC);
    assert(cctx!=NULL);

    /* transparent initialization stage */
    if (cctx->streamStage == zcss_init) {
        ZSTD_CCtx_params params = cctx->requestedParams;
        ZSTD_prefixDict const prefixDict = cctx->prefixDict;
        FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) ); /* Init the local dict if present. */
        memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));   /* single usage */
        assert(prefixDict.dict==NULL || cctx->cdict==NULL);    /* only one can be set */
        DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
        if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1;  /* auto-fix pledgedSrcSize */
        params.cParams = ZSTD_getCParamsFromCCtxParams(
                &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);


#ifdef ZSTD_MULTITHREAD
        if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
            params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
        }
        if (params.nbWorkers > 0) {
            /* mt context creation : lazily allocated on first MT session */
            if (cctx->mtctx == NULL) {
                DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
                            params.nbWorkers);
                cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
                RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation);
            }
            /* mt compression */
            DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
            FORWARD_IF_ERROR( ZSTDMT_initCStream_internal(
                        cctx->mtctx,
                        prefixDict.dict, prefixDict.dictSize, ZSTD_dct_rawContent,
                        cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
            cctx->streamStage = zcss_load;
            cctx->appliedParams.nbWorkers = params.nbWorkers;
        } else
#endif
        {   FORWARD_IF_ERROR( ZSTD_resetCStream_internal(cctx,
                            prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
                            cctx->cdict,
                            params, cctx->pledgedSrcSizePlusOne-1) );
            assert(cctx->streamStage == zcss_load);
            assert(cctx->appliedParams.nbWorkers == 0);
    }   }
    /* end of transparent initialization stage */

    /* compression stage */
#ifdef ZSTD_MULTITHREAD
    if (cctx->appliedParams.nbWorkers > 0) {
        /* with e_flush / e_end, keep pumping the MT context until either the
         * flush completes (flushMin==0) or the output buffer fills up */
        int const forceMaxProgress = (endOp == ZSTD_e_flush || endOp == ZSTD_e_end);
        size_t flushMin;
        assert(forceMaxProgress || endOp == ZSTD_e_continue /* Protection for a new flush type */);
        if (cctx->cParamsChanged) {
            ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
            cctx->cParamsChanged = 0;
        }
        do {
            flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
            if ( ZSTD_isError(flushMin)
              || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
                ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
            }
            FORWARD_IF_ERROR(flushMin);
        } while (forceMaxProgress && flushMin != 0 && output->pos < output->size);
        DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic");
        /* Either we don't require maximum forward progress, we've finished the
         * flush, or we are out of output space.
         */
        assert(!forceMaxProgress || flushMin == 0 || output->pos == output->size);
        return flushMin;
    }
#endif
    FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) );
    DEBUGLOG(5, "completed ZSTD_compressStream2");
    return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
}
3696 
3697 size_t ZSTD_compressStream2_simpleArgs (
3698                             ZSTD_CCtx* cctx,
3699                             void* dst, size_t dstCapacity, size_t* dstPos,
3700                       const void* src, size_t srcSize, size_t* srcPos,
3701                             ZSTD_EndDirective endOp)
3702 {
3703     ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
3704     ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
3705     /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
3706     size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
3707     *dstPos = output.pos;
3708     *srcPos = input.pos;
3709     return cErr;
3710 }
3711 
3712 size_t ZSTD_compress2(ZSTD_CCtx* cctx,
3713                       void* dst, size_t dstCapacity,
3714                       const void* src, size_t srcSize)
3715 {
3716     ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
3717     {   size_t oPos = 0;
3718         size_t iPos = 0;
3719         size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
3720                                         dst, dstCapacity, &oPos,
3721                                         src, srcSize, &iPos,
3722                                         ZSTD_e_end);
3723         FORWARD_IF_ERROR(result);
3724         if (result != 0) {  /* compression not completed, due to lack of output space */
3725             assert(oPos == dstCapacity);
3726             RETURN_ERROR(dstSize_tooSmall);
3727         }
3728         assert(iPos == srcSize);   /* all input is expected consumed */
3729         return oPos;
3730     }
3731 }
3732 
3733 /*======   Finalize   ======*/
3734 
/*! ZSTD_flushStream() :
 *  Flushes whatever data is buffered internally into `output`,
 *  without ending the frame (ZSTD_e_flush directive, empty input).
 * @return : amount of data remaining to flush */
size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
    ZSTD_inBuffer input = { NULL, 0, 0 };
    return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
}
3742 
3743 
3744 size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
3745 {
3746     ZSTD_inBuffer input = { NULL, 0, 0 };
3747     size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
3748     FORWARD_IF_ERROR( remainingToFlush );
3749     if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush;   /* minimal estimation */
3750     /* single thread mode : attempt to calculate remaining to flush more precisely */
3751     {   size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
3752         size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
3753         size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
3754         DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
3755         return toFlush;
3756     }
3757 }
3758 
3759 
3760 /*-=====  Pre-defined compression levels  =====-*/
3761 
#define ZSTD_MAX_CLEVEL     22
/* highest advertised compression level */
int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
/* lowest (fastest) level : negative levels trade ratio for speed,
 * bounded by -ZSTD_TARGETLENGTH_MAX */
int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
3765 
/* Pre-defined compression parameters, one table per source-size tier.
 * Column legend (per row) : W, C, H, S, L, TL, strat — presumably
 * windowLog, chainLog, hashLog, searchLog, minMatch (searchLength),
 * targetLength, strategy; confirm against the field order of
 * ZSTD_compressionParameters in zstd.h.
 * Row 0 of each table is the baseline for negative ("fast") levels. */
static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
{   /* "default" - for any srcSize > 256 KB */
    /* W,  C,  H,  S,  L, TL, strat */
    { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
    { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
    { 20, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */
    { 21, 16, 17,  1,  5,  1, ZSTD_dfast   },  /* level  3 */
    { 21, 18, 18,  1,  5,  1, ZSTD_dfast   },  /* level  4 */
    { 21, 18, 19,  2,  5,  2, ZSTD_greedy  },  /* level  5 */
    { 21, 19, 19,  3,  5,  4, ZSTD_greedy  },  /* level  6 */
    { 21, 19, 19,  3,  5,  8, ZSTD_lazy    },  /* level  7 */
    { 21, 19, 19,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
    { 21, 19, 20,  4,  5, 16, ZSTD_lazy2   },  /* level  9 */
    { 22, 20, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
    { 22, 21, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
    { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
    { 22, 21, 22,  5,  5, 32, ZSTD_btlazy2 },  /* level 13 */
    { 22, 22, 23,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
    { 22, 23, 23,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
    { 22, 22, 22,  5,  5, 48, ZSTD_btopt   },  /* level 16 */
    { 23, 23, 22,  5,  4, 64, ZSTD_btopt   },  /* level 17 */
    { 23, 23, 22,  6,  3, 64, ZSTD_btultra },  /* level 18 */
    { 23, 24, 22,  7,  3,256, ZSTD_btultra2},  /* level 19 */
    { 25, 25, 23,  7,  3,256, ZSTD_btultra2},  /* level 20 */
    { 26, 26, 24,  7,  3,512, ZSTD_btultra2},  /* level 21 */
    { 27, 27, 25,  9,  3,999, ZSTD_btultra2},  /* level 22 */
},
{   /* for srcSize <= 256 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */
    { 18, 14, 14,  1,  5,  1, ZSTD_dfast   },  /* level  2 */
    { 18, 16, 16,  1,  4,  1, ZSTD_dfast   },  /* level  3 */
    { 18, 16, 17,  2,  5,  2, ZSTD_greedy  },  /* level  4.*/
    { 18, 18, 18,  3,  5,  2, ZSTD_greedy  },  /* level  5.*/
    { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/
    { 18, 18, 19,  4,  4,  4, ZSTD_lazy    },  /* level  7 */
    { 18, 18, 19,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    { 18, 18, 19,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    { 18, 18, 19,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    { 18, 18, 19,  5,  4, 12, ZSTD_btlazy2 },  /* level 11.*/
    { 18, 19, 19,  7,  4, 12, ZSTD_btlazy2 },  /* level 12.*/
    { 18, 18, 19,  4,  4, 16, ZSTD_btopt   },  /* level 13 */
    { 18, 18, 19,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
    { 18, 18, 19,  6,  3,128, ZSTD_btopt   },  /* level 15.*/
    { 18, 19, 19,  6,  3,128, ZSTD_btultra },  /* level 16.*/
    { 18, 19, 19,  8,  3,256, ZSTD_btultra },  /* level 17.*/
    { 18, 19, 19,  6,  3,128, ZSTD_btultra2},  /* level 18.*/
    { 18, 19, 19,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 18, 19, 19, 10,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 18, 19, 19, 12,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 18, 19, 19, 13,  3,999, ZSTD_btultra2},  /* level 22.*/
},
{   /* for srcSize <= 128 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */
    { 17, 13, 15,  1,  5,  0, ZSTD_fast    },  /* level  2 */
    { 17, 15, 16,  2,  5,  1, ZSTD_dfast   },  /* level  3 */
    { 17, 17, 17,  2,  4,  1, ZSTD_dfast   },  /* level  4 */
    { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */
    { 17, 17, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */
    { 17, 17, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */
    { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
    { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
    { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
    { 17, 17, 17,  5,  4,  8, ZSTD_btlazy2 },  /* level 11 */
    { 17, 18, 17,  7,  4, 12, ZSTD_btlazy2 },  /* level 12 */
    { 17, 18, 17,  3,  4, 12, ZSTD_btopt   },  /* level 13.*/
    { 17, 18, 17,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
    { 17, 18, 17,  6,  3,256, ZSTD_btopt   },  /* level 15.*/
    { 17, 18, 17,  6,  3,128, ZSTD_btultra },  /* level 16.*/
    { 17, 18, 17,  8,  3,256, ZSTD_btultra },  /* level 17.*/
    { 17, 18, 17, 10,  3,512, ZSTD_btultra },  /* level 18.*/
    { 17, 18, 17,  5,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 17, 18, 17,  7,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 17, 18, 17,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 17, 18, 17, 11,  3,999, ZSTD_btultra2},  /* level 22.*/
},
{   /* for srcSize <= 16 KB */
    /* W,  C,  H,  S,  L,  T, strat */
    { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
    { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */
    { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */
    { 14, 14, 15,  2,  4,  1, ZSTD_dfast   },  /* level  3 */
    { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4 */
    { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/
    { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */
    { 14, 14, 14,  6,  4,  8, ZSTD_lazy2   },  /* level  7 */
    { 14, 14, 14,  8,  4,  8, ZSTD_lazy2   },  /* level  8.*/
    { 14, 15, 14,  5,  4,  8, ZSTD_btlazy2 },  /* level  9.*/
    { 14, 15, 14,  9,  4,  8, ZSTD_btlazy2 },  /* level 10.*/
    { 14, 15, 14,  3,  4, 12, ZSTD_btopt   },  /* level 11.*/
    { 14, 15, 14,  4,  3, 24, ZSTD_btopt   },  /* level 12.*/
    { 14, 15, 14,  5,  3, 32, ZSTD_btultra },  /* level 13.*/
    { 14, 15, 15,  6,  3, 64, ZSTD_btultra },  /* level 14.*/
    { 14, 15, 15,  7,  3,256, ZSTD_btultra },  /* level 15.*/
    { 14, 15, 15,  5,  3, 48, ZSTD_btultra2},  /* level 16.*/
    { 14, 15, 15,  6,  3,128, ZSTD_btultra2},  /* level 17.*/
    { 14, 15, 15,  7,  3,256, ZSTD_btultra2},  /* level 18.*/
    { 14, 15, 15,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
    { 14, 15, 15,  8,  3,512, ZSTD_btultra2},  /* level 20.*/
    { 14, 15, 15,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
    { 14, 15, 15, 10,  3,999, ZSTD_btultra2},  /* level 22.*/
},
};
3872 
/*! ZSTD_getCParams() :
 * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
 *  Size values are optional, provide 0 if not known or unused */
ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
{
    /* when only dictSize is known, add 500 — presumably a rough margin for
     * the unseen source; confirm rationale upstream */
    size_t const addedSize = srcSizeHint ? 0 : 500;
    U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : ZSTD_CONTENTSIZE_UNKNOWN;  /* intentional overflow for srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN */
    /* each true comparison contributes 1 : picks one of the 4 size tiers
     * of ZSTD_defaultCParameters (0 = largest sources, 3 = <= 16 KB) */
    U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
    int row = compressionLevel;
    DEBUGLOG(5, "ZSTD_getCParams (cLevel=%i)", compressionLevel);
    if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT;   /* 0 == default */
    if (compressionLevel < 0) row = 0;   /* entry 0 is baseline for fast mode */
    if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;   /* clamp to table bounds */
    {   ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
        if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel);   /* acceleration factor */
        return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize);               /* refine parameters based on srcSize & dictSize */
    }
}
3891 
/*! ZSTD_getParams() :
 *  same idea as ZSTD_getCParams()
 * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
 *  Fields of `ZSTD_frameParameters` are set to default values */
ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
    ZSTD_parameters params;
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSizeHint, dictSize);
    DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
    memset(&params, 0, sizeof(params));   /* zero all frame parameters */
    params.cParams = cParams;
    params.fParams.contentSizeFlag = 1;   /* content size recorded in frame header by default */
    return params;
}
3905