xref: /freebsd/sys/contrib/zstd/lib/compress/zstd_compress.c (revision 405c3050f102b8c74782f0366c8ead927bd07b68)
1 /*
2  * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
3  * All rights reserved.
4  *
5  * This source code is licensed under both the BSD-style license (found in the
6  * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7  * in the COPYING file in the root directory of this source tree).
8  * You may select, at your option, one of the above-listed licenses.
9  */
10 
11 /*-*************************************
12 *  Dependencies
13 ***************************************/
14 #include <string.h>         /* memset */
15 #include "cpu.h"
16 #include "mem.h"
17 #include "hist.h"           /* HIST_countFast_wksp */
18 #define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
19 #include "fse.h"
20 #define HUF_STATIC_LINKING_ONLY
21 #include "huf.h"
22 #include "zstd_compress_internal.h"
23 #include "zstd_fast.h"
24 #include "zstd_double_fast.h"
25 #include "zstd_lazy.h"
26 #include "zstd_opt.h"
27 #include "zstd_ldm.h"
28 
29 
30 /*-*************************************
31 *  Helper functions
32 ***************************************/
33 size_t ZSTD_compressBound(size_t srcSize) {
34     return ZSTD_COMPRESSBOUND(srcSize);
35 }
36 
37 
38 /*-*************************************
39 *  Context memory management
40 ***************************************/
struct ZSTD_CDict_s {
    void* dictBuffer;                  /* locally-owned copy of the dictionary — presumably NULL when dict is referenced externally; confirm against ZSTD_createCDict_advanced */
    const void* dictContent;           /* read-only view on the dictionary bytes actually used */
    size_t dictContentSize;            /* size of dictContent, in bytes */
    void* workspace;                   /* memory area hosting this CDict's tables */
    size_t workspaceSize;
    ZSTD_matchState_t matchState;      /* match-finder state pre-loaded with the dictionary */
    ZSTD_compressedBlockState_t cBlockState;  /* entropy tables / repcodes derived from the dictionary */
    ZSTD_customMem customMem;          /* allocator, required to free the CDict */
    U32 dictID;                        /* dictionary identifier — NOTE(review): 0 likely means "no dictID"; not provable from this unit */
};  /* typedef'd to ZSTD_CDict within "zstd.h" */
52 
/*! ZSTD_createCCtx() :
 *  create a compression context using the default allocator.
 * @return : new CCtx, or NULL on allocation failure */
ZSTD_CCtx* ZSTD_createCCtx(void)
{
    return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
}
57 
/*! ZSTD_initCCtx() :
 *  zero the whole context, then set allocator, CPU capability and
 *  default parameters. `cctx` must be a valid (possibly uninitialized)
 *  ZSTD_CCtx. */
static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
{
    assert(cctx != NULL);
    memset(cctx, 0, sizeof(*cctx));
    cctx->customMem = memManager;
    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());   /* detect BMI2 support once, at creation time */
    {   size_t const err = ZSTD_CCtx_resetParameters(cctx);
        assert(!ZSTD_isError(err));   /* cannot fail : context was just zeroed, so streamStage == zcss_init */
        (void)err;
    }
}
69 
70 ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
71 {
72     ZSTD_STATIC_ASSERT(zcss_init==0);
73     ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
74     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
75     {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
76         if (!cctx) return NULL;
77         ZSTD_initCCtx(cctx, customMem);
78         return cctx;
79     }
80 }
81 
/*! ZSTD_initStaticCCtx() :
 *  initialize a CCtx inside a caller-provided memory area (no allocation).
 *  `workspace` must be 8-bytes aligned and large enough to host the CCtx
 *  structure plus its minimum working buffers.
 * @return : the CCtx, or NULL if workspace is too small or misaligned */
ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)
{
    ZSTD_CCtx* const cctx = (ZSTD_CCtx*) workspace;
    if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
    if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
    memset(workspace, 0, workspaceSize);   /* may be a bit generous, could memset be smaller ? */
    cctx->staticSize = workspaceSize;      /* non-zero staticSize marks this CCtx as non-freeable / non-resizable */
    cctx->workSpace = (void*)(cctx+1);     /* working memory starts right after the CCtx struct itself */
    cctx->workSpaceSize = workspaceSize - sizeof(ZSTD_CCtx);

    /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
    if (cctx->workSpaceSize < HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t)) return NULL;
    assert(((size_t)cctx->workSpace & (sizeof(void*)-1)) == 0);   /* ensure correct alignment */
    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)cctx->workSpace;
    cctx->blockState.nextCBlock = cctx->blockState.prevCBlock + 1;
    {
        void* const ptr = cctx->blockState.nextCBlock + 1;   /* entropy workspace follows the two block states */
        cctx->entropyWorkspace = (U32*)ptr;
    }
    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
    return cctx;
}
104 
/*! ZSTD_freeCCtxContent() :
 *  release all resources owned by `cctx`, but not `cctx` itself.
 *  Must not be called on a statically allocated CCtx. */
static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
{
    assert(cctx != NULL);
    assert(cctx->staticSize == 0);   /* static CCtx owns no freeable memory */
    ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL;
    ZSTD_freeCDict(cctx->cdictLocal); cctx->cdictLocal = NULL;
#ifdef ZSTD_MULTITHREAD
    ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
#endif
}
115 
116 size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
117 {
118     if (cctx==NULL) return 0;   /* support free on NULL */
119     if (cctx->staticSize) return ERROR(memory_allocation);   /* not compatible with static CCtx */
120     ZSTD_freeCCtxContent(cctx);
121     ZSTD_free(cctx, cctx->customMem);
122     return 0;
123 }
124 
125 
/*! ZSTD_sizeof_mtctx() :
 * @return : size of the multi-threading context owned by `cctx`,
 *           or 0 when built without ZSTD_MULTITHREAD */
static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    return ZSTDMT_sizeof_CCtx(cctx->mtctx);
#else
    (void) cctx;
    return 0;
#endif
}
135 
136 
137 size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
138 {
139     if (cctx==NULL) return 0;   /* support sizeof on NULL */
140     return sizeof(*cctx) + cctx->workSpaceSize
141            + ZSTD_sizeof_CDict(cctx->cdictLocal)
142            + ZSTD_sizeof_mtctx(cctx);
143 }
144 
/*! ZSTD_sizeof_CStream() :
 * @return : memory footprint of the stream; ZSTD_CStream and ZSTD_CCtx
 *           are the same object, so this simply forwards. */
size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
{
    return ZSTD_sizeof_CCtx(zcs);  /* same object */
}
149 
/* private API call, for dictBuilder only :
 * exposes a read-only pointer to the context's internal sequence store */
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
152 
153 static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
154         ZSTD_compressionParameters cParams)
155 {
156     ZSTD_CCtx_params cctxParams;
157     memset(&cctxParams, 0, sizeof(cctxParams));
158     cctxParams.cParams = cParams;
159     cctxParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;  /* should not matter, as all cParams are presumed properly defined */
160     assert(!ZSTD_checkCParams(cParams));
161     cctxParams.fParams.contentSizeFlag = 1;
162     return cctxParams;
163 }
164 
165 static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
166         ZSTD_customMem customMem)
167 {
168     ZSTD_CCtx_params* params;
169     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
170     params = (ZSTD_CCtx_params*)ZSTD_calloc(
171             sizeof(ZSTD_CCtx_params), customMem);
172     if (!params) { return NULL; }
173     params->customMem = customMem;
174     params->compressionLevel = ZSTD_CLEVEL_DEFAULT;
175     params->fParams.contentSizeFlag = 1;
176     return params;
177 }
178 
/*! ZSTD_createCCtxParams() :
 *  allocate a ZSTD_CCtx_params using the default allocator */
ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
{
    return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
}
183 
184 size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
185 {
186     if (params == NULL) { return 0; }
187     ZSTD_free(params, params->customMem);
188     return 0;
189 }
190 
/*! ZSTD_CCtxParams_reset() :
 *  reset `params` to default compression level and default frame parameters */
size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
{
    return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
}
195 
196 size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
197     if (!cctxParams) { return ERROR(GENERIC); }
198     memset(cctxParams, 0, sizeof(*cctxParams));
199     cctxParams->compressionLevel = compressionLevel;
200     cctxParams->fParams.contentSizeFlag = 1;
201     return 0;
202 }
203 
/*! ZSTD_CCtxParams_init_advanced() :
 *  initialize `cctxParams` from fully-specified `params`.
 *  cParams are validated first, so `cctxParams` is left untouched on error.
 * @return : 0, or an error code */
size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
{
    if (!cctxParams) { return ERROR(GENERIC); }
    CHECK_F( ZSTD_checkCParams(params.cParams) );   /* validate before clobbering cctxParams */
    memset(cctxParams, 0, sizeof(*cctxParams));
    cctxParams->cParams = params.cParams;
    cctxParams->fParams = params.fParams;
    cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* should not matter, as all cParams are presumed properly defined */
    assert(!ZSTD_checkCParams(params.cParams));
    return 0;
}
215 
216 /* ZSTD_assignParamsToCCtxParams() :
217  * params is presumed valid at this stage */
218 static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(
219         ZSTD_CCtx_params cctxParams, ZSTD_parameters params)
220 {
221     ZSTD_CCtx_params ret = cctxParams;
222     ret.cParams = params.cParams;
223     ret.fParams = params.fParams;
224     ret.compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* should not matter, as all cParams are presumed properly defined */
225     assert(!ZSTD_checkCParams(params.cParams));
226     return ret;
227 }
228 
/* CLAMPCHECK :
 * exits the enclosing function with an error code
 * when `val` lies outside [min, max].
 * Wrapped in do { } while(0) so the macro behaves as a single statement,
 * and remains safe inside unbraced if/else constructs (CERT PRE10-C). */
#define CLAMPCHECK(val,min,max) do {         \
    if (((val)<(min)) | ((val)>(max))) {     \
        return ERROR(parameter_outOfBound);  \
    }                                        \
} while (0)
233 
234 
235 static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
236 {
237     switch(param)
238     {
239     case ZSTD_p_compressionLevel:
240     case ZSTD_p_hashLog:
241     case ZSTD_p_chainLog:
242     case ZSTD_p_searchLog:
243     case ZSTD_p_minMatch:
244     case ZSTD_p_targetLength:
245     case ZSTD_p_compressionStrategy:
246         return 1;
247 
248     case ZSTD_p_format:
249     case ZSTD_p_windowLog:
250     case ZSTD_p_contentSizeFlag:
251     case ZSTD_p_checksumFlag:
252     case ZSTD_p_dictIDFlag:
253     case ZSTD_p_forceMaxWindow :
254     case ZSTD_p_nbWorkers:
255     case ZSTD_p_jobSize:
256     case ZSTD_p_overlapSizeLog:
257     case ZSTD_p_enableLongDistanceMatching:
258     case ZSTD_p_ldmHashLog:
259     case ZSTD_p_ldmMinMatch:
260     case ZSTD_p_ldmBucketSizeLog:
261     case ZSTD_p_ldmHashEveryLog:
262     case ZSTD_p_forceAttachDict:
263     default:
264         return 0;
265     }
266 }
267 
/*! ZSTD_CCtx_setParameter() :
 *  set one compression parameter on `cctx`.
 *  The value is merely recorded into cctx->requestedParams, to be applied
 *  at next compression start. After streaming has started, only parameters
 *  accepted by ZSTD_isUpdateAuthorized() can still be changed.
 * @return : forwarded result of ZSTD_CCtxParam_setParameter()
 *           (new value, or an error code) */
size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value)
{
    DEBUGLOG(4, "ZSTD_CCtx_setParameter (%u, %u)", (U32)param, value);
    if (cctx->streamStage != zcss_init) {
        if (ZSTD_isUpdateAuthorized(param)) {
            cctx->cParamsChanged = 1;   /* note that cParams must be re-applied */
        } else {
            return ERROR(stage_wrong);
    }   }

    switch(param)
    {
    case ZSTD_p_format :
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_p_compressionLevel:
        if (cctx->cdict) return ERROR(stage_wrong);   /* cParams are dictated by the attached cdict */
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_p_windowLog:
    case ZSTD_p_hashLog:
    case ZSTD_p_chainLog:
    case ZSTD_p_searchLog:
    case ZSTD_p_minMatch:
    case ZSTD_p_targetLength:
    case ZSTD_p_compressionStrategy:
        if (cctx->cdict) return ERROR(stage_wrong);   /* cParams are dictated by the attached cdict */
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_p_contentSizeFlag:
    case ZSTD_p_checksumFlag:
    case ZSTD_p_dictIDFlag:
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_p_forceMaxWindow :  /* Force back-references to remain < windowSize,
                                   * even when referencing into Dictionary content.
                                   * default : 0 when using a CDict, 1 when using a Prefix */
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_p_forceAttachDict:
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_p_nbWorkers:
        if ((value>0) && cctx->staticSize) {
            return ERROR(parameter_unsupported);  /* MT not compatible with static alloc */
        }
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_p_jobSize:
    case ZSTD_p_overlapSizeLog:
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    case ZSTD_p_enableLongDistanceMatching:
    case ZSTD_p_ldmHashLog:
    case ZSTD_p_ldmMinMatch:
    case ZSTD_p_ldmBucketSizeLog:
    case ZSTD_p_ldmHashEveryLog:
        if (cctx->cdict) return ERROR(stage_wrong);   /* LDM settings are frozen by the attached cdict */
        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);

    default: return ERROR(parameter_unsupported);
    }
}
331 
/*! ZSTD_CCtxParam_setParameter() :
 *  write one parameter directly into a ZSTD_CCtx_params object,
 *  validating its range. For most parameters, value 0 means
 *  "use default / auto".
 * @return : the value actually set (as size_t), or an error code */
size_t ZSTD_CCtxParam_setParameter(
        ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, unsigned value)
{
    DEBUGLOG(4, "ZSTD_CCtxParam_setParameter (%u, %u)", (U32)param, value);
    switch(param)
    {
    case ZSTD_p_format :
        if (value > (unsigned)ZSTD_f_zstd1_magicless)
            return ERROR(parameter_unsupported);
        CCtxParams->format = (ZSTD_format_e)value;
        return (size_t)CCtxParams->format;

    case ZSTD_p_compressionLevel : {
        int cLevel = (int)value;  /* cast expected to restore negative sign */
        if (cLevel > ZSTD_maxCLevel()) cLevel = ZSTD_maxCLevel();
        if (cLevel) {  /* 0 : does not change current level */
            CCtxParams->compressionLevel = cLevel;
        }
        if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel;
        return 0;  /* return type (size_t) cannot represent negative values */
    }

    case ZSTD_p_windowLog :
        if (value>0)   /* 0 => use default */
            CLAMPCHECK(value, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
        CCtxParams->cParams.windowLog = value;
        return CCtxParams->cParams.windowLog;

    case ZSTD_p_hashLog :
        if (value>0)   /* 0 => use default */
            CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
        CCtxParams->cParams.hashLog = value;
        return CCtxParams->cParams.hashLog;

    case ZSTD_p_chainLog :
        if (value>0)   /* 0 => use default */
            CLAMPCHECK(value, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
        CCtxParams->cParams.chainLog = value;
        return CCtxParams->cParams.chainLog;

    case ZSTD_p_searchLog :
        if (value>0)   /* 0 => use default */
            CLAMPCHECK(value, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
        CCtxParams->cParams.searchLog = value;
        return value;

    case ZSTD_p_minMatch :
        if (value>0)   /* 0 => use default */
            CLAMPCHECK(value, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
        CCtxParams->cParams.searchLength = value;
        return CCtxParams->cParams.searchLength;

    case ZSTD_p_targetLength :
        /* all values are valid. 0 => use default */
        CCtxParams->cParams.targetLength = value;
        return CCtxParams->cParams.targetLength;

    case ZSTD_p_compressionStrategy :
        if (value>0)   /* 0 => use default */
            CLAMPCHECK(value, (unsigned)ZSTD_fast, (unsigned)ZSTD_btultra);
        CCtxParams->cParams.strategy = (ZSTD_strategy)value;
        return (size_t)CCtxParams->cParams.strategy;

    case ZSTD_p_contentSizeFlag :
        /* Content size written in frame header _when known_ (default:1) */
        DEBUGLOG(4, "set content size flag = %u", (value>0));
        CCtxParams->fParams.contentSizeFlag = value > 0;
        return CCtxParams->fParams.contentSizeFlag;

    case ZSTD_p_checksumFlag :
        /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
        CCtxParams->fParams.checksumFlag = value > 0;
        return CCtxParams->fParams.checksumFlag;

    case ZSTD_p_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
        DEBUGLOG(4, "set dictIDFlag = %u", (value>0));
        CCtxParams->fParams.noDictIDFlag = !value;   /* stored inverted : field means "suppress dictID" */
        return !CCtxParams->fParams.noDictIDFlag;

    case ZSTD_p_forceMaxWindow :
        CCtxParams->forceWindow = (value > 0);
        return CCtxParams->forceWindow;

    case ZSTD_p_forceAttachDict :
        CCtxParams->attachDictPref = value ?
                                    (value > 0 ? ZSTD_dictForceAttach : ZSTD_dictForceCopy) :
                                     ZSTD_dictDefaultAttach;
        return CCtxParams->attachDictPref;

    case ZSTD_p_nbWorkers :
#ifndef ZSTD_MULTITHREAD
        if (value>0) return ERROR(parameter_unsupported);   /* single-thread build */
        return 0;
#else
        return ZSTDMT_CCtxParam_setNbWorkers(CCtxParams, value);
#endif

    case ZSTD_p_jobSize :
#ifndef ZSTD_MULTITHREAD
        return ERROR(parameter_unsupported);
#else
        return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_jobSize, value);
#endif

    case ZSTD_p_overlapSizeLog :
#ifndef ZSTD_MULTITHREAD
        return ERROR(parameter_unsupported);
#else
        return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_overlapSectionLog, value);
#endif

    case ZSTD_p_enableLongDistanceMatching :
        CCtxParams->ldmParams.enableLdm = (value>0);
        return CCtxParams->ldmParams.enableLdm;

    case ZSTD_p_ldmHashLog :
        if (value>0)   /* 0 ==> auto */
            CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
        CCtxParams->ldmParams.hashLog = value;
        return CCtxParams->ldmParams.hashLog;

    case ZSTD_p_ldmMinMatch :
        if (value>0)   /* 0 ==> default */
            CLAMPCHECK(value, ZSTD_LDM_MINMATCH_MIN, ZSTD_LDM_MINMATCH_MAX);
        CCtxParams->ldmParams.minMatchLength = value;
        return CCtxParams->ldmParams.minMatchLength;

    case ZSTD_p_ldmBucketSizeLog :
        if (value > ZSTD_LDM_BUCKETSIZELOG_MAX)
            return ERROR(parameter_outOfBound);
        CCtxParams->ldmParams.bucketSizeLog = value;
        return CCtxParams->ldmParams.bucketSizeLog;

    case ZSTD_p_ldmHashEveryLog :
        if (value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
            return ERROR(parameter_outOfBound);
        CCtxParams->ldmParams.hashEveryLog = value;
        return CCtxParams->ldmParams.hashEveryLog;

    default: return ERROR(parameter_unsupported);
    }
}
474 
/*! ZSTD_CCtx_getParameter() :
 *  read one parameter from cctx->requestedParams into *value.
 * @return : 0, or an error code */
size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned* value)
{
    return ZSTD_CCtxParam_getParameter(&cctx->requestedParams, param, value);
}
479 
/*! ZSTD_CCtxParam_getParameter() :
 *  read one parameter's current value out of `CCtxParams` into *value.
 *  Mirrors the storage conventions of ZSTD_CCtxParam_setParameter()
 *  (e.g. dictIDFlag is stored inverted as noDictIDFlag).
 * @return : 0, or an error code if the parameter is unknown
 *           or unsupported in this build */
size_t ZSTD_CCtxParam_getParameter(
        ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, unsigned* value)
{
    switch(param)
    {
    case ZSTD_p_format :
        *value = CCtxParams->format;
        break;
    case ZSTD_p_compressionLevel :
        *value = CCtxParams->compressionLevel;
        break;
    case ZSTD_p_windowLog :
        *value = CCtxParams->cParams.windowLog;
        break;
    case ZSTD_p_hashLog :
        *value = CCtxParams->cParams.hashLog;
        break;
    case ZSTD_p_chainLog :
        *value = CCtxParams->cParams.chainLog;
        break;
    case ZSTD_p_searchLog :
        *value = CCtxParams->cParams.searchLog;
        break;
    case ZSTD_p_minMatch :
        *value = CCtxParams->cParams.searchLength;
        break;
    case ZSTD_p_targetLength :
        *value = CCtxParams->cParams.targetLength;
        break;
    case ZSTD_p_compressionStrategy :
        *value = (unsigned)CCtxParams->cParams.strategy;
        break;
    case ZSTD_p_contentSizeFlag :
        *value = CCtxParams->fParams.contentSizeFlag;
        break;
    case ZSTD_p_checksumFlag :
        *value = CCtxParams->fParams.checksumFlag;
        break;
    case ZSTD_p_dictIDFlag :
        *value = !CCtxParams->fParams.noDictIDFlag;   /* stored inverted */
        break;
    case ZSTD_p_forceMaxWindow :
        *value = CCtxParams->forceWindow;
        break;
    case ZSTD_p_forceAttachDict :
        *value = CCtxParams->attachDictPref;
        break;
    case ZSTD_p_nbWorkers :
#ifndef ZSTD_MULTITHREAD
        assert(CCtxParams->nbWorkers == 0);   /* single-thread build can never have workers */
#endif
        *value = CCtxParams->nbWorkers;
        break;
    case ZSTD_p_jobSize :
#ifndef ZSTD_MULTITHREAD
        return ERROR(parameter_unsupported);
#else
        *value = CCtxParams->jobSize;
        break;
#endif
    case ZSTD_p_overlapSizeLog :
#ifndef ZSTD_MULTITHREAD
        return ERROR(parameter_unsupported);
#else
        *value = CCtxParams->overlapSizeLog;
        break;
#endif
    case ZSTD_p_enableLongDistanceMatching :
        *value = CCtxParams->ldmParams.enableLdm;
        break;
    case ZSTD_p_ldmHashLog :
        *value = CCtxParams->ldmParams.hashLog;
        break;
    case ZSTD_p_ldmMinMatch :
        *value = CCtxParams->ldmParams.minMatchLength;
        break;
    case ZSTD_p_ldmBucketSizeLog :
        *value = CCtxParams->ldmParams.bucketSizeLog;
        break;
    case ZSTD_p_ldmHashEveryLog :
        *value = CCtxParams->ldmParams.hashEveryLog;
        break;
    default: return ERROR(parameter_unsupported);
    }
    return 0;
}
566 
567 /** ZSTD_CCtx_setParametersUsingCCtxParams() :
568  *  just applies `params` into `cctx`
569  *  no action is performed, parameters are merely stored.
570  *  If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
571  *    This is possible even if a compression is ongoing.
572  *    In which case, new parameters will be applied on the fly, starting with next compression job.
573  */
574 size_t ZSTD_CCtx_setParametersUsingCCtxParams(
575         ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
576 {
577     DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
578     if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
579     if (cctx->cdict) return ERROR(stage_wrong);
580 
581     cctx->requestedParams = *params;
582     return 0;
583 }
584 
/*! ZSTD_CCtx_setPledgedSrcSize() :
 *  declare the uncompressed size of the next frame.
 *  Stored as size+1 so that 0 can represent "unknown".
 * @return : 0, or an error code if streaming already started */
ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
    return 0;
}
592 
/*! ZSTD_CCtx_loadDictionary_advanced() :
 *  create a local CDict from `dict` and attach it to `cctx`.
 *  dict==NULL or dictSize==0 removes any current dictionary.
 *  Not available on a statically allocated CCtx (needs malloc).
 * @return : 0, or an error code */
size_t ZSTD_CCtx_loadDictionary_advanced(
        ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
        ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
{
    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
    if (cctx->staticSize) return ERROR(memory_allocation);  /* no malloc for static CCtx */
    DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
    ZSTD_freeCDict(cctx->cdictLocal);  /* in case one already exists */
    if (dict==NULL || dictSize==0) {   /* no dictionary mode */
        cctx->cdictLocal = NULL;
        cctx->cdict = NULL;
    } else {
        /* select cParams based on requested params, pledged source size and dict size */
        ZSTD_compressionParameters const cParams =
                ZSTD_getCParamsFromCCtxParams(&cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, dictSize);
        cctx->cdictLocal = ZSTD_createCDict_advanced(
                                dict, dictSize,
                                dictLoadMethod, dictContentType,
                                cParams, cctx->customMem);
        cctx->cdict = cctx->cdictLocal;   /* on failure, both end up NULL before returning the error */
        if (cctx->cdictLocal == NULL)
            return ERROR(memory_allocation);
    }
    return 0;
}
617 
/*! ZSTD_CCtx_loadDictionary_byReference() :
 *  same as ZSTD_CCtx_loadDictionary(), but the dictionary content is
 *  referenced, not copied : `dict` must outlive its usage by `cctx`. */
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
      ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
    return ZSTD_CCtx_loadDictionary_advanced(
            cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
}
624 
/*! ZSTD_CCtx_loadDictionary() :
 *  load a dictionary, copying its content into the context,
 *  auto-detecting its type (raw content vs structured zstd dictionary) */
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
    return ZSTD_CCtx_loadDictionary_advanced(
            cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
}
630 
631 
/*! ZSTD_CCtx_refCDict() :
 *  attach an externally-owned cdict to `cctx`.
 *  Clears any prefix dictionary, as cdict and prefix are exclusive.
 * @return : 0, or an error code if streaming already started */
size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
    cctx->cdict = cdict;
    memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));  /* exclusive */
    return 0;
}
639 
/*! ZSTD_CCtx_refPrefix() :
 *  reference `prefix` as raw-content dictionary for the next frame only */
size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
{
    return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
}
644 
/*! ZSTD_CCtx_refPrefix_advanced() :
 *  reference `prefix` as dictionary content, with explicit content type.
 *  The prefix is only referenced, not copied : it must remain valid until
 *  it is consumed. Discards any previously attached cdict (exclusive).
 * @return : 0, or an error code if streaming already started */
size_t ZSTD_CCtx_refPrefix_advanced(
        ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
{
    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
    cctx->cdict = NULL;   /* prefix discards any prior cdict */
    cctx->prefixDict.dict = prefix;
    cctx->prefixDict.dictSize = prefixSize;
    cctx->prefixDict.dictContentType = dictContentType;
    return 0;
}
655 
/*! ZSTD_CCtx_reset() :
 *  Also dumps dictionary */
void ZSTD_CCtx_reset(ZSTD_CCtx* cctx)
{
    cctx->streamStage = zcss_init;     /* back to "not started" : parameters become settable again */
    cctx->pledgedSrcSizePlusOne = 0;   /* 0 means pledged size unknown */
}
663 
/*! ZSTD_CCtx_resetParameters() :
 *  drop any attached dictionary and restore all requested parameters
 *  to their defaults.
 * @return : 0, or an error code if streaming already started */
size_t ZSTD_CCtx_resetParameters(ZSTD_CCtx* cctx)
{
    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
    cctx->cdict = NULL;
    return ZSTD_CCtxParams_reset(&cctx->requestedParams);
}
670 
/** ZSTD_checkCParams() :
    control CParam values remain within authorized range.
    @return : 0, or an error code if one value is beyond authorized range */
size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
{
    CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
    CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
    CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
    CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
    CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
    ZSTD_STATIC_ASSERT(ZSTD_TARGETLENGTH_MIN == 0);   /* lower bound check is therefore unnecessary */
    if (cParams.targetLength > ZSTD_TARGETLENGTH_MAX)
        return ERROR(parameter_outOfBound);
    if ((U32)(cParams.strategy) > (U32)ZSTD_btultra)
        return ERROR(parameter_unsupported);
    return 0;
}
688 
/** ZSTD_clampCParams() :
 *  make CParam values within valid range.
 *  @return : valid CParams */
static ZSTD_compressionParameters
ZSTD_clampCParams(ZSTD_compressionParameters cParams)
{
/* CLAMP : silently saturate `val` into [min, max] (contrast with CLAMPCHECK, which errors out) */
#   define CLAMP(val,min,max) {      \
        if (val<min) val=min;        \
        else if (val>max) val=max;   \
    }
    CLAMP(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
    CLAMP(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
    CLAMP(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
    CLAMP(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
    CLAMP(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
    ZSTD_STATIC_ASSERT(ZSTD_TARGETLENGTH_MIN == 0);   /* unsigned, so only the upper bound needs clamping */
    if (cParams.targetLength > ZSTD_TARGETLENGTH_MAX)
        cParams.targetLength = ZSTD_TARGETLENGTH_MAX;
    CLAMP(cParams.strategy, ZSTD_fast, ZSTD_btultra);
    return cParams;
}
710 
711 /** ZSTD_cycleLog() :
712  *  condition for correct operation : hashLog > 1 */
713 static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
714 {
715     U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
716     return hashLog - btScale;
717 }
718 
/** ZSTD_adjustCParams_internal() :
    optimize `cPar` for a given input (`srcSize` and `dictSize`).
    mostly downsizing to reduce memory consumption and initialization latency.
    Both `srcSize` and `dictSize` are optional (use 0 if unknown).
    Note : cPar is assumed validated. Use ZSTD_checkCParams() to ensure this condition. */
static ZSTD_compressionParameters
ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
                            unsigned long long srcSize,
                            size_t dictSize)
{
    static const U64 minSrcSize = 513; /* (1<<9) + 1 */
    static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
    assert(ZSTD_checkCParams(cPar)==0);

    /* srcSize+1<2 catches both 0 and ZSTD_CONTENTSIZE_UNKNOWN (== (U64)-1) */
    if (dictSize && (srcSize+1<2) /* srcSize unknown */ )
        srcSize = minSrcSize;  /* presumed small when there is a dictionary */
    else if (srcSize == 0)
        srcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* 0 == unknown : presumed large */

    /* resize windowLog if input is small enough, to use less memory */
    if ( (srcSize < maxWindowResize)
      && (dictSize < maxWindowResize) )  {
        U32 const tSize = (U32)(srcSize + dictSize);
        static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
        U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
                            ZSTD_highbit32(tSize-1) + 1;   /* smallest power-of-2 window covering tSize */
        if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
    }
    /* hash table never needs to index more than window+1 positions */
    if (cPar.hashLog > cPar.windowLog+1) cPar.hashLog = cPar.windowLog+1;
    {   U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
        if (cycleLog > cPar.windowLog)
            cPar.chainLog -= (cycleLog - cPar.windowLog);   /* keep chain cycle within window */
    }

    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* required for frame header */

    return cPar;
}
758 
/*! ZSTD_adjustCParams() :
 *  public variant : clamps user-provided cParams into valid range first,
 *  then applies the same downsizing heuristics as the internal version */
ZSTD_compressionParameters
ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
                   unsigned long long srcSize,
                   size_t dictSize)
{
    cPar = ZSTD_clampCParams(cPar);   /* _internal requires already-valid cParams */
    return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
}
767 
/*! ZSTD_getCParamsFromCCtxParams() :
 *  derive effective cParams : start from the compression level's defaults,
 *  then apply every explicitly-set (non-zero) field from CCtxParams,
 *  then adjust for the actual source and dictionary sizes */
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize)
{
    ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize);
    if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;   /* LDM needs a large window */
    /* 0 means "not set" for each individual cParam */
    if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog;
    if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog;
    if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog;
    if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog;
    if (CCtxParams->cParams.searchLength) cParams.searchLength = CCtxParams->cParams.searchLength;
    if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength;
    if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy;
    assert(!ZSTD_checkCParams(cParams));
    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize);
}
783 
/*! ZSTD_sizeof_matchState() :
 *  compute the memory required by a match state for given cParams.
 *  `forCCtx` : when non-zero, account for compression-only structures
 *  (hashLog3 table and optimal-parser workspace). */
static size_t
ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
                       const U32 forCCtx)
{
    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);   /* fast strategy has no chain table */
    size_t const hSize = ((size_t)1) << cParams->hashLog;
    U32    const hashLog3 = (forCCtx && cParams->searchLength==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;   /* 3-byte hash only for searchLength==3 */
    size_t const h3Size = ((size_t)1) << hashLog3;
    size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
    size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32)
                          + (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t));
    size_t const optSpace = (forCCtx && ((cParams->strategy == ZSTD_btopt) ||
                                         (cParams->strategy == ZSTD_btultra)))
                                ? optPotentialSpace   /* only optimal-parser strategies need it */
                                : 0;
    DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
                (U32)chainSize, (U32)hSize, (U32)h3Size);
    return tableSpace + optSpace;
}
803 
size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
    /* Estimate CCtx size is supported for single-threaded compression only. */
    if (params->nbWorkers > 0) { return ERROR(GENERIC); }
    {   ZSTD_compressionParameters const cParams =
                ZSTD_getCParamsFromCCtxParams(params, 0, 0);
        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
        /* a sequence needs at least searchLength (3 or 4) bytes of input */
        U32    const divider = (cParams.searchLength==3) ? 3 : 4;
        size_t const maxNbSeq = blockSize / divider;
        /* must match the seqStore layout carved out in ZSTD_resetCCtx_internal :
         * literals buffer (+WILDCOPY_OVERLENGTH) + sequence array + 3 code arrays */
        size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
        size_t const entropySpace = HUF_WORKSPACE_SIZE;
        size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);   /* prev + next block states */
        size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1);

        /* long-distance matching tables (0 when ldm is disabled) */
        size_t const ldmSpace = ZSTD_ldm_getTableSize(params->ldmParams);
        size_t const ldmSeqSpace = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq);

        size_t const neededSpace = entropySpace + blockStateSpace + tokenSpace +
                                   matchStateSize + ldmSpace + ldmSeqSpace;

        DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx));
        DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace);
        return sizeof(ZSTD_CCtx) + neededSpace;
    }
}
829 
830 size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
831 {
832     ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
833     return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
834 }
835 
836 static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
837 {
838     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
839     return ZSTD_estimateCCtxSize_usingCParams(cParams);
840 }
841 
842 size_t ZSTD_estimateCCtxSize(int compressionLevel)
843 {
844     int level;
845     size_t memBudget = 0;
846     for (level=1; level<=compressionLevel; level++) {
847         size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
848         if (newMB > memBudget) memBudget = newMB;
849     }
850     return memBudget;
851 }
852 
853 size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
854 {
855     if (params->nbWorkers > 0) { return ERROR(GENERIC); }
856     {   size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
857         size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params->cParams.windowLog);
858         size_t const inBuffSize = ((size_t)1 << params->cParams.windowLog) + blockSize;
859         size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
860         size_t const streamingSize = inBuffSize + outBuffSize;
861 
862         return CCtxSize + streamingSize;
863     }
864 }
865 
866 size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
867 {
868     ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
869     return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
870 }
871 
872 static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
873 {
874     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
875     return ZSTD_estimateCStreamSize_usingCParams(cParams);
876 }
877 
878 size_t ZSTD_estimateCStreamSize(int compressionLevel)
879 {
880     int level;
881     size_t memBudget = 0;
882     for (level=1; level<=compressionLevel; level++) {
883         size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
884         if (newMB > memBudget) memBudget = newMB;
885     }
886     return memBudget;
887 }
888 
/* ZSTD_getFrameProgression():
 * tells how much data has been consumed (input) and produced (output) for current frame.
 * able to count progression inside worker threads (non-blocking mode).
 */
ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    /* multi-threaded mode : delegate to the MT context, which tracks per-job progress */
    if (cctx->appliedParams.nbWorkers > 0) {
        return ZSTDMT_getFrameProgression(cctx->mtctx);
    }
#endif
    {   ZSTD_frameProgression fp;
        /* bytes received into the input buffer but not yet compressed;
         * 0 when not in buffered (streaming) mode */
        size_t const buffered = (cctx->inBuff == NULL) ? 0 :
                                cctx->inBuffPos - cctx->inToCompress;
        if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);   /* subtraction above must not have wrapped */
        assert(buffered <= ZSTD_BLOCKSIZE_MAX);
        fp.ingested = cctx->consumedSrcSize + buffered;
        fp.consumed = cctx->consumedSrcSize;
        fp.produced = cctx->producedCSize;
        fp.flushed  = cctx->producedCSize;   /* simplified; some data might still be left within streaming output buffer */
        fp.currentJobID = 0;      /* jobs only exist in multi-threaded mode */
        fp.nbActiveWorkers = 0;   /* single-threaded : no workers */
        return fp;
}   }
913 
/*! ZSTD_toFlushNow()
 *  Only useful for multithreading scenarios currently (nbWorkers >= 1).
 *  @return : amount of data that can be flushed without blocking
 *            (always 0 in single-threaded mode). */
size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
{
#ifdef ZSTD_MULTITHREAD
    if (cctx->appliedParams.nbWorkers > 0) {
        return ZSTDMT_toFlushNow(cctx->mtctx);
    }
#endif
    (void)cctx;   /* silence unused-parameter warning when ZSTD_MULTITHREAD is not defined */
    return 0;   /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
}
927 
928 
929 
930 static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1,
931                                   ZSTD_compressionParameters cParams2)
932 {
933     return (cParams1.hashLog  == cParams2.hashLog)
934          & (cParams1.chainLog == cParams2.chainLog)
935          & (cParams1.strategy == cParams2.strategy)   /* opt parser space */
936          & ((cParams1.searchLength==3) == (cParams2.searchLength==3));  /* hashlog3 space */
937 }
938 
/* ZSTD_assertEqualCParams() :
 * debug-only field-by-field equality check; compiles to nothing when
 * asserts are disabled (hence the (void) casts to avoid unused warnings). */
static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
                                    ZSTD_compressionParameters cParams2)
{
    (void)cParams1;
    (void)cParams2;
    assert(cParams1.windowLog    == cParams2.windowLog);
    assert(cParams1.chainLog     == cParams2.chainLog);
    assert(cParams1.hashLog      == cParams2.hashLog);
    assert(cParams1.searchLog    == cParams2.searchLog);
    assert(cParams1.searchLength == cParams2.searchLength);
    assert(cParams1.targetLength == cParams2.targetLength);
    assert(cParams1.strategy     == cParams2.strategy);
}
952 
953 /** The parameters are equivalent if ldm is not enabled in both sets or
954  *  all the parameters are equivalent. */
955 static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1,
956                                     ldmParams_t ldmParams2)
957 {
958     return (!ldmParams1.enableLdm && !ldmParams2.enableLdm) ||
959            (ldmParams1.enableLdm == ldmParams2.enableLdm &&
960             ldmParams1.hashLog == ldmParams2.hashLog &&
961             ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog &&
962             ldmParams1.minMatchLength == ldmParams2.minMatchLength &&
963             ldmParams1.hashEveryLog == ldmParams2.hashEveryLog);
964 }
965 
/* whether the CCtx owns internal input/output buffers (streaming mode) */
typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e;
967 
/* ZSTD_sufficientBuff() :
 * check internal buffers exist for streaming if buffPol == ZSTDb_buffered .
 * Note : they are assumed to be correctly sized if ZSTD_equivalentCParams()==1 */
static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t maxNbSeq1,
                            size_t maxNbLit1,
                            ZSTD_buffered_policy_e buffPol2,
                            ZSTD_compressionParameters cParams2,
                            U64 pledgedSrcSize)
{
    /* window need not exceed the pledged source size (min 1 byte) */
    size_t const windowSize2 = MAX(1, (size_t)MIN(((U64)1 << cParams2.windowLog), pledgedSrcSize));
    size_t const blockSize2 = MIN(ZSTD_BLOCKSIZE_MAX, windowSize2);
    /* smallest match length (3 or 4) bounds the number of sequences per block */
    size_t const maxNbSeq2 = blockSize2 / ((cParams2.searchLength == 3) ? 3 : 4);
    size_t const maxNbLit2 = blockSize2;
    size_t const neededBufferSize2 = (buffPol2==ZSTDb_buffered) ? windowSize2 + blockSize2 : 0;
    DEBUGLOG(4, "ZSTD_sufficientBuff: is neededBufferSize2=%u <= bufferSize1=%u",
                (U32)neededBufferSize2, (U32)bufferSize1);
    DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbSeq2=%u <= maxNbSeq1=%u",
                (U32)maxNbSeq2, (U32)maxNbSeq1);
    DEBUGLOG(4, "ZSTD_sufficientBuff: is maxNbLit2=%u <= maxNbLit1=%u",
                (U32)maxNbLit2, (U32)maxNbLit1);
    /* existing allocations must cover all three new requirements */
    return (maxNbLit2 <= maxNbLit1)
         & (maxNbSeq2 <= maxNbSeq1)
         & (neededBufferSize2 <= bufferSize1);
}
992 
/** Equivalence for resetCCtx purposes :
 *  @return 1 if a context currently sized for params1 (with the given buffer
 *  and seqStore capacities) can be re-used as-is for params2, 0 otherwise. */
static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1,
                                 ZSTD_CCtx_params params2,
                                 size_t buffSize1,
                                 size_t maxNbSeq1, size_t maxNbLit1,
                                 ZSTD_buffered_policy_e buffPol2,
                                 U64 pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_equivalentParams: pledgedSrcSize=%u", (U32)pledgedSrcSize);
    /* table allocations must match */
    if (!ZSTD_equivalentCParams(params1.cParams, params2.cParams)) {
      DEBUGLOG(4, "ZSTD_equivalentCParams() == 0");
      return 0;
    }
    /* ldm table allocations must match */
    if (!ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams)) {
      DEBUGLOG(4, "ZSTD_equivalentLdmParams() == 0");
      return 0;
    }
    /* existing streaming buffers and seqStore must be large enough */
    if (!ZSTD_sufficientBuff(buffSize1, maxNbSeq1, maxNbLit1, buffPol2,
                             params2.cParams, pledgedSrcSize)) {
      DEBUGLOG(4, "ZSTD_sufficientBuff() == 0");
      return 0;
    }
    return 1;
}
1017 
1018 static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
1019 {
1020     int i;
1021     for (i = 0; i < ZSTD_REP_NUM; ++i)
1022         bs->rep[i] = repStartValue[i];
1023     bs->entropy.huf.repeatMode = HUF_repeat_none;
1024     bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
1025     bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
1026     bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
1027 }
1028 
/*! ZSTD_invalidateMatchState()
 * Invalidate all the matches in the match finder tables.
 * Requires nextSrc and base to be set (can be NULL).
 */
static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
{
    ZSTD_window_clear(&ms->window);

    /* NOTE(review): +1 presumably skips the first (reserved) position — confirm */
    ms->nextToUpdate = ms->window.dictLimit + 1;
    ms->nextToUpdate3 = ms->window.dictLimit + 1;
    ms->loadedDictEnd = 0;
    ms->opt.litLengthSum = 0;  /* force reset of btopt stats */
    ms->dictMatchState = NULL;  /* detach any referenced dictionary state */
}
1043 
/*! ZSTD_continueCCtx() :
 *  reuse CCtx without reset (note : requires no dictionary) :
 *  keeps all allocations in place, only refreshes frame-level state. */
static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pledgedSrcSize)
{
    size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
    DEBUGLOG(4, "ZSTD_continueCCtx: re-use context in place");

    cctx->blockSize = blockSize;   /* previous block size could be different even for same windowLog, due to pledgedSrcSize */
    cctx->appliedParams = params;
    cctx->blockState.matchState.cParams = params.cParams;
    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;   /* +1 so that 0 can mean "unknown" */
    cctx->consumedSrcSize = 0;
    cctx->producedCSize = 0;
    if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
        cctx->appliedParams.fParams.contentSizeFlag = 0;   /* cannot write a size we do not know */
    DEBUGLOG(4, "pledged content size : %u ; flag : %u",
        (U32)pledgedSrcSize, cctx->appliedParams.fParams.contentSizeFlag);
    cctx->stage = ZSTDcs_init;
    cctx->dictID = 0;
    if (params.ldmParams.enableLdm)
        ZSTD_window_clear(&cctx->ldmState.window);
    /* drop external sequences, stale matches and previous entropy statistics */
    ZSTD_referenceExternalSequences(cctx, NULL, 0);
    ZSTD_invalidateMatchState(&cctx->blockState.matchState);
    ZSTD_reset_compressedBlockState(cctx->blockState.prevCBlock);
    XXH64_reset(&cctx->xxhState, 0);   /* restart content checksum */
    return 0;
}
1072 
/* ZSTDcrp_noMemset : skip zeroing the match-state tables (caller will overwrite them) */
typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e;
1074 
/* ZSTD_reset_matchState() :
 * carve the match-state tables out of workspace memory starting at `ptr`,
 * reset window and indexes, and return the first free byte past the tables.
 * Sizes and layout must stay in sync with ZSTD_sizeof_matchState().
 * forCCtx==0 when sizing for a CDict (no hashLog3, no opt parser space). */
static void*
ZSTD_reset_matchState(ZSTD_matchState_t* ms,
                      void* ptr,
                const ZSTD_compressionParameters* cParams,
                      ZSTD_compResetPolicy_e const crp, U32 const forCCtx)
{
    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
    size_t const hSize = ((size_t)1) << cParams->hashLog;
    U32    const hashLog3 = (forCCtx && cParams->searchLength==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
    size_t const h3Size = ((size_t)1) << hashLog3;
    size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);

    assert(((size_t)ptr & 3) == 0);   /* workspace pointer must be U32-aligned */

    ms->hashLog3 = hashLog3;
    memset(&ms->window, 0, sizeof(ms->window));
    ms->window.dictLimit = 1;    /* start from 1, so that 1st position is valid */
    ms->window.lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
    ms->window.nextSrc = ms->window.base + 1;   /* see issue #1241 */
    ZSTD_invalidateMatchState(ms);

    /* opt parser space */
    if (forCCtx && ((cParams->strategy == ZSTD_btopt) | (cParams->strategy == ZSTD_btultra))) {
        DEBUGLOG(4, "reserving optimal parser space");
        /* four frequency tables, then match and price arrays, laid out back-to-back */
        ms->opt.litFreq = (U32*)ptr;
        ms->opt.litLengthFreq = ms->opt.litFreq + (1<<Litbits);
        ms->opt.matchLengthFreq = ms->opt.litLengthFreq + (MaxLL+1);
        ms->opt.offCodeFreq = ms->opt.matchLengthFreq + (MaxML+1);
        ptr = ms->opt.offCodeFreq + (MaxOff+1);
        ms->opt.matchTable = (ZSTD_match_t*)ptr;
        ptr = ms->opt.matchTable + ZSTD_OPT_NUM+1;
        ms->opt.priceTable = (ZSTD_optimal_t*)ptr;
        ptr = ms->opt.priceTable + ZSTD_OPT_NUM+1;
    }

    /* table Space */
    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_noMemset);
    assert(((size_t)ptr & 3) == 0);  /* ensure ptr is properly aligned */
    if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace);   /* reset tables only */
    ms->hashTable = (U32*)(ptr);
    ms->chainTable = ms->hashTable + hSize;
    ms->hashTable3 = ms->chainTable + chainSize;
    ptr = ms->hashTable3 + h3Size;

    ms->cParams = *cParams;

    assert(((size_t)ptr & 3) == 0);
    return ptr;
}
1124 
1125 #define ZSTD_WORKSPACETOOLARGE_FACTOR 3 /* define "workspace is too large" as this number of times larger than needed */
1126 #define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128  /* when workspace is continuously too large
1127                                          * during at least this number of times,
1128                                          * context's memory usage is considered wasteful,
1129                                          * because it's sized to handle a worst case scenario which rarely happens.
1130                                          * In which case, resize it down to free some memory */
1131 
/*! ZSTD_resetCCtx_internal() :
    note : `params` are assumed fully validated at this stage */
static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
                                      ZSTD_CCtx_params params,
                                      U64 pledgedSrcSize,
                                      ZSTD_compResetPolicy_e const crp,
                                      ZSTD_buffered_policy_e const zbuff)
{
    DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
                (U32)pledgedSrcSize, params.cParams.windowLog);
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));

    /* fast path : when the current allocation already fits the new parameters,
     * re-use the context in place (no free/malloc, no memset) */
    if (crp == ZSTDcrp_continue) {
        if (ZSTD_equivalentParams(zc->appliedParams, params,
                                  zc->inBuffSize,
                                  zc->seqStore.maxNbSeq, zc->seqStore.maxNbLit,
                                  zbuff, pledgedSrcSize)) {
            DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> continue mode (wLog1=%u, blockSize1=%zu)",
                        zc->appliedParams.cParams.windowLog, zc->blockSize);
            zc->workSpaceOversizedDuration += (zc->workSpaceOversizedDuration > 0);   /* if it was too large, it still is */
            if (zc->workSpaceOversizedDuration <= ZSTD_WORKSPACETOOLARGE_MAXDURATION)
                return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
    }   }
    DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx");

    if (params.ldmParams.enableLdm) {
        /* Adjust long distance matching parameters */
        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
        assert(params.ldmParams.hashEveryLog < 32);
        zc->ldmState.hashPower = ZSTD_ldm_getHashPower(params.ldmParams.minMatchLength);
    }

    {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
        U32    const divider = (params.cParams.searchLength==3) ? 3 : 4;
        size_t const maxNbSeq = blockSize / divider;
        size_t const tokenSpace = WILDCOPY_OVERLENGTH + blockSize + 11*maxNbSeq;
        size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0;
        size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0;
        size_t const matchStateSize = ZSTD_sizeof_matchState(&params.cParams, /* forCCtx */ 1);
        size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
        void* ptr;   /* used to partition workSpace */

        /* Check if workSpace is large enough, alloc a new one if needed */
        {   size_t const entropySpace = HUF_WORKSPACE_SIZE;
            size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
            size_t const bufferSpace = buffInSize + buffOutSize;
            size_t const ldmSpace = ZSTD_ldm_getTableSize(params.ldmParams);
            size_t const ldmSeqSpace = maxNbLdmSeq * sizeof(rawSeq);

            size_t const neededSpace = entropySpace + blockStateSpace + ldmSpace +
                                       ldmSeqSpace + matchStateSize + tokenSpace +
                                       bufferSpace;

            /* resize when too small, or when oversized for too many resets in a row */
            int const workSpaceTooSmall = zc->workSpaceSize < neededSpace;
            int const workSpaceTooLarge = zc->workSpaceSize > ZSTD_WORKSPACETOOLARGE_FACTOR * neededSpace;
            int const workSpaceWasteful = workSpaceTooLarge && (zc->workSpaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION);
            zc->workSpaceOversizedDuration = workSpaceTooLarge ? zc->workSpaceOversizedDuration+1 : 0;

            DEBUGLOG(4, "Need %zuKB workspace, including %zuKB for match state, and %zuKB for buffers",
                        neededSpace>>10, matchStateSize>>10, bufferSpace>>10);
            DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);

            if (workSpaceTooSmall || workSpaceWasteful) {
                DEBUGLOG(4, "Need to resize workSpaceSize from %zuKB to %zuKB",
                            zc->workSpaceSize >> 10,
                            neededSpace >> 10);
                /* static cctx : no resize, error out */
                if (zc->staticSize) return ERROR(memory_allocation);

                zc->workSpaceSize = 0;   /* keep state consistent in case malloc fails below */
                ZSTD_free(zc->workSpace, zc->customMem);
                zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
                if (zc->workSpace == NULL) return ERROR(memory_allocation);
                zc->workSpaceSize = neededSpace;
                zc->workSpaceOversizedDuration = 0;

                /* Statically sized space.
                 * entropyWorkspace never moves,
                 * though prev/next block swap places */
                assert(((size_t)zc->workSpace & 3) == 0);   /* ensure correct alignment */
                assert(zc->workSpaceSize >= 2 * sizeof(ZSTD_compressedBlockState_t));
                zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)zc->workSpace;
                zc->blockState.nextCBlock = zc->blockState.prevCBlock + 1;
                ptr = zc->blockState.nextCBlock + 1;
                zc->entropyWorkspace = (U32*)ptr;
        }   }

        /* init params */
        zc->appliedParams = params;
        zc->blockState.matchState.cParams = params.cParams;
        zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;   /* +1 so that 0 can mean "unknown" */
        zc->consumedSrcSize = 0;
        zc->producedCSize = 0;
        if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
            zc->appliedParams.fParams.contentSizeFlag = 0;   /* cannot write a size we do not know */
        DEBUGLOG(4, "pledged content size : %u ; flag : %u",
            (U32)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
        zc->blockSize = blockSize;

        XXH64_reset(&zc->xxhState, 0);
        zc->stage = ZSTDcs_init;
        zc->dictID = 0;

        ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);

        /* workspace partitioning, in order : entropy workspace, ldm hash table,
         * ldm sequences, match state tables, sequence storage, ldm bucket
         * offsets, then streaming buffers */
        ptr = zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32;

        /* ldm hash table */
        /* initialize bucketOffsets table later for pointer alignment */
        if (params.ldmParams.enableLdm) {
            size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
            memset(ptr, 0, ldmHSize * sizeof(ldmEntry_t));
            assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
            zc->ldmState.hashTable = (ldmEntry_t*)ptr;
            ptr = zc->ldmState.hashTable + ldmHSize;
            zc->ldmSequences = (rawSeq*)ptr;
            ptr = zc->ldmSequences + maxNbLdmSeq;
            zc->maxNbLdmSequences = maxNbLdmSeq;

            memset(&zc->ldmState.window, 0, sizeof(zc->ldmState.window));
        }
        assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */

        ptr = ZSTD_reset_matchState(&zc->blockState.matchState, ptr, &params.cParams, crp, /* forCCtx */ 1);

        /* sequences storage */
        zc->seqStore.maxNbSeq = maxNbSeq;
        zc->seqStore.sequencesStart = (seqDef*)ptr;
        ptr = zc->seqStore.sequencesStart + maxNbSeq;
        zc->seqStore.llCode = (BYTE*) ptr;
        zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
        zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
        zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
        /* ZSTD_wildcopy() is used to copy into the literals buffer,
         * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
         */
        zc->seqStore.maxNbLit = blockSize;
        ptr = zc->seqStore.litStart + blockSize + WILDCOPY_OVERLENGTH;

        /* ldm bucketOffsets table */
        if (params.ldmParams.enableLdm) {
            size_t const ldmBucketSize =
                  ((size_t)1) << (params.ldmParams.hashLog -
                                  params.ldmParams.bucketSizeLog);
            memset(ptr, 0, ldmBucketSize);
            zc->ldmState.bucketOffsets = (BYTE*)ptr;
            ptr = zc->ldmState.bucketOffsets + ldmBucketSize;
            ZSTD_window_clear(&zc->ldmState.window);
        }
        ZSTD_referenceExternalSequences(zc, NULL, 0);

        /* buffers */
        zc->inBuffSize = buffInSize;
        zc->inBuff = (char*)ptr;
        zc->outBuffSize = buffOutSize;
        zc->outBuff = zc->inBuff + buffInSize;

        return 0;
    }
}
1294 
1295 /* ZSTD_invalidateRepCodes() :
1296  * ensures next compression will not use repcodes from previous block.
1297  * Note : only works with regular variant;
1298  *        do not use with extDict variant ! */
1299 void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
1300     int i;
1301     for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
1302     assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
1303 }
1304 
/* These are the approximate sizes for each strategy past which copying the
 * dictionary tables into the working context is faster than using them
 * in-place.
 * Indexed by ZSTD_strategy value (entry 0 is unused : strategies start at 1).
 */
static const size_t attachDictSizeCutoffs[(unsigned)ZSTD_btultra+1] = {
    8 KB, /* unused */
    8 KB, /* ZSTD_fast */
    16 KB, /* ZSTD_dfast */
    32 KB, /* ZSTD_greedy */
    32 KB, /* ZSTD_lazy */
    32 KB, /* ZSTD_lazy2 */
    32 KB, /* ZSTD_btlazy2 */
    32 KB, /* ZSTD_btopt */
    8 KB /* ZSTD_btultra */
};
1320 
1321 static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
1322                                  ZSTD_CCtx_params params,
1323                                  U64 pledgedSrcSize)
1324 {
1325     size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
1326     return ( pledgedSrcSize <= cutoff
1327           || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
1328           || params.attachDictPref == ZSTD_dictForceAttach )
1329         && params.attachDictPref != ZSTD_dictForceCopy
1330         && !params.forceWindow; /* dictMatchState isn't correctly
1331                                  * handled in _enforceMaxDist */
1332 }
1333 
/* ZSTD_resetCCtx_byAttachingCDict() :
 * reset cctx for a new frame, referencing the cdict's match state in place
 * (no table copy). The working context only needs tables sized for the
 * input itself, since the dict keeps its own. */
static size_t ZSTD_resetCCtx_byAttachingCDict(
    ZSTD_CCtx* cctx,
    const ZSTD_CDict* cdict,
    ZSTD_CCtx_params params,
    U64 pledgedSrcSize,
    ZSTD_buffered_policy_e zbuff)
{
    {
        const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
        unsigned const windowLog = params.cParams.windowLog;
        assert(windowLog != 0);
        /* Resize working context table params for input only, since the dict
         * has its own tables. */
        params.cParams = ZSTD_adjustCParams_internal(*cdict_cParams, pledgedSrcSize, 0);
        params.cParams.windowLog = windowLog;   /* keep the caller's window, not the dict's */
        ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                ZSTDcrp_continue, zbuff);
        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
    }

    {
        /* span of actual dictionary content inside the cdict's window */
        const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
                                  - cdict->matchState.window.base);
        const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
        if (cdictLen == 0) {
            /* don't even attach dictionaries with no contents */
            DEBUGLOG(4, "skipping attaching empty dictionary");
        } else {
            DEBUGLOG(4, "attaching dictionary into context");
            cctx->blockState.matchState.dictMatchState = &cdict->matchState;

            /* prep working match state so dict matches never have negative indices
             * when they are translated to the working context's index space. */
            if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
                cctx->blockState.matchState.window.nextSrc =
                    cctx->blockState.matchState.window.base + cdictEnd;
                ZSTD_window_clear(&cctx->blockState.matchState.window);
            }
            cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
        }
    }

    cctx->dictID = cdict->dictID;

    /* copy block state */
    memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));

    return 0;
}
1383 
/* ZSTD_resetCCtx_byCopyingCDict() :
 * reset cctx for a new frame, copying the cdict's match-state tables into
 * the working context (see attachDictSizeCutoffs : copying pays off past a
 * per-strategy input size). */
static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
                            const ZSTD_CDict* cdict,
                            ZSTD_CCtx_params params,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;

    DEBUGLOG(4, "copying dictionary into context");

    {   unsigned const windowLog = params.cParams.windowLog;
        assert(windowLog != 0);
        /* Copy only compression parameters related to tables. */
        params.cParams = *cdict_cParams;
        params.cParams.windowLog = windowLog;   /* keep the caller's window, not the dict's */
        ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                ZSTDcrp_noMemset, zbuff);   /* noMemset : tables are overwritten below */
        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
        assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
        assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
    }

    /* copy tables */
    {   size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
        size_t const hSize =  (size_t)1 << cdict_cParams->hashLog;
        size_t const tableSpace = (chainSize + hSize) * sizeof(U32);
        assert((U32*)cctx->blockState.matchState.chainTable == (U32*)cctx->blockState.matchState.hashTable + hSize);  /* chainTable must follow hashTable */
        assert((U32*)cctx->blockState.matchState.hashTable3 == (U32*)cctx->blockState.matchState.chainTable + chainSize);
        assert((U32*)cdict->matchState.chainTable == (U32*)cdict->matchState.hashTable + hSize);  /* chainTable must follow hashTable */
        assert((U32*)cdict->matchState.hashTable3 == (U32*)cdict->matchState.chainTable + chainSize);
        memcpy(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, tableSpace);   /* presumes all tables follow each other */
    }

    /* Zero the hashTable3, since the cdict never fills it */
    {   size_t const h3Size = (size_t)1 << cctx->blockState.matchState.hashLog3;
        assert(cdict->matchState.hashLog3 == 0);
        memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
    }

    /* copy dictionary offsets */
    {   ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
        ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
        dstMatchState->window       = srcMatchState->window;
        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
        dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3;
        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
    }

    cctx->dictID = cdict->dictID;

    /* copy block state */
    memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));

    return 0;
}
1439 
1440 /* We have a choice between copying the dictionary context into the working
1441  * context, or referencing the dictionary context from the working context
1442  * in-place. We decide here which strategy to use. */
1443 static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
1444                             const ZSTD_CDict* cdict,
1445                             ZSTD_CCtx_params params,
1446                             U64 pledgedSrcSize,
1447                             ZSTD_buffered_policy_e zbuff)
1448 {
1449 
1450     DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)", (U32)pledgedSrcSize);
1451 
1452     if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
1453         return ZSTD_resetCCtx_byAttachingCDict(
1454             cctx, cdict, params, pledgedSrcSize, zbuff);
1455     } else {
1456         return ZSTD_resetCCtx_byCopyingCDict(
1457             cctx, cdict, params, pledgedSrcSize, zbuff);
1458     }
1459 }
1460 
/*! ZSTD_copyCCtx_internal() :
 *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
 *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
 *  The "context", in this case, refers to the hash and chain tables,
 *  entropy tables, and dictionary references.
 *  Compression parameters are taken from srcCCtx; frame parameters from fParams.
 * @return : 0, or an error code */
static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
                            const ZSTD_CCtx* srcCCtx,
                            ZSTD_frameParameters fParams,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    DEBUGLOG(5, "ZSTD_copyCCtx_internal");
    /* copying is only defined from a freshly-initialized source context */
    if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);

    memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
        /* Copy only compression parameters related to tables. */
        params.cParams = srcCCtx->appliedParams.cParams;
        params.fParams = fParams;
        /* ZSTDcrp_noMemset : tables are fully overwritten by the memcpy below,
         * so the reset can skip zeroing them. */
        ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
                                ZSTDcrp_noMemset, zbuff);
        assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
        assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
        assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
        assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
        assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
    }

    /* copy tables */
    {   size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
        size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
        size_t const h3Size = (size_t)1 << srcCCtx->blockState.matchState.hashLog3;
        size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
        assert((U32*)dstCCtx->blockState.matchState.chainTable == (U32*)dstCCtx->blockState.matchState.hashTable + hSize);  /* chainTable must follow hashTable */
        assert((U32*)dstCCtx->blockState.matchState.hashTable3 == (U32*)dstCCtx->blockState.matchState.chainTable + chainSize);
        memcpy(dstCCtx->blockState.matchState.hashTable, srcCCtx->blockState.matchState.hashTable, tableSpace);   /* presumes all tables follow each other */
    }

    /* copy dictionary offsets */
    {
        const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
        ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
        dstMatchState->window       = srcMatchState->window;
        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
        dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3;
        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
    }
    dstCCtx->dictID = srcCCtx->dictID;

    /* copy block state */
    memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));

    return 0;
}
1517 
1518 /*! ZSTD_copyCCtx() :
1519  *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
1520  *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
1521  *  pledgedSrcSize==0 means "unknown".
1522 *   @return : 0, or an error code */
1523 size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
1524 {
1525     ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
1526     ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0);
1527     ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
1528     if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
1529     fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
1530 
1531     return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
1532                                 fParams, pledgedSrcSize,
1533                                 zbuff);
1534 }
1535 
1536 
#define ZSTD_ROWSIZE 16
/*! ZSTD_reduceTable() :
 *  reduce table indexes by `reducerValue`, or squash to zero.
 *  PreserveMark preserves "unsorted mark" for btlazy2 strategy.
 *  It must be set to a clear 0/1 value, to remove branch during inlining.
 *  Presume table size is a multiple of ZSTD_ROWSIZE
 *  to help auto-vectorization */
FORCE_INLINE_TEMPLATE void
ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
{
    int const nbRows = (int)size / ZSTD_ROWSIZE;
    int cellNb = 0;
    int rowNb;
    assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */
    assert(size < (1U<<31));   /* can be casted to int */
    for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
        int column;
        for (column=0; column<ZSTD_ROWSIZE; column++) {
            if (preserveMark) {
                /* bump the mark so that the subtraction below restores it exactly */
                U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
                table[cellNb] += adder;
            }
            /* indexes below reducerValue are no longer reachable : squash to zero */
            if (table[cellNb] < reducerValue) table[cellNb] = 0;
            else table[cellNb] -= reducerValue;
            cellNb++;
    }   }
}
1564 
/* Reduce all indexes in `table` by `reducerValue` (no unsorted-mark preservation). */
static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 0);
}
1569 
/* Variant for btlazy2 : preserves ZSTD_DUBT_UNSORTED_MARK entries while reducing. */
static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
{
    ZSTD_reduceTable_internal(table, size, reducerValue, 1);
}
1574 
/*! ZSTD_reduceIndex() :
*   rescale all indexes to avoid future overflow (indexes are U32) */
static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
{
    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
    {   U32 const hSize = (U32)1 << zc->appliedParams.cParams.hashLog;
        ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
    }

    /* chain table is unused by ZSTD_fast, so there is nothing to rescale */
    if (zc->appliedParams.cParams.strategy != ZSTD_fast) {
        U32 const chainSize = (U32)1 << zc->appliedParams.cParams.chainLog;
        if (zc->appliedParams.cParams.strategy == ZSTD_btlazy2)
            ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);  /* keeps unsorted marks intact */
        else
            ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
    }

    /* hashTable3 only exists when hashLog3 != 0 */
    if (ms->hashLog3) {
        U32 const h3Size = (U32)1 << ms->hashLog3;
        ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
    }
}
1597 
1598 
1599 /*-*******************************************************
1600 *  Block entropic compression
1601 *********************************************************/
1602 
1603 /* See doc/zstd_compression_format.md for detailed format description */
1604 
/* Emit a raw (uncompressed) block : 3-byte block header followed by srcSize bytes copied verbatim.
 * @return : total bytes written (header + content), or dstSize_tooSmall. */
static size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
{
    if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
    /* header layout : lastBlock flag (bit 0), block type (bits 1-2), size (bits 3-23) */
    {   U32 const header24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
        MEM_writeLE24(dst, header24);
    }
    memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
    return ZSTD_blockHeaderSize + srcSize;
}
1613 
1614 static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
1615 {
1616     BYTE* const ostart = (BYTE* const)dst;
1617     U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
1618 
1619     if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall);
1620 
1621     switch(flSize)
1622     {
1623         case 1: /* 2 - 1 - 5 */
1624             ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
1625             break;
1626         case 2: /* 2 - 2 - 12 */
1627             MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
1628             break;
1629         case 3: /* 2 - 2 - 20 */
1630             MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
1631             break;
1632         default:   /* not necessary : flSize is {1,2,3} */
1633             assert(0);
1634     }
1635 
1636     memcpy(ostart + flSize, src, srcSize);
1637     return srcSize + flSize;
1638 }
1639 
/* Store literals as a single repeated byte (set_rle) :
 * header encodes srcSize, followed by exactly one byte of content.
 * @return : flSize + 1. */
static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    BYTE* const op = (BYTE*)dst;
    /* header needs 1, 2 or 3 bytes, depending on how many bits srcSize requires */
    U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);

    (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */

    if (flSize == 1) {
        /* 2 - 1 - 5 */
        op[0] = (BYTE)((U32)set_rle + (srcSize<<3));
    } else if (flSize == 2) {
        /* 2 - 2 - 12 */
        MEM_writeLE16(op, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
    } else {
        /* 2 - 2 - 20 ; flSize is necessarily 3 here */
        assert(flSize == 3);
        MEM_writeLE32(op, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
    }

    op[flSize] = *(const BYTE*)src;
    return flSize+1;
}
1665 
1666 
1667 /* ZSTD_minGain() :
1668  * minimum compression required
1669  * to generate a compress block or a compressed literals section.
1670  * note : use same formula for both situations */
1671 static size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
1672 {
1673     U32 const minlog = (strat==ZSTD_btultra) ? 7 : 6;
1674     return (srcSize >> minlog) + 2;
1675 }
1676 
/* Compress the literals section with Huffman, falling back to raw (set_basic)
 * or RLE storage when compression does not gain enough.
 * `nextHuf` is updated with the table actually used, so the next block may repeat it.
 * @return : total size written to dst (header + literals), or an error code. */
static size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
                                     ZSTD_hufCTables_t* nextHuf,
                                     ZSTD_strategy strategy, int disableLiteralCompression,
                                     void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                                     U32* workspace, const int bmi2)
{
    size_t const minGain = ZSTD_minGain(srcSize, strategy);
    size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
    BYTE*  const ostart = (BYTE*)dst;
    U32 singleStream = srcSize < 256;   /* small inputs use 1 Huffman stream instead of 4 */
    symbolEncodingType_e hType = set_compressed;
    size_t cLitSize;

    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i)",
                disableLiteralCompression);

    /* Prepare nextEntropy assuming reusing the existing table */
    memcpy(nextHuf, prevHuf, sizeof(*prevHuf));

    if (disableLiteralCompression)
        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);

    /* small ? don't even attempt compression (speed opt) */
#   define COMPRESS_LITERALS_SIZE_MIN 63
    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
        if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
    }

    if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall);   /* not enough space for compression */
    {   HUF_repeat repeat = prevHuf->repeatMode;
        /* fast strategies prefer reusing the previous table on small inputs, to save table-build time */
        int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
        if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
        cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
                                      workspace, HUF_WORKSPACE_SIZE, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2)
                                : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
                                      workspace, HUF_WORKSPACE_SIZE, (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
        if (repeat != HUF_repeat_none) {
            /* reused the existing table */
            hType = set_repeat;
        }
    }

    /* not compressible, insufficient gain, or error : fall back to raw literals
     * and roll nextHuf back to the previous entropy tables */
    if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
    }
    /* compressed to a single byte : RLE is cheaper, roll back entropy tables too */
    if (cLitSize==1) {
        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
        return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
    }

    if (hType == set_compressed) {
        /* using a newly constructed table */
        nextHuf->repeatMode = HUF_repeat_check;
    }

    /* Build header */
    switch(lhSize)
    {
    case 3: /* 2 - 2 - 10 - 10 */
        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
            MEM_writeLE24(ostart, lhc);
            break;
        }
    case 4: /* 2 - 2 - 14 - 14 */
        {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
            MEM_writeLE32(ostart, lhc);
            break;
        }
    case 5: /* 2 - 2 - 18 - 18 */
        {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
            MEM_writeLE32(ostart, lhc);
            ostart[4] = (BYTE)(cLitSize >> 10);
            break;
        }
    default:  /* not possible : lhSize is {3,4,5} */
        assert(0);
    }
    return lhSize+cLitSize;
}
1758 
1759 
/* Translate raw sequence fields (litLength, matchLength, offset) into their
 * FSE symbol codes, filling the llCode/ofCode/mlCode tables in seqStorePtr.
 * Long-length corrections (longLengthID) force the max code at the recorded position. */
void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
{
    const seqDef* const sequences = seqStorePtr->sequencesStart;
    BYTE* const llCodeTable = seqStorePtr->llCode;
    BYTE* const ofCodeTable = seqStorePtr->ofCode;
    BYTE* const mlCodeTable = seqStorePtr->mlCode;
    U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    U32 u;
    assert(nbSeq <= seqStorePtr->maxNbSeq);
    for (u=0; u<nbSeq; u++) {
        U32 const llv = sequences[u].litLength;
        U32 const mlv = sequences[u].matchLength;
        llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);   /* offset code is simply its highest bit position */
        mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
    }
    /* longLengthID==1 : a literal length overflowed its field; ==2 : a match length did */
    if (seqStorePtr->longLengthID==1)
        llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
    if (seqStorePtr->longLengthID==2)
        mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
}
1781 
1782 
/**
 * -log2(x / 256) lookup table for x in [0, 256), in fixed-point with 8 fractional bits.
 * If x == 0: Return 0
 * Else: Return floor(-log2(x / 256) * 256)
 * (note : identifier keeps its historical spelling "Probabilty")
 */
static unsigned const kInverseProbabiltyLog256[256] = {
    0,    2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
    1130, 1100, 1073, 1047, 1024, 1001, 980,  960,  941,  923,  906,  889,
    874,  859,  844,  830,  817,  804,  791,  779,  768,  756,  745,  734,
    724,  714,  704,  694,  685,  676,  667,  658,  650,  642,  633,  626,
    618,  610,  603,  595,  588,  581,  574,  567,  561,  554,  548,  542,
    535,  529,  523,  517,  512,  506,  500,  495,  489,  484,  478,  473,
    468,  463,  458,  453,  448,  443,  438,  434,  429,  424,  420,  415,
    411,  407,  402,  398,  394,  390,  386,  382,  377,  373,  370,  366,
    362,  358,  354,  350,  347,  343,  339,  336,  332,  329,  325,  322,
    318,  315,  311,  308,  305,  302,  298,  295,  292,  289,  286,  282,
    279,  276,  273,  270,  267,  264,  261,  258,  256,  253,  250,  247,
    244,  241,  239,  236,  233,  230,  228,  225,  222,  220,  217,  215,
    212,  209,  207,  204,  202,  199,  197,  194,  192,  190,  187,  185,
    182,  180,  178,  175,  173,  171,  168,  166,  164,  162,  159,  157,
    155,  153,  151,  149,  146,  144,  142,  140,  138,  136,  134,  132,
    130,  128,  126,  123,  121,  119,  117,  115,  114,  112,  110,  108,
    106,  104,  102,  100,  98,   96,   94,   93,   91,   89,   87,   85,
    83,   82,   80,   78,   76,   74,   73,   71,   69,   67,   66,   64,
    62,   61,   59,   57,   55,   54,   52,   50,   49,   47,   46,   44,
    42,   41,   39,   37,   36,   34,   33,   31,   30,   28,   26,   25,
    23,   22,   20,   19,   17,   16,   14,   13,   11,   10,   8,    7,
    5,    4,    2,    1,
};
1812 
1813 
/**
 * Returns the cost in bits of encoding the distribution described by count
 * using the entropy bound.
 */
static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
{
    unsigned bitCost256 = 0;   /* accumulated cost, fixed-point with 8 fractional bits */
    unsigned sym;
    for (sym = 0; sym <= max; ++sym) {
        /* probability of sym, scaled to [0,256) */
        unsigned prob256 = (unsigned)((256 * count[sym]) / total);
        /* present symbols must keep a non-zero probability */
        if (count[sym] != 0 && prob256 == 0)
            prob256 = 1;
        assert(count[sym] < total);
        bitCost256 += count[sym] * kInverseProbabiltyLog256[prob256];
    }
    return bitCost256 >> 8;
}
1831 
1832 
/**
 * Returns the cost in bits of encoding the distribution in count using the
 * table described by norm. The max symbol support by norm is assumed >= max.
 * norm must be valid for every symbol with non-zero probability in count.
 */
static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
                                    unsigned const* count, unsigned const max)
{
    unsigned const shift = 8 - accuracyLog;   /* rescale norm to an 8-bit probability */
    size_t bitSum = 0;
    unsigned sym;
    assert(accuracyLog <= 8);
    for (sym = 0; sym <= max; ++sym) {
        /* -1 denotes a "less than 1" probability; treat it as 1 */
        unsigned const normValue = (norm[sym] != -1) ? (unsigned)norm[sym] : 1;
        unsigned const norm256 = normValue << shift;
        assert(norm256 > 0);
        assert(norm256 < 256);
        bitSum += count[sym] * kInverseProbabiltyLog256[norm256];
    }
    return bitSum >> 8;
}
1854 
1855 
1856 static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
1857   void const* ptr = ctable;
1858   U16 const* u16ptr = (U16 const*)ptr;
1859   U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
1860   return maxSymbolValue;
1861 }
1862 
1863 
/**
 * Returns the cost in bits of encoding the distribution in count using ctable.
 * Returns an error if ctable cannot represent all the symbols in count.
 */
static size_t ZSTD_fseBitCost(
    FSE_CTable const* ctable,
    unsigned const* count,
    unsigned const max)
{
    unsigned const kAccuracyLog = 8;   /* per-symbol cost is fixed-point with 8 fractional bits */
    size_t cost = 0;
    unsigned s;
    FSE_CState_t cstate;
    FSE_initCState(&cstate, ctable);
    /* the table must cover every symbol up to max */
    if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
        DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
                    ZSTD_getFSEMaxSymbolValue(ctable), max);
        return ERROR(GENERIC);
    }
    for (s = 0; s <= max; ++s) {
        unsigned const tableLog = cstate.stateLog;
        /* a symbol can never legitimately cost more than tableLog+1 bits;
         * reaching badCost means the table assigns it probability 0 */
        unsigned const badCost = (tableLog + 1) << kAccuracyLog;
        unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
        if (count[s] == 0)
            continue;
        if (bitCost >= badCost) {
            DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
            return ERROR(GENERIC);
        }
        cost += count[s] * bitCost;
    }
    return cost >> kAccuracyLog;
}
1897 
/**
 * Returns the cost in bytes of encoding the normalized count header.
 * Returns an error if any of the helper functions return an error.
 */
static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
                              size_t const nbSeq, unsigned const FSELog)
{
    BYTE wksp[FSE_NCOUNTBOUND];   /* scratch output : only the size matters, content is discarded */
    S16 norm[MaxSeq + 1];
    const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
    CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
    return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
}
1911 
1912 
/* Whether set_basic (the format's predefined default tables) is a permitted
 * encoding choice for the current symbol type. */
typedef enum {
    ZSTD_defaultDisallowed = 0,
    ZSTD_defaultAllowed = 1
} ZSTD_defaultPolicy_e;
1917 
/* Choose how a symbol stream (literal lengths, match lengths, or offsets) will
 * be encoded : rle, basic (predefined tables), repeat (previous tables), or
 * compressed (fresh FSE tables). Fast strategies use cheap heuristics; slower
 * strategies estimate actual bit costs. `*repeatMode` is updated to reflect
 * the state the next block will inherit. */
MEM_STATIC symbolEncodingType_e
ZSTD_selectEncodingType(
        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
        FSE_CTable const* prevCTable,
        short const* defaultNorm, U32 defaultNormLog,
        ZSTD_defaultPolicy_e const isDefaultAllowed,
        ZSTD_strategy const strategy)
{
    ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
    /* every sequence uses the same symbol : candidate for RLE */
    if (mostFrequent == nbSeq) {
        *repeatMode = FSE_repeat_none;
        if (isDefaultAllowed && nbSeq <= 2) {
            /* Prefer set_basic over set_rle when there are 2 or less symbols,
             * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
             * If basic encoding isn't possible, always choose RLE.
             */
            DEBUGLOG(5, "Selected set_basic");
            return set_basic;
        }
        DEBUGLOG(5, "Selected set_rle");
        return set_rle;
    }
    if (strategy < ZSTD_lazy) {
        /* fast strategies : avoid the cost-estimation machinery below */
        if (isDefaultAllowed) {
            size_t const staticFse_nbSeq_max = 1000;
            size_t const mult = 10 - strategy;
            size_t const baseLog = 3;
            size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog;  /* 28-36 for offset, 56-72 for lengths */
            assert(defaultNormLog >= 5 && defaultNormLog <= 6);  /* xx_DEFAULTNORMLOG */
            assert(mult <= 9 && mult >= 7);
            if ( (*repeatMode == FSE_repeat_valid)
              && (nbSeq < staticFse_nbSeq_max) ) {
                DEBUGLOG(5, "Selected set_repeat");
                return set_repeat;
            }
            if ( (nbSeq < dynamicFse_nbSeq_min)
              || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
                DEBUGLOG(5, "Selected set_basic");
                /* The format allows default tables to be repeated, but it isn't useful.
                 * When using simple heuristics to select encoding type, we don't want
                 * to confuse these tables with dictionaries. When running more careful
                 * analysis, we don't need to waste time checking both repeating tables
                 * and default tables.
                 */
                *repeatMode = FSE_repeat_none;
                return set_basic;
            }
        }
    } else {
        /* slower strategies : estimate the bit cost of each candidate and pick the cheapest.
         * disallowed candidates are given an error value, which compares larger than any cost. */
        size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
        size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
        size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
        size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);

        if (isDefaultAllowed) {
            assert(!ZSTD_isError(basicCost));
            assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
        }
        assert(!ZSTD_isError(NCountCost));
        assert(compressedCost < ERROR(maxCode));
        DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
                    (U32)basicCost, (U32)repeatCost, (U32)compressedCost);
        if (basicCost <= repeatCost && basicCost <= compressedCost) {
            DEBUGLOG(5, "Selected set_basic");
            assert(isDefaultAllowed);
            *repeatMode = FSE_repeat_none;
            return set_basic;
        }
        if (repeatCost <= compressedCost) {
            DEBUGLOG(5, "Selected set_repeat");
            assert(!ZSTD_isError(repeatCost));
            return set_repeat;
        }
        assert(compressedCost < basicCost && compressedCost < repeatCost);
    }
    DEBUGLOG(5, "Selected set_compressed");
    *repeatMode = FSE_repeat_check;
    return set_compressed;
}
1998 
/* Build (or reuse) the FSE compression table `nextCTable` for one symbol type,
 * according to the selected encoding `type`, and write the table description
 * (NCount header) into dst when one is needed.
 * note : for set_compressed, `count` may be modified in place (last-symbol adjustment).
 * NOTE(review): set_rle writes *op without an explicit dstCapacity check —
 * presumably callers guarantee dstCapacity >= 1 here; confirm at call sites.
 * @return : number of bytes written to dst (0 for repeat/basic), or an error code. */
MEM_STATIC size_t
ZSTD_buildCTable(void* dst, size_t dstCapacity,
                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
                U32* count, U32 max,
                const BYTE* codeTable, size_t nbSeq,
                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                const FSE_CTable* prevCTable, size_t prevCTableSize,
                void* workspace, size_t workspaceSize)
{
    BYTE* op = (BYTE*)dst;
    const BYTE* const oend = op + dstCapacity;

    switch (type) {
    case set_rle:
        /* the single repeated symbol is stored directly in the block */
        *op = codeTable[0];
        CHECK_F(FSE_buildCTable_rle(nextCTable, (BYTE)max));
        return 1;
    case set_repeat:
        /* reuse the previous block's table; nothing written to dst */
        memcpy(nextCTable, prevCTable, prevCTableSize);
        return 0;
    case set_basic:
        CHECK_F(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize));  /* note : could be pre-calculated */
        return 0;
    case set_compressed: {
        S16 norm[MaxSeq + 1];
        size_t nbSeq_1 = nbSeq;
        const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
        /* the last sequence's symbol is implicit in the FSE stream :
         * remove one occurrence before normalizing, when possible */
        if (count[codeTable[nbSeq-1]] > 1) {
            count[codeTable[nbSeq-1]]--;
            nbSeq_1--;
        }
        assert(nbSeq_1 > 1);
        CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
        {   size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog);   /* overflow protected */
            if (FSE_isError(NCountSize)) return NCountSize;
            CHECK_F(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
            return NCountSize;
        }
    }
    default: return assert(0), ERROR(GENERIC);
    }
}
2041 
/* Encode all sequences into a single backward bitstream, interleaving the
 * three FSE states (LL/ML/OF) with the raw extra bits of each field.
 * Sequences are emitted last-to-first, as required by the format's
 * backward-reading decoder. The trailing numeric comments track the worst-case
 * number of bits accumulated between flushes on 32-bit / 64-bit targets.
 * `longOffsets` splits offsets wider than the stream accumulator into two writes.
 * @return : size of the bitstream, or dstSize_tooSmall. */
FORCE_INLINE_TEMPLATE size_t
ZSTD_encodeSequences_body(
            void* dst, size_t dstCapacity,
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
            seqDef const* sequences, size_t nbSeq, int longOffsets)
{
    BIT_CStream_t blockStream;
    FSE_CState_t  stateMatchLength;
    FSE_CState_t  stateOffsetBits;
    FSE_CState_t  stateLitLength;

    CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */

    /* first symbols : the last sequence initializes the three FSE states,
     * so only its extra bits are written here */
    FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
    FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);
    FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);
    BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
    if (MEM_32bits()) BIT_flushBits(&blockStream);
    BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
    if (MEM_32bits()) BIT_flushBits(&blockStream);
    if (longOffsets) {
        /* offset may exceed the accumulator : write high bits first, then flush */
        U32 const ofBits = ofCodeTable[nbSeq-1];
        int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
        if (extraBits) {
            BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
            BIT_flushBits(&blockStream);
        }
        BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
                    ofBits - extraBits);
    } else {
        BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
    }
    BIT_flushBits(&blockStream);

    {   size_t n;
        for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
            BYTE const llCode = llCodeTable[n];
            BYTE const ofCode = ofCodeTable[n];
            BYTE const mlCode = mlCodeTable[n];
            U32  const llBits = LL_bits[llCode];
            U32  const ofBits = ofCode;
            U32  const mlBits = ML_bits[mlCode];
            DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
                        sequences[n].litLength,
                        sequences[n].matchLength + MINMATCH,
                        sequences[n].offset);
                                                                            /* 32b*/  /* 64b*/
                                                                            /* (7)*/  /* (7)*/
            FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
            FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
            if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
            FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
            if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
                BIT_flushBits(&blockStream);                                /* (7)*/
            BIT_addBits(&blockStream, sequences[n].litLength, llBits);
            if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
            BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
            if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
            if (longOffsets) {
                int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
                if (extraBits) {
                    BIT_addBits(&blockStream, sequences[n].offset, extraBits);
                    BIT_flushBits(&blockStream);                            /* (7)*/
                }
                BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
                            ofBits - extraBits);                            /* 31 */
            } else {
                BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
            }
            BIT_flushBits(&blockStream);                                    /* (7)*/
    }   }

    /* terminate the stream : flush the three final FSE states */
    DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
    FSE_flushCState(&blockStream, &stateMatchLength);
    DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
    FSE_flushCState(&blockStream, &stateOffsetBits);
    DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
    FSE_flushCState(&blockStream, &stateLitLength);

    {   size_t const streamSize = BIT_closeCStream(&blockStream);
        if (streamSize==0) return ERROR(dstSize_tooSmall);   /* not enough space */
        return streamSize;
    }
}
2129 
2130 static size_t
2131 ZSTD_encodeSequences_default(
2132             void* dst, size_t dstCapacity,
2133             FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
2134             FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
2135             FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
2136             seqDef const* sequences, size_t nbSeq, int longOffsets)
2137 {
2138     return ZSTD_encodeSequences_body(dst, dstCapacity,
2139                                     CTable_MatchLength, mlCodeTable,
2140                                     CTable_OffsetBits, ofCodeTable,
2141                                     CTable_LitLength, llCodeTable,
2142                                     sequences, nbSeq, longOffsets);
2143 }
2144 
2145 
#if DYNAMIC_BMI2

/* ZSTD_encodeSequences_bmi2() :
 * Same body as the default variant, but compiled with the "bmi2" target
 * attribute so this specialization may use BMI2 instructions. */
static TARGET_ATTRIBUTE("bmi2") size_t
ZSTD_encodeSequences_bmi2(
            void* dst, size_t dstCapacity,
            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
            seqDef const* sequences, size_t nbSeq, int longOffsets)
{
    size_t const streamSize = ZSTD_encodeSequences_body(
                                    dst, dstCapacity,
                                    CTable_MatchLength, mlCodeTable,
                                    CTable_OffsetBits, ofCodeTable,
                                    CTable_LitLength, llCodeTable,
                                    sequences, nbSeq, longOffsets);
    return streamSize;
}

#endif
2164 
2165 static size_t ZSTD_encodeSequences(
2166             void* dst, size_t dstCapacity,
2167             FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
2168             FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
2169             FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
2170             seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
2171 {
2172 #if DYNAMIC_BMI2
2173     if (bmi2) {
2174         return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
2175                                          CTable_MatchLength, mlCodeTable,
2176                                          CTable_OffsetBits, ofCodeTable,
2177                                          CTable_LitLength, llCodeTable,
2178                                          sequences, nbSeq, longOffsets);
2179     }
2180 #endif
2181     (void)bmi2;
2182     return ZSTD_encodeSequences_default(dst, dstCapacity,
2183                                         CTable_MatchLength, mlCodeTable,
2184                                         CTable_OffsetBits, ofCodeTable,
2185                                         CTable_LitLength, llCodeTable,
2186                                         sequences, nbSeq, longOffsets);
2187 }
2188 
/* ZSTD_compressSequences_internal() :
 * Entropy-compress the block content described by `seqStorePtr` into `dst` :
 * literals first (via ZSTD_compressLiterals), then the sequences section
 * (nbSeq header, FSE table descriptions, and the interleaved bitstream).
 * `nextEntropy` receives the tables selected for this block; `prevEntropy`
 * supplies the candidate tables for "repeat" mode.
 * @return : nb of bytes written into `dst`,
 *           0 to request an uncompressed block instead (rare decoder-compatibility
 *           workaround, see note at the end of the function),
 *           or an error code */
MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
                              ZSTD_entropyCTables_t const* prevEntropy,
                              ZSTD_entropyCTables_t* nextEntropy,
                              ZSTD_CCtx_params const* cctxParams,
                              void* dst, size_t dstCapacity, U32* workspace,
                              const int bmi2)
{
    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
    U32 count[MaxSeq+1];
    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
    const seqDef* const sequences = seqStorePtr->sequencesStart;
    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
    const BYTE* const llCodeTable = seqStorePtr->llCode;
    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
    BYTE* seqHead;
    BYTE* lastNCount = NULL;   /* position of the last serialized FSE table description, if any */

    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));

    /* Compress literals */
    {   const BYTE* const literals = seqStorePtr->litStart;
        size_t const litSize = seqStorePtr->lit - literals;
        int const disableLiteralCompression = (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
        size_t const cSize = ZSTD_compressLiterals(
                                    &prevEntropy->huf, &nextEntropy->huf,
                                    cctxParams->cParams.strategy, disableLiteralCompression,
                                    op, dstCapacity,
                                    literals, litSize,
                                    workspace, bmi2);
        if (ZSTD_isError(cSize))
          return cSize;
        assert(cSize <= dstCapacity);
        op += cSize;
    }

    /* Sequences Header : nbSeq is stored on 1, 2 or 3 bytes depending on its value */
    if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/) return ERROR(dstSize_tooSmall);
    if (nbSeq < 0x7F)
        *op++ = (BYTE)nbSeq;
    else if (nbSeq < LONGNBSEQ)
        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
    else
        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
    if (nbSeq==0) {
        /* Copy the old tables over as if we repeated them */
        memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
        return op - ostart;
    }

    /* seqHead : flags for FSE encoding type */
    seqHead = op++;

    /* convert length/distances into codes */
    ZSTD_seqToCodes(seqStorePtr);
    /* build CTable for Literal Lengths */
    {   U32 max = MaxLL;
        size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace);   /* can't fail */
        DEBUGLOG(5, "Building LL table");
        nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
        LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode, count, max, mostFrequent, nbSeq, LLFSELog, prevEntropy->fse.litlengthCTable, LL_defaultNorm, LL_defaultNormLog, ZSTD_defaultAllowed, strategy);
        assert(set_basic < set_compressed && set_rle < set_compressed);
        assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
                                                    count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
                                                    prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable),
                                                    workspace, HUF_WORKSPACE_SIZE);
            if (ZSTD_isError(countSize)) return countSize;
            if (LLtype == set_compressed)
                lastNCount = op;
            op += countSize;
    }   }
    /* build CTable for Offsets */
    {   U32 max = MaxOff;
        size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace);  /* can't fail */
        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
        DEBUGLOG(5, "Building OF table");
        nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
        Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode, count, max, mostFrequent, nbSeq, OffFSELog, prevEntropy->fse.offcodeCTable, OF_defaultNorm, OF_defaultNormLog, defaultPolicy, strategy);
        assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
                                                    count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                                                    prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable),
                                                    workspace, HUF_WORKSPACE_SIZE);
            if (ZSTD_isError(countSize)) return countSize;
            if (Offtype == set_compressed)
                lastNCount = op;
            op += countSize;
    }   }
    /* build CTable for MatchLengths */
    {   U32 max = MaxML;
        size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace);   /* can't fail */
        DEBUGLOG(5, "Building ML table");
        nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
        MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode, count, max, mostFrequent, nbSeq, MLFSELog, prevEntropy->fse.matchlengthCTable, ML_defaultNorm, ML_defaultNormLog, ZSTD_defaultAllowed, strategy);
        assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
                                                    count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
                                                    prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable),
                                                    workspace, HUF_WORKSPACE_SIZE);
            if (ZSTD_isError(countSize)) return countSize;
            if (MLtype == set_compressed)
                lastNCount = op;
            op += countSize;
    }   }

    /* pack the three 2-bit encoding types into the sequences section header byte */
    *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));

    {   size_t const bitstreamSize = ZSTD_encodeSequences(
                                        op, oend - op,
                                        CTable_MatchLength, mlCodeTable,
                                        CTable_OffsetBits, ofCodeTable,
                                        CTable_LitLength, llCodeTable,
                                        sequences, nbSeq,
                                        longOffsets, bmi2);
        if (ZSTD_isError(bitstreamSize)) return bitstreamSize;
        op += bitstreamSize;
        /* zstd versions <= 1.3.4 mistakenly report corruption when
         * FSE_readNCount() receives a buffer < 4 bytes.
         * Fixed by https://github.com/facebook/zstd/pull/1146.
         * This can happen when the last set_compressed table present is 2
         * bytes and the bitstream is only one byte.
         * In this exceedingly rare case, we will simply emit an uncompressed
         * block, since it isn't worth optimizing.
         */
        if (lastNCount && (op - lastNCount) < 4) {
            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
            assert(op - lastNCount == 3);
            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
                        "emitting an uncompressed block.");
            return 0;
        }
    }

    return op - ostart;
}
2333 
2334 MEM_STATIC size_t ZSTD_compressSequences(seqStore_t* seqStorePtr,
2335                         const ZSTD_entropyCTables_t* prevEntropy,
2336                               ZSTD_entropyCTables_t* nextEntropy,
2337                         const ZSTD_CCtx_params* cctxParams,
2338                               void* dst, size_t dstCapacity,
2339                               size_t srcSize, U32* workspace, int bmi2)
2340 {
2341     size_t const cSize = ZSTD_compressSequences_internal(
2342             seqStorePtr, prevEntropy, nextEntropy, cctxParams, dst, dstCapacity,
2343             workspace, bmi2);
2344     if (cSize == 0) return 0;
2345     /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
2346      * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block.
2347      */
2348     if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
2349         return 0;  /* block not compressed */
2350     if (ZSTD_isError(cSize)) return cSize;
2351 
2352     /* Check compressibility */
2353     {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
2354         if (cSize >= maxCSize) return 0;  /* block not compressed */
2355     }
2356 
2357     return cSize;
2358 }
2359 
2360 /* ZSTD_selectBlockCompressor() :
2361  * Not static, but internal use only (used by long distance matcher)
2362  * assumption : strat is a valid strategy */
2363 ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
2364 {
2365     static const ZSTD_blockCompressor blockCompressor[3][(unsigned)ZSTD_btultra+1] = {
2366         { ZSTD_compressBlock_fast  /* default for 0 */,
2367           ZSTD_compressBlock_fast,
2368           ZSTD_compressBlock_doubleFast,
2369           ZSTD_compressBlock_greedy,
2370           ZSTD_compressBlock_lazy,
2371           ZSTD_compressBlock_lazy2,
2372           ZSTD_compressBlock_btlazy2,
2373           ZSTD_compressBlock_btopt,
2374           ZSTD_compressBlock_btultra },
2375         { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
2376           ZSTD_compressBlock_fast_extDict,
2377           ZSTD_compressBlock_doubleFast_extDict,
2378           ZSTD_compressBlock_greedy_extDict,
2379           ZSTD_compressBlock_lazy_extDict,
2380           ZSTD_compressBlock_lazy2_extDict,
2381           ZSTD_compressBlock_btlazy2_extDict,
2382           ZSTD_compressBlock_btopt_extDict,
2383           ZSTD_compressBlock_btultra_extDict },
2384         { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,
2385           ZSTD_compressBlock_fast_dictMatchState,
2386           ZSTD_compressBlock_doubleFast_dictMatchState,
2387           ZSTD_compressBlock_greedy_dictMatchState,
2388           ZSTD_compressBlock_lazy_dictMatchState,
2389           ZSTD_compressBlock_lazy2_dictMatchState,
2390           ZSTD_compressBlock_btlazy2_dictMatchState,
2391           ZSTD_compressBlock_btopt_dictMatchState,
2392           ZSTD_compressBlock_btultra_dictMatchState }
2393     };
2394     ZSTD_blockCompressor selectedCompressor;
2395     ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
2396 
2397     assert((U32)strat >= (U32)ZSTD_fast);
2398     assert((U32)strat <= (U32)ZSTD_btultra);
2399     selectedCompressor = blockCompressor[(int)dictMode][(U32)strat];
2400     assert(selectedCompressor != NULL);
2401     return selectedCompressor;
2402 }
2403 
2404 static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
2405                                    const BYTE* anchor, size_t lastLLSize)
2406 {
2407     memcpy(seqStorePtr->lit, anchor, lastLLSize);
2408     seqStorePtr->lit += lastLLSize;
2409 }
2410 
2411 void ZSTD_resetSeqStore(seqStore_t* ssPtr)
2412 {
2413     ssPtr->lit = ssPtr->litStart;
2414     ssPtr->sequences = ssPtr->sequencesStart;
2415     ssPtr->longLengthID = 0;
2416 }
2417 
/* ZSTD_compressBlock_internal() :
 * Compress one block : select and store sequences (via extern seq store,
 * long-distance matcher, or the strategy-selected block compressor),
 * then entropy-encode them with ZSTD_compressSequences().
 * On success with a non-zero result, commits the updated repcodes and
 * entropy tables by swapping the prev/next block states.
 * @return : compressed size, 0 to request an uncompressed block, or an error code */
static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
                                        void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize)
{
    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
    size_t cSize;
    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%zu, dictLimit=%u, nextToUpdate=%u)",
                dstCapacity, ms->window.dictLimit, ms->nextToUpdate);
    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);

    /* Assert that we have correctly flushed the ctx params into the ms's copy */
    ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);

    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
        /* still consume any pending external sequences so their cursor stays in sync */
        ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.searchLength);
        cSize = 0;
        goto out;  /* don't even attempt compression below a certain srcSize */
    }
    ZSTD_resetSeqStore(&(zc->seqStore));
    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;   /* required for optimal parser to read stats from dictionary */

    /* a gap between an attached dict and the current window is not safe,
     * they must remain adjacent, and when that stops being the case, the dict
     * must be unset */
    assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);

    /* limited update after a very long match */
    {   const BYTE* const base = ms->window.base;
        const BYTE* const istart = (const BYTE*)src;
        const U32 current = (U32)(istart-base);
        if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1));   /* ensure no overflow */
        if (current > ms->nextToUpdate + 384)
            ms->nextToUpdate = current - MIN(192, (U32)(current - ms->nextToUpdate - 384));
    }

    /* select and store sequences */
    {   ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
        size_t lastLLSize;
        /* seed next block's repcodes from the previous block's */
        {   int i;
            for (i = 0; i < ZSTD_REP_NUM; ++i)
                zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
        }
        if (zc->externSeqStore.pos < zc->externSeqStore.size) {
            assert(!zc->appliedParams.ldmParams.enableLdm);
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&zc->externSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       src, srcSize);
            assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
        } else if (zc->appliedParams.ldmParams.enableLdm) {
            rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0};

            ldmSeqStore.seq = zc->ldmSequences;
            ldmSeqStore.capacity = zc->maxNbLdmSequences;
            /* Updates ldmSeqStore.size */
            CHECK_F(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
                                               &zc->appliedParams.ldmParams,
                                               src, srcSize));
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&ldmSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       src, srcSize);
            assert(ldmSeqStore.pos == ldmSeqStore.size);
        } else {   /* not long range mode */
            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
        }
        {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
            ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
    }   }

    /* encode sequences and literals */
    cSize = ZSTD_compressSequences(&zc->seqStore,
            &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
            &zc->appliedParams,
            dst, dstCapacity,
            srcSize, zc->entropyWorkspace, zc->bmi2);

out:
    if (!ZSTD_isError(cSize) && cSize != 0) {
        /* confirm repcodes and entropy tables when emitting a compressed block */
        ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
        zc->blockState.prevCBlock = zc->blockState.nextCBlock;
        zc->blockState.nextCBlock = tmp;
    }
    /* We check that dictionaries have offset codes available for the first
     * block. After the first block, the offcode table might not have large
     * enough codes to represent the offsets in the data.
     */
    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;

    return cSize;
}
2516 
2517 
2518 /*! ZSTD_compress_frameChunk() :
2519 *   Compress a chunk of data into one or multiple blocks.
2520 *   All blocks will be terminated, all input will be consumed.
2521 *   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
2522 *   Frame is supposed already started (header already produced)
2523 *   @return : compressed size, or an error code
2524 */
static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
                                     void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                                     U32 lastFrameChunk)
{
    size_t blockSize = cctx->blockSize;
    size_t remaining = srcSize;
    const BYTE* ip = (const BYTE*)src;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
    assert(cctx->appliedParams.cParams.windowLog <= 31);

    DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (U32)blockSize);
    /* checksum covers the raw input, accumulated across chunks */
    if (cctx->appliedParams.fParams.checksumFlag && srcSize)
        XXH64_update(&cctx->xxhState, src, srcSize);

    while (remaining) {
        ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
        /* this block carries the "last block" flag only when it is the final
         * block of the final chunk */
        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);

        if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
            return ERROR(dstSize_tooSmall);   /* not enough space to store compressed block */
        if (remaining < blockSize) blockSize = remaining;

        /* re-base window indexes before the 32-bit index space overflows */
        if (ZSTD_window_needOverflowCorrection(ms->window, ip + blockSize)) {
            U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy);
            U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
            ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
            ZSTD_reduceIndex(cctx, correction);
            if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
            else ms->nextToUpdate -= correction;
            ms->loadedDictEnd = 0;
            ms->dictMatchState = NULL;
        }
        ZSTD_window_enforceMaxDist(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
        if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;

        {   size_t cSize = ZSTD_compressBlock_internal(cctx,
                                op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
                                ip, blockSize);
            if (ZSTD_isError(cSize)) return cSize;

            if (cSize == 0) {  /* block is not compressible */
                cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
                if (ZSTD_isError(cSize)) return cSize;
            } else {
                /* compressed block header : lastBlock flag, block type, then size */
                U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
                MEM_writeLE24(op, cBlockHeader24);
                cSize += ZSTD_blockHeaderSize;
            }

            ip += blockSize;
            assert(remaining >= blockSize);
            remaining -= blockSize;
            op += cSize;
            assert(dstCapacity >= cSize);
            dstCapacity -= cSize;
            DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
                        (U32)cSize);
    }   }

    if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
    return op-ostart;
}
2592 
2593 
2594 static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
2595                                     ZSTD_CCtx_params params, U64 pledgedSrcSize, U32 dictID)
2596 {   BYTE* const op = (BYTE*)dst;
2597     U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
2598     U32   const dictIDSizeCode = params.fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
2599     U32   const checksumFlag = params.fParams.checksumFlag>0;
2600     U32   const windowSize = (U32)1 << params.cParams.windowLog;
2601     U32   const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
2602     BYTE  const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
2603     U32   const fcsCode = params.fParams.contentSizeFlag ?
2604                      (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
2605     BYTE  const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
2606     size_t pos=0;
2607 
2608     assert(!(params.fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
2609     if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall);
2610     DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
2611                 !params.fParams.noDictIDFlag, dictID,  dictIDSizeCode);
2612 
2613     if (params.format == ZSTD_f_zstd1) {
2614         MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
2615         pos = 4;
2616     }
2617     op[pos++] = frameHeaderDecriptionByte;
2618     if (!singleSegment) op[pos++] = windowLogByte;
2619     switch(dictIDSizeCode)
2620     {
2621         default:  assert(0); /* impossible */
2622         case 0 : break;
2623         case 1 : op[pos] = (BYTE)(dictID); pos++; break;
2624         case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
2625         case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
2626     }
2627     switch(fcsCode)
2628     {
2629         default:  assert(0); /* impossible */
2630         case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
2631         case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
2632         case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
2633         case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
2634     }
2635     return pos;
2636 }
2637 
2638 /* ZSTD_writeLastEmptyBlock() :
2639  * output an empty Block with end-of-frame mark to complete a frame
2640  * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
2641  *           or an error code if `dstCapcity` is too small (<ZSTD_blockHeaderSize)
2642  */
2643 size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
2644 {
2645     if (dstCapacity < ZSTD_blockHeaderSize) return ERROR(dstSize_tooSmall);
2646     {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
2647         MEM_writeLE24(dst, cBlockHeader24);
2648         return ZSTD_blockHeaderSize;
2649     }
2650 }
2651 
2652 size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
2653 {
2654     if (cctx->stage != ZSTDcs_init)
2655         return ERROR(stage_wrong);
2656     if (cctx->appliedParams.ldmParams.enableLdm)
2657         return ERROR(parameter_unsupported);
2658     cctx->externSeqStore.seq = seq;
2659     cctx->externSeqStore.size = nbSeq;
2660     cctx->externSeqStore.capacity = nbSeq;
2661     cctx->externSeqStore.pos = 0;
2662     return 0;
2663 }
2664 
2665 
/* ZSTD_compressContinue_internal() :
 * Core incremental-compression step, shared by frame mode (frame=1) and
 * single-block mode (frame=0).
 * Updates the match-state window for the new input segment, emits the frame
 * header on the first frame-mode call, then compresses `src` into `dst`.
 * @return : number of bytes written into `dst` (header + compressed data),
 *           or an error code */
static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
                              void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize,
                               U32 frame, U32 lastFrameChunk)
{
    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
    size_t fhSize = 0;   /* frame header size, 0 unless written on this call */

    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
                cctx->stage, (U32)srcSize);
    if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong);   /* missing init (ZSTD_compressBegin) */

    /* first frame-mode call : emit the frame header ahead of any block */
    if (frame && (cctx->stage==ZSTDcs_init)) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
        if (ZSTD_isError(fhSize)) return fhSize;
        dstCapacity -= fhSize;
        dst = (char*)dst + fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */

    /* window_update returns 0 when input is non-contiguous with the previous
     * segment : restart the match search from the new dictLimit */
    if (!ZSTD_window_update(&ms->window, src, srcSize)) {
        ms->nextToUpdate = ms->window.dictLimit;
    }
    if (cctx->appliedParams.ldmParams.enableLdm) {
        /* LDM keeps its own window over the same input */
        ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
    }

    if (!frame) {
        /* overflow check and correction for block mode */
        if (ZSTD_window_needOverflowCorrection(ms->window, (const char*)src + srcSize)) {
            U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy);
            U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, 1 << cctx->appliedParams.cParams.windowLog, src);
            ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
            /* all table indices must be rescaled by the same correction */
            ZSTD_reduceIndex(cctx, correction);
            if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
            else ms->nextToUpdate -= correction;
            /* indices shifted : previously loaded dictionary no longer reachable */
            ms->loadedDictEnd = 0;
            ms->dictMatchState = NULL;
        }
    }

    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (U32)cctx->blockSize);
    {   size_t const cSize = frame ?
                             ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
                             ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
        if (ZSTD_isError(cSize)) return cSize;
        cctx->consumedSrcSize += srcSize;
        cctx->producedCSize += (cSize + fhSize);
        assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
        if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
            ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
            /* consuming more than pledged is an error (exact match checked at end of frame) */
            if (cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne) {
                DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize >= %u",
                    (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
                return ERROR(srcSize_wrong);
            }
        }
        return cSize + fhSize;
    }
}
2731 
2732 size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
2733                               void* dst, size_t dstCapacity,
2734                         const void* src, size_t srcSize)
2735 {
2736     DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (U32)srcSize);
2737     return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
2738 }
2739 
2740 
2741 size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
2742 {
2743     ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
2744     assert(!ZSTD_checkCParams(cParams));
2745     return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
2746 }
2747 
2748 size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
2749 {
2750     size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
2751     if (srcSize > blockSizeMax) return ERROR(srcSize_wrong);
2752 
2753     return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
2754 }
2755 
2756 /*! ZSTD_loadDictionaryContent() :
2757  *  @return : 0, or an error code
2758  */
/* ZSTD_loadDictionaryContent() :
 * Feed raw dictionary content into the match state, filling the search
 * tables appropriate for the configured strategy so subsequent compression
 * can reference the dictionary.
 * @return : 0 (dictionaries of <= HASH_READ_SIZE bytes are silently skipped),
 *           the error path is reserved for future use */
static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
                                         ZSTD_CCtx_params const* params,
                                         const void* src, size_t srcSize,
                                         ZSTD_dictTableLoadMethod_e dtlm)
{
    const BYTE* const ip = (const BYTE*) src;
    const BYTE* const iend = ip + srcSize;

    ZSTD_window_update(&ms->window, src, srcSize);
    /* forceWindow disables referencing dictionary content beyond the window */
    ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);

    /* Assert that the ms params match the params we're being given */
    ZSTD_assertEqualCParams(params->cParams, ms->cParams);

    if (srcSize <= HASH_READ_SIZE) return 0;

    /* each strategy uses a different table layout : fill accordingly */
    switch(params->cParams.strategy)
    {
    case ZSTD_fast:
        ZSTD_fillHashTable(ms, iend, dtlm);
        break;
    case ZSTD_dfast:
        ZSTD_fillDoubleHashTable(ms, iend, dtlm);
        break;

    case ZSTD_greedy:
    case ZSTD_lazy:
    case ZSTD_lazy2:
        /* condition always true here (srcSize > HASH_READ_SIZE checked above),
         * kept as defensive guard */
        if (srcSize >= HASH_READ_SIZE)
            ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
        break;

    case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
    case ZSTD_btopt:
    case ZSTD_btultra:
        if (srcSize >= HASH_READ_SIZE)
            ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
        break;

    default:
        assert(0);  /* not possible : not a valid strategy id */
    }

    /* everything up to iend is now indexed */
    ms->nextToUpdate = (U32)(iend - ms->window.base);
    return 0;
}
2805 
2806 
2807 /* Dictionaries that assign zero probability to symbols that show up causes problems
2808    when FSE encoding.  Refuse dictionaries that assign zero probability to symbols
2809    that we may encounter during compression.
2810    NOTE: This behavior is not standard and could be improved in the future. */
2811 static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
2812     U32 s;
2813     if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted);
2814     for (s = 0; s <= maxSymbolValue; ++s) {
2815         if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted);
2816     }
2817     return 0;
2818 }
2819 
2820 
2821 /* Dictionary format :
2822  * See :
2823  * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
2824  */
2825 /*! ZSTD_loadZstdDictionary() :
2826  * @return : dictID, or an error code
2827  *  assumptions : magic number supposed already checked
2828  *                dictSize supposed > 8
2829  */
/* ZSTD_loadZstdDictionary() :
 * Parse a structured zstd dictionary : magic (already checked) + dictID,
 * then entropy tables (Huffman for literals, FSE for offsets / match lengths /
 * literal lengths), then starting repcodes, then raw content.
 * Entropy tables are loaded into `bs`; content is indexed into `ms`.
 * @return : dictID (0 if noDictIDFlag), or an error code */
static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
                                      ZSTD_matchState_t* ms,
                                      ZSTD_CCtx_params const* params,
                                      const void* dict, size_t dictSize,
                                      ZSTD_dictTableLoadMethod_e dtlm,
                                      void* workspace)
{
    const BYTE* dictPtr = (const BYTE*)dict;
    const BYTE* const dictEnd = dictPtr + dictSize;
    short offcodeNCount[MaxOff+1];
    unsigned offcodeMaxValue = MaxOff;
    size_t dictID;

    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
    assert(dictSize > 8);
    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);

    dictPtr += 4;   /* skip magic number */
    dictID = params->fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr);
    dictPtr += 4;

    /* literals : Huffman table (must cover all 256 byte values) */
    {   unsigned maxSymbolValue = 255;
        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
        if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
        if (maxSymbolValue < 255) return ERROR(dictionary_corrupted);
        dictPtr += hufHeaderSize;
    }

    /* offset codes : FSE table */
    {   unsigned offcodeLog;
        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
        if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
        /* fill all offset symbols to avoid garbage at end of table */
        CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.offcodeCTable, offcodeNCount, MaxOff, offcodeLog, workspace, HUF_WORKSPACE_SIZE),
                 dictionary_corrupted);
        dictPtr += offcodeHeaderSize;
    }

    /* match lengths : FSE table */
    {   short matchlengthNCount[MaxML+1];
        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
        if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
        /* Every match length code must have non-zero probability */
        CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
        CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, workspace, HUF_WORKSPACE_SIZE),
                 dictionary_corrupted);
        dictPtr += matchlengthHeaderSize;
    }

    /* literal lengths : FSE table */
    {   short litlengthNCount[MaxLL+1];
        unsigned litlengthMaxValue = MaxLL, litlengthLog;
        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
        if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
        /* Every literal length code must have non-zero probability */
        CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
        CHECK_E( FSE_buildCTable_wksp(bs->entropy.fse.litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, workspace, HUF_WORKSPACE_SIZE),
                 dictionary_corrupted);
        dictPtr += litlengthHeaderSize;
    }

    /* starting repcodes : 3 little-endian 32-bit values */
    if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
    bs->rep[0] = MEM_readLE32(dictPtr+0);
    bs->rep[1] = MEM_readLE32(dictPtr+4);
    bs->rep[2] = MEM_readLE32(dictPtr+8);
    dictPtr += 12;

    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
        U32 offcodeMax = MaxOff;
        if (dictContentSize <= ((U32)-1) - 128 KB) {
            U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
            offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
        }
        /* All offset values <= dictContentSize + 128 KB must be representable */
        CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
        /* All repCodes must be <= dictContentSize and != 0*/
        {   U32 u;
            for (u=0; u<3; u++) {
                if (bs->rep[u] == 0) return ERROR(dictionary_corrupted);
                if (bs->rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
        }   }

        /* all tables validated : mark them reusable for the next block */
        bs->entropy.huf.repeatMode = HUF_repeat_valid;
        bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid;
        bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid;
        bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid;
        CHECK_F(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize, dtlm));
        return dictID;
    }
}
2922 
2923 /** ZSTD_compress_insertDictionary() :
2924 *   @return : dictID, or an error code */
2925 static size_t
2926 ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
2927                                ZSTD_matchState_t* ms,
2928                          const ZSTD_CCtx_params* params,
2929                          const void* dict, size_t dictSize,
2930                                ZSTD_dictContentType_e dictContentType,
2931                                ZSTD_dictTableLoadMethod_e dtlm,
2932                                void* workspace)
2933 {
2934     DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
2935     if ((dict==NULL) || (dictSize<=8)) return 0;
2936 
2937     ZSTD_reset_compressedBlockState(bs);
2938 
2939     /* dict restricted modes */
2940     if (dictContentType == ZSTD_dct_rawContent)
2941         return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
2942 
2943     if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
2944         if (dictContentType == ZSTD_dct_auto) {
2945             DEBUGLOG(4, "raw content dictionary detected");
2946             return ZSTD_loadDictionaryContent(ms, params, dict, dictSize, dtlm);
2947         }
2948         if (dictContentType == ZSTD_dct_fullDict)
2949             return ERROR(dictionary_wrong);
2950         assert(0);   /* impossible */
2951     }
2952 
2953     /* dict as full zstd dictionary */
2954     return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, dtlm, workspace);
2955 }
2956 
2957 /*! ZSTD_compressBegin_internal() :
2958  * @return : 0, or an error code */
2959 static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
2960                                     const void* dict, size_t dictSize,
2961                                     ZSTD_dictContentType_e dictContentType,
2962                                     ZSTD_dictTableLoadMethod_e dtlm,
2963                                     const ZSTD_CDict* cdict,
2964                                     ZSTD_CCtx_params params, U64 pledgedSrcSize,
2965                                     ZSTD_buffered_policy_e zbuff)
2966 {
2967     DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params.cParams.windowLog);
2968     /* params are supposed to be fully validated at this point */
2969     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
2970     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
2971 
2972     if (cdict && cdict->dictContentSize>0) {
2973         return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
2974     }
2975 
2976     CHECK_F( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
2977                                      ZSTDcrp_continue, zbuff) );
2978     {
2979         size_t const dictID = ZSTD_compress_insertDictionary(
2980                 cctx->blockState.prevCBlock, &cctx->blockState.matchState,
2981                 &params, dict, dictSize, dictContentType, dtlm, cctx->entropyWorkspace);
2982         if (ZSTD_isError(dictID)) return dictID;
2983         assert(dictID <= (size_t)(U32)-1);
2984         cctx->dictID = (U32)dictID;
2985     }
2986     return 0;
2987 }
2988 
2989 size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
2990                                     const void* dict, size_t dictSize,
2991                                     ZSTD_dictContentType_e dictContentType,
2992                                     ZSTD_dictTableLoadMethod_e dtlm,
2993                                     const ZSTD_CDict* cdict,
2994                                     ZSTD_CCtx_params params,
2995                                     unsigned long long pledgedSrcSize)
2996 {
2997     DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog);
2998     /* compression parameters verification and optimization */
2999     CHECK_F( ZSTD_checkCParams(params.cParams) );
3000     return ZSTD_compressBegin_internal(cctx,
3001                                        dict, dictSize, dictContentType, dtlm,
3002                                        cdict,
3003                                        params, pledgedSrcSize,
3004                                        ZSTDb_not_buffered);
3005 }
3006 
3007 /*! ZSTD_compressBegin_advanced() :
3008 *   @return : 0, or an error code */
3009 size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
3010                              const void* dict, size_t dictSize,
3011                                    ZSTD_parameters params, unsigned long long pledgedSrcSize)
3012 {
3013     ZSTD_CCtx_params const cctxParams =
3014             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
3015     return ZSTD_compressBegin_advanced_internal(cctx,
3016                                             dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
3017                                             NULL /*cdict*/,
3018                                             cctxParams, pledgedSrcSize);
3019 }
3020 
3021 size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
3022 {
3023     ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
3024     ZSTD_CCtx_params const cctxParams =
3025             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
3026     DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (U32)dictSize);
3027     return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
3028                                        cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
3029 }
3030 
3031 size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
3032 {
3033     return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
3034 }
3035 
3036 
3037 /*! ZSTD_writeEpilogue() :
3038 *   Ends a frame.
3039 *   @return : nb of bytes written into dst (or an error code) */
/*! ZSTD_writeEpilogue() :
*   Ends a frame : writes the frame header if nothing was written yet (empty
*   frame), a final empty raw block if the last block was not yet flagged,
*   and the 32-bit checksum when enabled; then resets the context stage.
*   @return : nb of bytes written into dst (or an error code) */
static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
{
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    size_t fhSize = 0;

    DEBUGLOG(4, "ZSTD_writeEpilogue");
    if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong);  /* init missing */

    /* special case : empty frame */
    if (cctx->stage == ZSTDcs_init) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0);
        if (ZSTD_isError(fhSize)) return fhSize;
        dstCapacity -= fhSize;
        op += fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (cctx->stage != ZSTDcs_ending) {
        /* write one last empty block, make it the "last" block */
        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
        /* note : MEM_writeLE32 stores 4 bytes (24-bit header + one zero byte),
         * hence the capacity check against 4; `op` only advances by
         * ZSTD_blockHeaderSize, so the spare byte may be overwritten below */
        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
        MEM_writeLE32(op, cBlockHeader24);
        op += ZSTD_blockHeaderSize;
        dstCapacity -= ZSTD_blockHeaderSize;
    }

    if (cctx->appliedParams.fParams.checksumFlag) {
        /* checksum = 32 lowest bits of the XXH64 of all consumed input */
        U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", checksum);
        MEM_writeLE32(op, checksum);
        op += 4;
    }

    cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
    return op-ostart;
}
3078 
/* ZSTD_compressEnd() :
 * Compress the final chunk of a frame, then append the epilogue
 * (last-block marker + optional checksum).
 * Verifies that the total consumed size matches the pledged source size.
 * @return : total bytes written (compressed data + epilogue), or an error code */
size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
                         void* dst, size_t dstCapacity,
                   const void* src, size_t srcSize)
{
    size_t endResult;
    size_t const cSize = ZSTD_compressContinue_internal(cctx,
                                dst, dstCapacity, src, srcSize,
                                1 /* frame mode */, 1 /* last chunk */);
    if (ZSTD_isError(cSize)) return cSize;
    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
    if (ZSTD_isError(endResult)) return endResult;
    assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
    if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
        ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
        DEBUGLOG(4, "end of frame : controlling src size");
        /* at frame end, consumed size must match the pledged size exactly */
        if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) {
            DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u",
                (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
            return ERROR(srcSize_wrong);
    }   }
    return cSize + endResult;
}
3101 
3102 
3103 static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
3104                                       void* dst, size_t dstCapacity,
3105                                 const void* src, size_t srcSize,
3106                                 const void* dict,size_t dictSize,
3107                                       ZSTD_parameters params)
3108 {
3109     ZSTD_CCtx_params const cctxParams =
3110             ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
3111     DEBUGLOG(4, "ZSTD_compress_internal");
3112     return ZSTD_compress_advanced_internal(cctx,
3113                                            dst, dstCapacity,
3114                                            src, srcSize,
3115                                            dict, dictSize,
3116                                            cctxParams);
3117 }
3118 
3119 size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
3120                                void* dst, size_t dstCapacity,
3121                          const void* src, size_t srcSize,
3122                          const void* dict,size_t dictSize,
3123                                ZSTD_parameters params)
3124 {
3125     DEBUGLOG(4, "ZSTD_compress_advanced");
3126     CHECK_F(ZSTD_checkCParams(params.cParams));
3127     return ZSTD_compress_internal(cctx,
3128                                   dst, dstCapacity,
3129                                   src, srcSize,
3130                                   dict, dictSize,
3131                                   params);
3132 }
3133 
3134 /* Internal */
3135 size_t ZSTD_compress_advanced_internal(
3136         ZSTD_CCtx* cctx,
3137         void* dst, size_t dstCapacity,
3138         const void* src, size_t srcSize,
3139         const void* dict,size_t dictSize,
3140         ZSTD_CCtx_params params)
3141 {
3142     DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (U32)srcSize);
3143     CHECK_F( ZSTD_compressBegin_internal(cctx,
3144                          dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
3145                          params, srcSize, ZSTDb_not_buffered) );
3146     return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
3147 }
3148 
3149 size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
3150                                void* dst, size_t dstCapacity,
3151                          const void* src, size_t srcSize,
3152                          const void* dict, size_t dictSize,
3153                                int compressionLevel)
3154 {
3155     ZSTD_parameters const params = ZSTD_getParams(compressionLevel, srcSize + (!srcSize), dict ? dictSize : 0);
3156     ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
3157     assert(params.fParams.contentSizeFlag == 1);
3158     return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, cctxParams);
3159 }
3160 
3161 size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
3162                          void* dst, size_t dstCapacity,
3163                    const void* src, size_t srcSize,
3164                          int compressionLevel)
3165 {
3166     DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (U32)srcSize);
3167     assert(cctx != NULL);
3168     return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
3169 }
3170 
3171 size_t ZSTD_compress(void* dst, size_t dstCapacity,
3172                const void* src, size_t srcSize,
3173                      int compressionLevel)
3174 {
3175     size_t result;
3176     ZSTD_CCtx ctxBody;
3177     ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);
3178     result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
3179     ZSTD_freeCCtxContent(&ctxBody);   /* can't free ctxBody itself, as it's on stack; free only heap content */
3180     return result;
3181 }
3182 
3183 
3184 /* =====  Dictionary API  ===== */
3185 
3186 /*! ZSTD_estimateCDictSize_advanced() :
3187  *  Estimate amount of memory that will be needed to create a dictionary with following arguments */
3188 size_t ZSTD_estimateCDictSize_advanced(
3189         size_t dictSize, ZSTD_compressionParameters cParams,
3190         ZSTD_dictLoadMethod_e dictLoadMethod)
3191 {
3192     DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (U32)sizeof(ZSTD_CDict));
3193     return sizeof(ZSTD_CDict) + HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
3194            + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
3195 }
3196 
3197 size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
3198 {
3199     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
3200     return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
3201 }
3202 
3203 size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
3204 {
3205     if (cdict==NULL) return 0;   /* support sizeof on NULL */
3206     DEBUGLOG(5, "sizeof(*cdict) : %u", (U32)sizeof(*cdict));
3207     return cdict->workspaceSize + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
3208 }
3209 
/* ZSTD_initCDict_internal() :
 * Fill an already-allocated CDict : reference or copy the dictionary content,
 * reset the match state inside the CDict's workspace, then digest the
 * dictionary (entropy tables + content indexing).
 * Assumes cdict->workspace / workspaceSize / customMem are already set.
 * @return : 0, or an error code */
static size_t ZSTD_initCDict_internal(
                    ZSTD_CDict* cdict,
              const void* dictBuffer, size_t dictSize,
                    ZSTD_dictLoadMethod_e dictLoadMethod,
                    ZSTD_dictContentType_e dictContentType,
                    ZSTD_compressionParameters cParams)
{
    DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (U32)dictContentType);
    assert(!ZSTD_checkCParams(cParams));
    cdict->matchState.cParams = cParams;
    /* byRef (or empty dict) : reference caller's buffer; otherwise own a copy */
    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
        cdict->dictBuffer = NULL;
        cdict->dictContent = dictBuffer;
    } else {
        void* const internalBuffer = ZSTD_malloc(dictSize, cdict->customMem);
        cdict->dictBuffer = internalBuffer;
        cdict->dictContent = internalBuffer;
        if (!internalBuffer) return ERROR(memory_allocation);
        memcpy(internalBuffer, dictBuffer, dictSize);
    }
    cdict->dictContentSize = dictSize;

    /* Reset the state to no dictionary */
    ZSTD_reset_compressedBlockState(&cdict->cBlockState);
    /* match state tables live after the HUF workspace inside cdict->workspace */
    {   void* const end = ZSTD_reset_matchState(
                &cdict->matchState,
                (U32*)cdict->workspace + HUF_WORKSPACE_SIZE_U32,
                &cParams, ZSTDcrp_continue, /* forCCtx */ 0);
        assert(end == (char*)cdict->workspace + cdict->workspaceSize);
        (void)end;
    }
    /* (Maybe) load the dictionary
     * Skips loading the dictionary if it is <= 8 bytes.
     */
    {   ZSTD_CCtx_params params;
        memset(&params, 0, sizeof(params));
        params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
        params.fParams.contentSizeFlag = 1;
        params.cParams = cParams;
        {   size_t const dictID = ZSTD_compress_insertDictionary(
                    &cdict->cBlockState, &cdict->matchState, &params,
                    cdict->dictContent, cdict->dictContentSize,
                    dictContentType, ZSTD_dtlm_full, cdict->workspace);
            if (ZSTD_isError(dictID)) return dictID;
            assert(dictID <= (size_t)(U32)-1);
            cdict->dictID = (U32)dictID;
        }
    }

    return 0;
}
3261 
3262 ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
3263                                       ZSTD_dictLoadMethod_e dictLoadMethod,
3264                                       ZSTD_dictContentType_e dictContentType,
3265                                       ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
3266 {
3267     DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (U32)dictContentType);
3268     if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
3269 
3270     {   ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
3271         size_t const workspaceSize = HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
3272         void* const workspace = ZSTD_malloc(workspaceSize, customMem);
3273 
3274         if (!cdict || !workspace) {
3275             ZSTD_free(cdict, customMem);
3276             ZSTD_free(workspace, customMem);
3277             return NULL;
3278         }
3279         cdict->customMem = customMem;
3280         cdict->workspace = workspace;
3281         cdict->workspaceSize = workspaceSize;
3282         if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
3283                                         dictBuffer, dictSize,
3284                                         dictLoadMethod, dictContentType,
3285                                         cParams) )) {
3286             ZSTD_freeCDict(cdict);
3287             return NULL;
3288         }
3289 
3290         return cdict;
3291     }
3292 }
3293 
3294 ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
3295 {
3296     ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
3297     return ZSTD_createCDict_advanced(dict, dictSize,
3298                                      ZSTD_dlm_byCopy, ZSTD_dct_auto,
3299                                      cParams, ZSTD_defaultCMem);
3300 }
3301 
3302 ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
3303 {
3304     ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
3305     return ZSTD_createCDict_advanced(dict, dictSize,
3306                                      ZSTD_dlm_byRef, ZSTD_dct_auto,
3307                                      cParams, ZSTD_defaultCMem);
3308 }
3309 
3310 size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
3311 {
3312     if (cdict==NULL) return 0;   /* support free on NULL */
3313     {   ZSTD_customMem const cMem = cdict->customMem;
3314         ZSTD_free(cdict->workspace, cMem);
3315         ZSTD_free(cdict->dictBuffer, cMem);
3316         ZSTD_free(cdict, cMem);
3317         return 0;
3318     }
3319 }
3320 
3321 /*! ZSTD_initStaticCDict_advanced() :
3322  *  Generate a digested dictionary in provided memory area.
3323  *  workspace: The memory area to emplace the dictionary into.
3324  *             Provided pointer must 8-bytes aligned.
3325  *             It must outlive dictionary usage.
3326  *  workspaceSize: Use ZSTD_estimateCDictSize()
3327  *                 to determine how large workspace must be.
3328  *  cParams : use ZSTD_getCParams() to transform a compression level
3329  *            into its relevants cParams.
3330  * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
3331  *  Note : there is no corresponding "free" function.
3332  *         Since workspace was allocated externally, it must be freed externally.
3333  */
const ZSTD_CDict* ZSTD_initStaticCDict(
                                 void* workspace, size_t workspaceSize,
                           const void* dict, size_t dictSize,
                                 ZSTD_dictLoadMethod_e dictLoadMethod,
                                 ZSTD_dictContentType_e dictContentType,
                                 ZSTD_compressionParameters cParams)
{
    size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
    /* layout inside workspace : [ZSTD_CDict struct][dict copy (byCopy only)][HUF + matchState workspace] */
    size_t const neededSize = sizeof(ZSTD_CDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize)
                            + HUF_WORKSPACE_SIZE + matchStateSize;
    ZSTD_CDict* const cdict = (ZSTD_CDict*) workspace;
    void* ptr;
    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
    DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
        (U32)workspaceSize, (U32)neededSize, (U32)(workspaceSize < neededSize));
    if (workspaceSize < neededSize) return NULL;

    if (dictLoadMethod == ZSTD_dlm_byCopy) {
        /* dictionary copy sits immediately after the struct */
        memcpy(cdict+1, dict, dictSize);
        dict = cdict+1;
        ptr = (char*)workspace + sizeof(ZSTD_CDict) + dictSize;
    } else {
        ptr = cdict+1;
    }
    cdict->workspace = ptr;
    cdict->workspaceSize = HUF_WORKSPACE_SIZE + matchStateSize;

    /* dict now points inside workspace (byCopy) or at caller memory (byRef),
     * so init always proceeds by reference : nothing left to allocate */
    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                              dict, dictSize,
                                              ZSTD_dlm_byRef, dictContentType,
                                              cParams) ))
        return NULL;

    return cdict;
}
3369 
3370 ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
3371 {
3372     assert(cdict != NULL);
3373     return cdict->matchState.cParams;
3374 }
3375 
/* ZSTD_compressBegin_usingCDict_advanced() :
 * Start a new frame using a digested dictionary and explicit frame parameters.
 * cdict must be != NULL */
size_t ZSTD_compressBegin_usingCDict_advanced(
    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
    if (cdict==NULL) return ERROR(dictionary_wrong);
    {   ZSTD_CCtx_params params = cctx->requestedParams;
        /* compression parameters are taken from the cdict, overriding requested ones */
        params.cParams = ZSTD_getCParamsFromCDict(cdict);
        /* Increase window log to fit the entire dictionary and source if the
         * source size is known. Limit the increase to 19, which is the
         * window log for compression level 1 with the largest source size.
         */
        if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
            U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
            /* smallest power-of-2 window covering limitedSrcSize (minimum 1) */
            U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
            params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog);
        }
        params.fParams = fParams;
        return ZSTD_compressBegin_internal(cctx,
                                           NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
                                           cdict,
                                           params, pledgedSrcSize,
                                           ZSTDb_not_buffered);
    }
}
3403 
3404 /* ZSTD_compressBegin_usingCDict() :
3405  * pledgedSrcSize=0 means "unknown"
3406  * if pledgedSrcSize>0, it will enable contentSizeFlag */
3407 size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
3408 {
3409     ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
3410     DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
3411     return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
3412 }
3413 
3414 size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
3415                                 void* dst, size_t dstCapacity,
3416                                 const void* src, size_t srcSize,
3417                                 const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
3418 {
3419     CHECK_F (ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize));   /* will check if cdict != NULL */
3420     return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
3421 }
3422 
3423 /*! ZSTD_compress_usingCDict() :
3424  *  Compression using a digested Dictionary.
3425  *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
3426  *  Note that compression parameters are decided at CDict creation time
3427  *  while frame parameters are hardcoded */
3428 size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
3429                                 void* dst, size_t dstCapacity,
3430                                 const void* src, size_t srcSize,
3431                                 const ZSTD_CDict* cdict)
3432 {
3433     ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
3434     return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
3435 }
3436 
3437 
3438 
3439 /* ******************************************************************
3440 *  Streaming
3441 ********************************************************************/
3442 
3443 ZSTD_CStream* ZSTD_createCStream(void)
3444 {
3445     DEBUGLOG(3, "ZSTD_createCStream");
3446     return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
3447 }
3448 
3449 ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
3450 {
3451     return ZSTD_initStaticCCtx(workspace, workspaceSize);
3452 }
3453 
3454 ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
3455 {   /* CStream and CCtx are now same object */
3456     return ZSTD_createCCtx_advanced(customMem);
3457 }
3458 
3459 size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
3460 {
3461     return ZSTD_freeCCtx(zcs);   /* same object */
3462 }
3463 
3464 
3465 
3466 /*======   Initialization   ======*/
3467 
3468 size_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX; }
3469 
3470 size_t ZSTD_CStreamOutSize(void)
3471 {
3472     return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
3473 }
3474 
/* ZSTD_resetCStream_internal() :
 * Start a new frame on a buffered (streaming) compression context.
 * Either `dict` or `cdict` may be provided, not both.
 * Note : params are expected to be fully validated by the caller. */
static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
                    const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType,
                    const ZSTD_CDict* const cdict,
                    ZSTD_CCtx_params params, unsigned long long const pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_resetCStream_internal");
    /* Finalize the compression parameters */
    params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);
    /* params are supposed to be fully validated at this point */
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */

    CHECK_F( ZSTD_compressBegin_internal(cctx,
                                         dict, dictSize, dictContentType, ZSTD_dtlm_fast,
                                         cdict,
                                         params, pledgedSrcSize,
                                         ZSTDb_buffered) );

    /* reset streaming buffer state */
    cctx->inToCompress = 0;
    cctx->inBuffPos = 0;
    cctx->inBuffTarget = cctx->blockSize
                      + (cctx->blockSize == pledgedSrcSize);   /* for small input: avoid automatic flush on reaching end of block, since it would require to add a 3-bytes null block to end frame */
    cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
    cctx->streamStage = zcss_load;
    cctx->frameEnded = 0;
    return 0;   /* ready to go */
}
3502 
3503 /* ZSTD_resetCStream():
3504  * pledgedSrcSize == 0 means "unknown" */
3505 size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
3506 {
3507     ZSTD_CCtx_params params = zcs->requestedParams;
3508     DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (U32)pledgedSrcSize);
3509     if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
3510     params.fParams.contentSizeFlag = 1;
3511     return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
3512 }
3513 
/*! ZSTD_initCStream_internal() :
 *  Note : for lib/compress only. Used by zstdmt_compress.c.
 *  Assumption 1 : params are valid
 *  Assumption 2 : either dict, or cdict, is defined, not both */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
                    const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
                    ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_initCStream_internal");
    params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, dictSize);
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */

    /* dictionaries smaller than 8 bytes are ignored
     * (NOTE(review): presumably too small to be useful — confirm against zstd.h) */
    if (dict && dictSize >= 8) {
        DEBUGLOG(4, "loading dictionary of size %u", (U32)dictSize);
        if (zcs->staticSize) {   /* static CCtx : never uses malloc */
            /* incompatible with internal cdict creation */
            return ERROR(memory_allocation);
        }
        /* digest the raw dict into an internally-owned CDict, replacing any previous one */
        ZSTD_freeCDict(zcs->cdictLocal);
        zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
                                            ZSTD_dlm_byCopy, ZSTD_dct_auto,
                                            params.cParams, zcs->customMem);
        zcs->cdict = zcs->cdictLocal;
        if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
    } else {
        if (cdict) {
            params.cParams = ZSTD_getCParamsFromCDict(cdict);  /* cParams are enforced from cdict; it includes windowLog */
        }
        /* external cdict (possibly NULL) : drop any previously-owned local one */
        ZSTD_freeCDict(zcs->cdictLocal);
        zcs->cdictLocal = NULL;
        zcs->cdict = cdict;
    }

    return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
}
3550 
3551 /* ZSTD_initCStream_usingCDict_advanced() :
3552  * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
3553 size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
3554                                             const ZSTD_CDict* cdict,
3555                                             ZSTD_frameParameters fParams,
3556                                             unsigned long long pledgedSrcSize)
3557 {
3558     DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
3559     if (!cdict) return ERROR(dictionary_wrong); /* cannot handle NULL cdict (does not know what to do) */
3560     {   ZSTD_CCtx_params params = zcs->requestedParams;
3561         params.cParams = ZSTD_getCParamsFromCDict(cdict);
3562         params.fParams = fParams;
3563         return ZSTD_initCStream_internal(zcs,
3564                                 NULL, 0, cdict,
3565                                 params, pledgedSrcSize);
3566     }
3567 }
3568 
3569 /* note : cdict must outlive compression session */
3570 size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
3571 {
3572     ZSTD_frameParameters const fParams = { 0 /* contentSizeFlag */, 0 /* checksum */, 0 /* hideDictID */ };
3573     DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
3574     return ZSTD_initCStream_usingCDict_advanced(zcs, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);  /* note : will check that cdict != NULL */
3575 }
3576 
3577 
3578 /* ZSTD_initCStream_advanced() :
3579  * pledgedSrcSize must be exact.
3580  * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
3581  * dict is loaded with default parameters ZSTD_dm_auto and ZSTD_dlm_byCopy. */
3582 size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
3583                                  const void* dict, size_t dictSize,
3584                                  ZSTD_parameters params, unsigned long long pledgedSrcSize)
3585 {
3586     DEBUGLOG(4, "ZSTD_initCStream_advanced: pledgedSrcSize=%u, flag=%u",
3587                 (U32)pledgedSrcSize, params.fParams.contentSizeFlag);
3588     CHECK_F( ZSTD_checkCParams(params.cParams) );
3589     if ((pledgedSrcSize==0) && (params.fParams.contentSizeFlag==0)) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* for compatibility with older programs relying on this behavior. Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. This line will be removed in the future. */
3590     zcs->requestedParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
3591     return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL /*cdict*/, zcs->requestedParams, pledgedSrcSize);
3592 }
3593 
3594 size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
3595 {
3596     ZSTD_CCtxParams_init(&zcs->requestedParams, compressionLevel);
3597     return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, zcs->requestedParams, ZSTD_CONTENTSIZE_UNKNOWN);
3598 }
3599 
3600 size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
3601 {
3602     U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;  /* temporary : 0 interpreted as "unknown" during transition period. Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. `0` will be interpreted as "empty" in the future */
3603     ZSTD_CCtxParams_init(&zcs->requestedParams, compressionLevel);
3604     return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, zcs->requestedParams, pledgedSrcSize);
3605 }
3606 
3607 size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
3608 {
3609     DEBUGLOG(4, "ZSTD_initCStream");
3610     return ZSTD_initCStream_srcSize(zcs, compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN);
3611 }
3612 
3613 /*======   Compression   ======*/
3614 
3615 MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
3616                            const void* src, size_t srcSize)
3617 {
3618     size_t const length = MIN(dstCapacity, srcSize);
3619     if (length) memcpy(dst, src, length);
3620     return length;
3621 }
3622 
/** ZSTD_compressStream_generic():
 *  internal function for all *compressStream*() variants and *compress_generic()
 *  non-static, because can be called from zstdmt_compress.c
 * @return : hint size for next input */
size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
                                   ZSTD_outBuffer* output,
                                   ZSTD_inBuffer* input,
                                   ZSTD_EndDirective const flushMode)
{
    const char* const istart = (const char*)input->src;
    const char* const iend = istart + input->size;
    const char* ip = istart + input->pos;
    char* const ostart = (char*)output->dst;
    char* const oend = ostart + output->size;
    char* op = ostart + output->pos;
    U32 someMoreWork = 1;

    /* check expectations */
    DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (U32)flushMode);
    assert(zcs->inBuff != NULL);
    assert(zcs->inBuffSize > 0);
    assert(zcs->outBuff !=  NULL);
    assert(zcs->outBuffSize > 0);
    assert(output->pos <= output->size);
    assert(input->pos <= input->size);

    /* state machine : alternate between loading input (zcss_load)
     * and flushing compressed output (zcss_flush) until no progress is possible */
    while (someMoreWork) {
        switch(zcs->streamStage)
        {
        case zcss_init:
            /* call ZSTD_initCStream() first ! */
            return ERROR(init_missing);

        case zcss_load:
            if ( (flushMode == ZSTD_e_end)
              && ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip))  /* enough dstCapacity */
              && (zcs->inBuffPos == 0) ) {
                /* shortcut to compression pass directly into output buffer */
                size_t const cSize = ZSTD_compressEnd(zcs,
                                                op, oend-op, ip, iend-ip);
                DEBUGLOG(4, "ZSTD_compressEnd : %u", (U32)cSize);
                if (ZSTD_isError(cSize)) return cSize;
                ip = iend;
                op += cSize;
                zcs->frameEnded = 1;
                ZSTD_CCtx_reset(zcs);
                someMoreWork = 0; break;
            }
            /* complete loading into inBuffer */
            {   size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
                size_t const loaded = ZSTD_limitCopy(
                                        zcs->inBuff + zcs->inBuffPos, toLoad,
                                        ip, iend-ip);
                zcs->inBuffPos += loaded;
                ip += loaded;
                if ( (flushMode == ZSTD_e_continue)
                  && (zcs->inBuffPos < zcs->inBuffTarget) ) {
                    /* not enough input to fill full block : stop here */
                    someMoreWork = 0; break;
                }
                if ( (flushMode == ZSTD_e_flush)
                  && (zcs->inBuffPos == zcs->inToCompress) ) {
                    /* empty */
                    someMoreWork = 0; break;
                }
            }
            /* compress current block (note : this stage cannot be stopped in the middle) */
            DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
            {   void* cDst;
                size_t cSize;
                size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
                size_t oSize = oend-op;
                unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
                if (oSize >= ZSTD_compressBound(iSize))
                    cDst = op;   /* compress into output buffer, to skip flush stage */
                else
                    cDst = zcs->outBuff, oSize = zcs->outBuffSize;
                cSize = lastBlock ?
                        ZSTD_compressEnd(zcs, cDst, oSize,
                                    zcs->inBuff + zcs->inToCompress, iSize) :
                        ZSTD_compressContinue(zcs, cDst, oSize,
                                    zcs->inBuff + zcs->inToCompress, iSize);
                if (ZSTD_isError(cSize)) return cSize;
                zcs->frameEnded = lastBlock;
                /* prepare next block */
                zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
                if (zcs->inBuffTarget > zcs->inBuffSize)
                    zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;   /* wrap around input buffer */
                DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
                         (U32)zcs->inBuffTarget, (U32)zcs->inBuffSize);
                if (!lastBlock)
                    assert(zcs->inBuffTarget <= zcs->inBuffSize);
                zcs->inToCompress = zcs->inBuffPos;
                if (cDst == op) {  /* no need to flush */
                    op += cSize;
                    if (zcs->frameEnded) {
                        DEBUGLOG(5, "Frame completed directly in outBuffer");
                        someMoreWork = 0;
                        ZSTD_CCtx_reset(zcs);
                    }
                    break;
                }
                zcs->outBuffContentSize = cSize;
                zcs->outBuffFlushedSize = 0;
                zcs->streamStage = zcss_flush; /* pass-through to flush stage */
            }
            /* fall-through */
        case zcss_flush:
            DEBUGLOG(5, "flush stage");
            {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
                size_t const flushed = ZSTD_limitCopy(op, oend-op,
                            zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
                DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
                            (U32)toFlush, (U32)(oend-op), (U32)flushed);
                op += flushed;
                zcs->outBuffFlushedSize += flushed;
                if (toFlush!=flushed) {
                    /* flush not fully completed, presumably because dst is too small */
                    assert(op==oend);
                    someMoreWork = 0;
                    break;
                }
                zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
                if (zcs->frameEnded) {
                    DEBUGLOG(5, "Frame completed on flush");
                    someMoreWork = 0;
                    ZSTD_CCtx_reset(zcs);
                    break;
                }
                zcs->streamStage = zcss_load;
                break;
            }

        default: /* impossible */
            assert(0);
        }
    }

    /* report progress back to the caller */
    input->pos = ip - istart;
    output->pos = op - ostart;
    if (zcs->frameEnded) return 0;
    /* next-input hint : how many bytes complete the current block */
    {   size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
        if (hintInSize==0) hintInSize = zcs->blockSize;
        return hintInSize;
    }
}
3769 
3770 size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
3771 {
3772     /* check conditions */
3773     if (output->pos > output->size) return ERROR(GENERIC);
3774     if (input->pos  > input->size)  return ERROR(GENERIC);
3775 
3776     return ZSTD_compressStream_generic(zcs, output, input, ZSTD_e_continue);
3777 }
3778 
3779 
/* ZSTD_compress_generic() :
 * Main streaming entry point : transparently initializes the context on first
 * call, then delegates to the single-threaded or (if ZSTD_MULTITHREAD and
 * nbWorkers > 0) multi-threaded streaming implementation.
 * @return : remaining bytes to flush (0 when frame is complete), or error code */
size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
                              ZSTD_outBuffer* output,
                              ZSTD_inBuffer* input,
                              ZSTD_EndDirective endOp)
{
    DEBUGLOG(5, "ZSTD_compress_generic, endOp=%u ", (U32)endOp);
    /* check conditions */
    if (output->pos > output->size) return ERROR(GENERIC);
    if (input->pos  > input->size)  return ERROR(GENERIC);
    assert(cctx!=NULL);

    /* transparent initialization stage */
    if (cctx->streamStage == zcss_init) {
        ZSTD_CCtx_params params = cctx->requestedParams;
        ZSTD_prefixDict const prefixDict = cctx->prefixDict;
        memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));  /* single usage */
        assert(prefixDict.dict==NULL || cctx->cdict==NULL);   /* only one can be set */
        DEBUGLOG(4, "ZSTD_compress_generic : transparent init stage");
        if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1;  /* auto-fix pledgedSrcSize */
        params.cParams = ZSTD_getCParamsFromCCtxParams(
                &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);


#ifdef ZSTD_MULTITHREAD
        /* note : when pledgedSrcSizePlusOne==0 (size unknown), the unsigned
         * subtraction wraps to a huge value, so multi-threading stays enabled */
        if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
            params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
        }
        if (params.nbWorkers > 0) {
            /* mt context creation */
            if (cctx->mtctx == NULL) {
                DEBUGLOG(4, "ZSTD_compress_generic: creating new mtctx for nbWorkers=%u",
                            params.nbWorkers);
                cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
                if (cctx->mtctx == NULL) return ERROR(memory_allocation);
            }
            /* mt compression */
            DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
            CHECK_F( ZSTDMT_initCStream_internal(
                        cctx->mtctx,
                        prefixDict.dict, prefixDict.dictSize, ZSTD_dct_rawContent,
                        cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
            cctx->streamStage = zcss_load;
            cctx->appliedParams.nbWorkers = params.nbWorkers;
        } else
#endif
        {   CHECK_F( ZSTD_resetCStream_internal(cctx,
                            prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
                            cctx->cdict,
                            params, cctx->pledgedSrcSizePlusOne-1) );
            assert(cctx->streamStage == zcss_load);
            assert(cctx->appliedParams.nbWorkers == 0);
    }   }

    /* compression stage */
#ifdef ZSTD_MULTITHREAD
    if (cctx->appliedParams.nbWorkers > 0) {
        if (cctx->cParamsChanged) {
            ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
            cctx->cParamsChanged = 0;
        }
        {   size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
            if ( ZSTD_isError(flushMin)
              || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
                ZSTD_CCtx_reset(cctx);
            }
            DEBUGLOG(5, "completed ZSTD_compress_generic delegating to ZSTDMT_compressStream_generic");
            return flushMin;
    }   }
#endif
    CHECK_F( ZSTD_compressStream_generic(cctx, output, input, endOp) );
    DEBUGLOG(5, "completed ZSTD_compress_generic");
    return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
}
3853 
3854 size_t ZSTD_compress_generic_simpleArgs (
3855                             ZSTD_CCtx* cctx,
3856                             void* dst, size_t dstCapacity, size_t* dstPos,
3857                       const void* src, size_t srcSize, size_t* srcPos,
3858                             ZSTD_EndDirective endOp)
3859 {
3860     ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
3861     ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
3862     /* ZSTD_compress_generic() will check validity of dstPos and srcPos */
3863     size_t const cErr = ZSTD_compress_generic(cctx, &output, &input, endOp);
3864     *dstPos = output.pos;
3865     *srcPos = input.pos;
3866     return cErr;
3867 }
3868 
3869 
3870 /*======   Finalize   ======*/
3871 
3872 /*! ZSTD_flushStream() :
3873  * @return : amount of data remaining to flush */
3874 size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
3875 {
3876     ZSTD_inBuffer input = { NULL, 0, 0 };
3877     if (output->pos > output->size) return ERROR(GENERIC);
3878     CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_flush) );
3879     return zcs->outBuffContentSize - zcs->outBuffFlushedSize;  /* remaining to flush */
3880 }
3881 
3882 
3883 size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
3884 {
3885     ZSTD_inBuffer input = { NULL, 0, 0 };
3886     if (output->pos > output->size) return ERROR(GENERIC);
3887     CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_end) );
3888     {   size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
3889         size_t const checksumSize = zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4;
3890         size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize + lastBlockSize + checksumSize;
3891         DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (U32)toFlush);
3892         return toFlush;
3893     }
3894 }
3895 
3896 
3897 /*-=====  Pre-defined compression levels  =====-*/
3898 
#define ZSTD_MAX_CLEVEL     22
/* highest supported regular compression level */
int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
/* lowest supported (fastest, negative) compression level */
int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
3902 
3903 static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
3904 {   /* "default" - guarantees a monotonically increasing memory budget */
3905     /* W,  C,  H,  S,  L, TL, strat */
3906     { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
3907     { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
3908     { 19, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */
3909     { 20, 16, 17,  1,  5,  1, ZSTD_dfast   },  /* level  3 */
3910     { 20, 18, 18,  1,  5,  1, ZSTD_dfast   },  /* level  4 */
3911     { 20, 18, 18,  2,  5,  2, ZSTD_greedy  },  /* level  5 */
3912     { 21, 18, 19,  2,  5,  4, ZSTD_lazy    },  /* level  6 */
3913     { 21, 18, 19,  3,  5,  8, ZSTD_lazy2   },  /* level  7 */
3914     { 21, 19, 19,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
3915     { 21, 19, 20,  4,  5, 16, ZSTD_lazy2   },  /* level  9 */
3916     { 21, 20, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
3917     { 21, 21, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
3918     { 22, 20, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
3919     { 22, 21, 22,  4,  5, 32, ZSTD_btlazy2 },  /* level 13 */
3920     { 22, 21, 22,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
3921     { 22, 22, 22,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
3922     { 22, 21, 22,  4,  5, 48, ZSTD_btopt   },  /* level 16 */
3923     { 23, 22, 22,  4,  4, 64, ZSTD_btopt   },  /* level 17 */
3924     { 23, 23, 22,  6,  3,256, ZSTD_btopt   },  /* level 18 */
3925     { 23, 24, 22,  7,  3,256, ZSTD_btultra },  /* level 19 */
3926     { 25, 25, 23,  7,  3,256, ZSTD_btultra },  /* level 20 */
3927     { 26, 26, 24,  7,  3,512, ZSTD_btultra },  /* level 21 */
3928     { 27, 27, 25,  9,  3,999, ZSTD_btultra },  /* level 22 */
3929 },
3930 {   /* for srcSize <= 256 KB */
3931     /* W,  C,  H,  S,  L,  T, strat */
3932     { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
3933     { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */
3934     { 18, 14, 14,  1,  5,  1, ZSTD_dfast   },  /* level  2 */
3935     { 18, 16, 16,  1,  4,  1, ZSTD_dfast   },  /* level  3 */
3936     { 18, 16, 17,  2,  5,  2, ZSTD_greedy  },  /* level  4.*/
3937     { 18, 18, 18,  3,  5,  2, ZSTD_greedy  },  /* level  5.*/
3938     { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/
3939     { 18, 18, 19,  4,  4,  4, ZSTD_lazy    },  /* level  7 */
3940     { 18, 18, 19,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
3941     { 18, 18, 19,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
3942     { 18, 18, 19,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
3943     { 18, 18, 19,  5,  4, 16, ZSTD_btlazy2 },  /* level 11.*/
3944     { 18, 19, 19,  6,  4, 16, ZSTD_btlazy2 },  /* level 12.*/
3945     { 18, 19, 19,  8,  4, 16, ZSTD_btlazy2 },  /* level 13 */
3946     { 18, 18, 19,  4,  4, 24, ZSTD_btopt   },  /* level 14.*/
3947     { 18, 18, 19,  4,  3, 24, ZSTD_btopt   },  /* level 15.*/
3948     { 18, 19, 19,  6,  3, 64, ZSTD_btopt   },  /* level 16.*/
3949     { 18, 19, 19,  8,  3,128, ZSTD_btopt   },  /* level 17.*/
3950     { 18, 19, 19, 10,  3,256, ZSTD_btopt   },  /* level 18.*/
3951     { 18, 19, 19, 10,  3,256, ZSTD_btultra },  /* level 19.*/
3952     { 18, 19, 19, 11,  3,512, ZSTD_btultra },  /* level 20.*/
3953     { 18, 19, 19, 12,  3,512, ZSTD_btultra },  /* level 21.*/
3954     { 18, 19, 19, 13,  3,999, ZSTD_btultra },  /* level 22.*/
3955 },
3956 {   /* for srcSize <= 128 KB */
3957     /* W,  C,  H,  S,  L,  T, strat */
3958     { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
3959     { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */
3960     { 17, 13, 15,  1,  5,  0, ZSTD_fast    },  /* level  2 */
3961     { 17, 15, 16,  2,  5,  1, ZSTD_dfast   },  /* level  3 */
3962     { 17, 17, 17,  2,  4,  1, ZSTD_dfast   },  /* level  4 */
3963     { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */
3964     { 17, 17, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */
3965     { 17, 17, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */
3966     { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
3967     { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
3968     { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
3969     { 17, 17, 17,  7,  4,  8, ZSTD_lazy2   },  /* level 11 */
3970     { 17, 18, 17,  6,  4, 16, ZSTD_btlazy2 },  /* level 12 */
3971     { 17, 18, 17,  8,  4, 16, ZSTD_btlazy2 },  /* level 13.*/
3972     { 17, 18, 17,  4,  4, 32, ZSTD_btopt   },  /* level 14.*/
3973     { 17, 18, 17,  6,  3, 64, ZSTD_btopt   },  /* level 15.*/
3974     { 17, 18, 17,  7,  3,128, ZSTD_btopt   },  /* level 16.*/
3975     { 17, 18, 17,  7,  3,256, ZSTD_btopt   },  /* level 17.*/
3976     { 17, 18, 17,  8,  3,256, ZSTD_btopt   },  /* level 18.*/
3977     { 17, 18, 17,  8,  3,256, ZSTD_btultra },  /* level 19.*/
3978     { 17, 18, 17,  9,  3,256, ZSTD_btultra },  /* level 20.*/
3979     { 17, 18, 17, 10,  3,256, ZSTD_btultra },  /* level 21.*/
3980     { 17, 18, 17, 11,  3,512, ZSTD_btultra },  /* level 22.*/
3981 },
3982 {   /* for srcSize <= 16 KB */
3983     /* W,  C,  H,  S,  L,  T, strat */
3984     { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
3985     { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */
3986     { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */
3987     { 14, 14, 14,  2,  4,  1, ZSTD_dfast   },  /* level  3.*/
3988     { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4.*/
3989     { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/
3990     { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */
3991     { 14, 14, 14,  6,  4,  8, ZSTD_lazy2   },  /* level  7 */
3992     { 14, 14, 14,  8,  4,  8, ZSTD_lazy2   },  /* level  8.*/
3993     { 14, 15, 14,  5,  4,  8, ZSTD_btlazy2 },  /* level  9.*/
3994     { 14, 15, 14,  9,  4,  8, ZSTD_btlazy2 },  /* level 10.*/
3995     { 14, 15, 14,  3,  4, 12, ZSTD_btopt   },  /* level 11.*/
3996     { 14, 15, 14,  6,  3, 16, ZSTD_btopt   },  /* level 12.*/
3997     { 14, 15, 14,  6,  3, 24, ZSTD_btopt   },  /* level 13.*/
3998     { 14, 15, 15,  6,  3, 48, ZSTD_btopt   },  /* level 14.*/
3999     { 14, 15, 15,  6,  3, 64, ZSTD_btopt   },  /* level 15.*/
4000     { 14, 15, 15,  6,  3, 96, ZSTD_btopt   },  /* level 16.*/
4001     { 14, 15, 15,  6,  3,128, ZSTD_btopt   },  /* level 17.*/
4002     { 14, 15, 15,  8,  3,256, ZSTD_btopt   },  /* level 18.*/
4003     { 14, 15, 15,  6,  3,256, ZSTD_btultra },  /* level 19.*/
4004     { 14, 15, 15,  8,  3,256, ZSTD_btultra },  /* level 20.*/
4005     { 14, 15, 15,  9,  3,256, ZSTD_btultra },  /* level 21.*/
4006     { 14, 15, 15, 10,  3,512, ZSTD_btultra },  /* level 22.*/
4007 },
4008 };
4009 
4010 /*! ZSTD_getCParams() :
4011 *  @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
4012 *   Size values are optional, provide 0 if not known or unused */
4013 ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
4014 {
4015     size_t const addedSize = srcSizeHint ? 0 : 500;
4016     U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : (U64)-1;
4017     U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);   /* intentional underflow for srcSizeHint == 0 */
4018     int row = compressionLevel;
4019     DEBUGLOG(5, "ZSTD_getCParams (cLevel=%i)", compressionLevel);
4020     if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT;   /* 0 == default */
4021     if (compressionLevel < 0) row = 0;   /* entry 0 is baseline for fast mode */
4022     if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
4023     {   ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
4024         if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel);   /* acceleration factor */
4025         return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize); }
4026 
4027 }
4028 
4029 /*! ZSTD_getParams() :
4030 *   same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
4031 *   All fields of `ZSTD_frameParameters` are set to default (0) */
4032 ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
4033     ZSTD_parameters params;
4034     ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSizeHint, dictSize);
4035     DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
4036     memset(&params, 0, sizeof(params));
4037     params.cParams = cParams;
4038     params.fParams.contentSizeFlag = 1;
4039     return params;
4040 }
4041