/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


/* ====== Compiler specifics ====== */
#if defined(_MSC_VER)
#  pragma warning(disable : 4204)   /* disable: C4204: non-constant aggregate initializer */
#endif


/* ====== Constants ====== */
#define ZSTDMT_OVERLAPLOG_DEFAULT 0


/* ====== Dependencies ====== */
#include "../common/zstd_deps.h"   /* ZSTD_memcpy, ZSTD_memset, INT_MAX, UINT_MAX */
#include "../common/mem.h"         /* MEM_STATIC */
#include "../common/pool.h"        /* threadpool */
#include "../common/threading.h"   /* mutex */
#include "zstd_compress_internal.h"  /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
#include "zstd_ldm.h"
#include "zstdmt_compress.h"

/* Guards code to support resizing the SeqPool.
 * We will want to resize the SeqPool to save memory in the future.
 * Until then, comment the code out since it is unused.
 */
#define ZSTD_RESIZE_SEQPOOL 0

/* ====== Debug ====== */
#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \
    && !defined(_MSC_VER) \
    && !defined(__MINGW32__)

#  include <stdio.h>
#  include <unistd.h>
#  include <sys/times.h>

#  define DEBUG_PRINTHEX(l,p,n) {            \
    unsigned debug_u;                        \
    for (debug_u=0; debug_u<(n); debug_u++)  \
        RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
    RAWLOG(l, " \n");                        \
}

static unsigned long long GetCurrentClockTimeMicroseconds(void)
{
   static clock_t _ticksPerSecond = 0;
   if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);

   {   struct tms junk; clock_t newTicks = (clock_t) times(&junk);
       return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond);
}  }

#define MUTEX_WAIT_TIME_DLEVEL 6
#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) {          \
    if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) {   \
        unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
        ZSTD_pthread_mutex_lock(mutex);           \
        {   unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
            unsigned long long const elapsedTime = (afterTime-beforeTime); \
            if (elapsedTime > 1000) {  /* or whatever threshold you like; I'm using 1 millisecond here */ \
                DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
                    elapsedTime, #mutex);         \
        }   }                                     \
    } else {                                      \
        ZSTD_pthread_mutex_lock(mutex);           \
    }                                             \
}
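
/* Usage sketch (timing value hypothetical) : with DEBUGLEVEL >= 6,
 *     ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->serial.mutex);
 * would log, say, "Thread took 1523 microseconds to acquire mutex &mtctx->serial.mutex",
 * courtesy of the #mutex stringization above. */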

#else

#  define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m)
#  define DEBUG_PRINTHEX(l,p,n) {}

#endif


/* =====   Buffer Pool   ===== */
/* a single Buffer Pool can be invoked from multiple threads in parallel */

typedef struct buffer_s {
    void* start;
    size_t capacity;
} buffer_t;

static const buffer_t g_nullBuffer = { NULL, 0 };

typedef struct ZSTDMT_bufferPool_s {
    ZSTD_pthread_mutex_t poolMutex;
    size_t bufferSize;
    unsigned totalBuffers;
    unsigned nbBuffers;
    ZSTD_customMem cMem;
    buffer_t bTable[1];   /* variable size */
} ZSTDMT_bufferPool;
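/* Sizing note : bTable[1] is the C89 "struct hack" for a variable-length array.
 * ZSTDMT_createBufferPool() below allocates
 * sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t),
 * so bTable effectively provides maxNbBuffers slots. */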

static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned maxNbBuffers, ZSTD_customMem cMem)
{
    ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_customCalloc(
        sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
    if (bufPool==NULL) return NULL;
    if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {
        ZSTD_customFree(bufPool, cMem);
        return NULL;
    }
    bufPool->bufferSize = 64 KB;
    bufPool->totalBuffers = maxNbBuffers;
    bufPool->nbBuffers = 0;
    bufPool->cMem = cMem;
    return bufPool;
}

static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
{
    unsigned u;
    DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool);
    if (!bufPool) return;   /* compatibility with free on NULL */
    for (u=0; u<bufPool->totalBuffers; u++) {
        DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start);
        ZSTD_customFree(bufPool->bTable[u].start, bufPool->cMem);
    }
    ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
    ZSTD_customFree(bufPool, bufPool->cMem);
}

/* only works at initialization, not during compression */
static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
{
    size_t const poolSize = sizeof(*bufPool)
                          + (bufPool->totalBuffers - 1) * sizeof(buffer_t);
    unsigned u;
    size_t totalBufferSize = 0;
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    for (u=0; u<bufPool->totalBuffers; u++)
        totalBufferSize += bufPool->bTable[u].capacity;
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);

    return poolSize + totalBufferSize;
}

/* ZSTDMT_setBufferSize() :
 * all future buffers provided by this buffer pool will have _at least_ this size
 * note : it's better for all buffers to have the same size,
 * as they become freely interchangeable, reducing malloc/free calls and memory fragmentation */
static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize)
{
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", (U32)bSize);
    bufPool->bufferSize = bSize;
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
}


static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, unsigned maxNbBuffers)
{
    if (srcBufPool==NULL) return NULL;
    if (srcBufPool->totalBuffers >= maxNbBuffers)   /* good enough */
        return srcBufPool;
    /* need a larger buffer pool */
    {   ZSTD_customMem const cMem = srcBufPool->cMem;
        size_t const bSize = srcBufPool->bufferSize;   /* forward parameters */
        ZSTDMT_bufferPool* newBufPool;
        ZSTDMT_freeBufferPool(srcBufPool);
        newBufPool = ZSTDMT_createBufferPool(maxNbBuffers, cMem);
        if (newBufPool==NULL) return newBufPool;
        ZSTDMT_setBufferSize(newBufPool, bSize);
        return newBufPool;
    }
}

/** ZSTDMT_getBuffer() :
 *  assumption : bufPool must be valid
 * @return : a buffer, with start pointer and size
 *  note: allocation may fail, in this case, start==NULL and size==0 */
static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
{
    size_t const bSize = bufPool->bufferSize;
    DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize);
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    if (bufPool->nbBuffers) {   /* try to use an existing buffer */
        buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)];
        size_t const availBufferSize = buf.capacity;
        bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer;
        if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) {
            /* large enough, but not too much */
            DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u",
                        bufPool->nbBuffers, (U32)buf.capacity);
            ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
            return buf;
        }
        /* size conditions not respected : scratch this buffer, create new one */
        DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing");
        ZSTD_customFree(buf.start, bufPool->cMem);
    }
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
    /* create new buffer */
    DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer");
    {   buffer_t buffer;
        void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
        buffer.start = start;   /* note : start can be NULL if malloc fails ! */
        buffer.capacity = (start==NULL) ? 0 : bSize;
        if (start==NULL) {
            DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!");
        } else {
            DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize);
        }
        return buffer;
    }
}

#if ZSTD_RESIZE_SEQPOOL
/** ZSTDMT_resizeBuffer() :
 *  assumption : bufPool must be valid
 * @return : a buffer that is at least the buffer pool buffer size.
 *  If a reallocation happens, the data in the input buffer is copied.
 */
static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)
{
    size_t const bSize = bufPool->bufferSize;
    if (buffer.capacity < bSize) {
        void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
        buffer_t newBuffer;
        newBuffer.start = start;
        newBuffer.capacity = start == NULL ? 0 : bSize;
        if (start != NULL) {
            assert(newBuffer.capacity >= buffer.capacity);
            ZSTD_memcpy(newBuffer.start, buffer.start, buffer.capacity);
            DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize);
            return newBuffer;
        }
        DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!");
    }
    return buffer;
}
#endif

/* store buffer for later re-use, up to pool capacity */
static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
{
    DEBUGLOG(5, "ZSTDMT_releaseBuffer");
    if (buf.start == NULL) return;   /* compatible with release on NULL */
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    if (bufPool->nbBuffers < bufPool->totalBuffers) {
        bufPool->bTable[bufPool->nbBuffers++] = buf;   /* stored for later use */
        DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u",
                    (U32)buf.capacity, (U32)(bufPool->nbBuffers-1));
        ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
        return;
    }
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
    /* Reached bufferPool capacity (should not happen) */
    DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing ");
    ZSTD_customFree(buf.start, bufPool->cMem);
}

/* We need 2 output buffers per worker since each dstBuff must be flushed after it is released.
 * The 3 additional buffers are as follows:
 *   1 buffer for input loading
 *   1 buffer for "next input" when submitting current one
 *   1 buffer stuck in queue */
#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) (2*(nbWorkers) + 3)
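/* e.g. (illustrative) : with 4 workers, the buffer pool may hold up to 2*4 + 3 = 11 buffers */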

/* After a worker releases its rawSeqStore, it is immediately ready for reuse.
 * So we only need one seq buffer per worker. */
#define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) (nbWorkers)

/* =====   Seq Pool Wrapper   ====== */

typedef ZSTDMT_bufferPool ZSTDMT_seqPool;

static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
{
    return ZSTDMT_sizeof_bufferPool(seqPool);
}

static rawSeqStore_t bufferToSeq(buffer_t buffer)
{
    rawSeqStore_t seq = kNullRawSeqStore;
    seq.seq = (rawSeq*)buffer.start;
    seq.capacity = buffer.capacity / sizeof(rawSeq);
    return seq;
}

static buffer_t seqToBuffer(rawSeqStore_t seq)
{
    buffer_t buffer;
    buffer.start = seq.seq;
    buffer.capacity = seq.capacity * sizeof(rawSeq);
    return buffer;
}

static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool)
{
    if (seqPool->bufferSize == 0) {
        return kNullRawSeqStore;
    }
    return bufferToSeq(ZSTDMT_getBuffer(seqPool));
}

#if ZSTD_RESIZE_SEQPOOL
static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
{
    return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq)));
}
#endif

static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
{
    ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq));
}

static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq)
{
    ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq));
}

static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)
{
    ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(SEQ_POOL_MAX_NB_BUFFERS(nbWorkers), cMem);
    if (seqPool == NULL) return NULL;
    ZSTDMT_setNbSeq(seqPool, 0);
    return seqPool;
}

static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)
{
    ZSTDMT_freeBufferPool(seqPool);
}

static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers)
{
    return ZSTDMT_expandBufferPool(pool, SEQ_POOL_MAX_NB_BUFFERS(nbWorkers));
}


/* =====   CCtx Pool   ===== */
/* a single CCtx Pool can be invoked from multiple threads in parallel */

typedef struct {
    ZSTD_pthread_mutex_t poolMutex;
    int totalCCtx;
    int availCCtx;
    ZSTD_customMem cMem;
    ZSTD_CCtx* cctx[1];   /* variable size */
} ZSTDMT_CCtxPool;

/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */
static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
{
    int cid;
    for (cid=0; cid<pool->totalCCtx; cid++)
        ZSTD_freeCCtx(pool->cctx[cid]);   /* note : compatible with free on NULL */
    ZSTD_pthread_mutex_destroy(&pool->poolMutex);
    ZSTD_customFree(pool, pool->cMem);
}

/* ZSTDMT_createCCtxPool() :
 * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */
static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers,
                                              ZSTD_customMem cMem)
{
    ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_customCalloc(
        sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem);
    assert(nbWorkers > 0);
    if (!cctxPool) return NULL;
    if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
        ZSTD_customFree(cctxPool, cMem);
        return NULL;
    }
    cctxPool->cMem = cMem;
    cctxPool->totalCCtx = nbWorkers;
    cctxPool->availCCtx = 1;   /* at least one cctx for single-thread mode */
    cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem);
    if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
    DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers);
    return cctxPool;
}

static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool,
                                              int nbWorkers)
{
    if (srcPool==NULL) return NULL;
    if (nbWorkers <= srcPool->totalCCtx) return srcPool;   /* good enough */
    /* need a larger cctx pool */
    {   ZSTD_customMem const cMem = srcPool->cMem;
        ZSTDMT_freeCCtxPool(srcPool);
        return ZSTDMT_createCCtxPool(nbWorkers, cMem);
    }
}

/* only works during initialization phase, not during compression */
static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
{
    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
    {   unsigned const nbWorkers = cctxPool->totalCCtx;
        size_t const poolSize = sizeof(*cctxPool)
                              + (nbWorkers-1) * sizeof(ZSTD_CCtx*);
        unsigned u;
        size_t totalCCtxSize = 0;
        for (u=0; u<nbWorkers; u++) {
            totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
        }
        ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
        assert(nbWorkers > 0);
        return poolSize + totalCCtxSize;
    }
}

static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)
{
    DEBUGLOG(5, "ZSTDMT_getCCtx");
    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
    if (cctxPool->availCCtx) {
        cctxPool->availCCtx--;
        {   ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx];
            ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
            return cctx;
    }   }
    ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
    DEBUGLOG(5, "create one more CCtx");
    return ZSTD_createCCtx_advanced(cctxPool->cMem);   /* note : can be NULL, when creation fails ! */
}

static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
{
    if (cctx==NULL) return;   /* compatibility with release on NULL */
    ZSTD_pthread_mutex_lock(&pool->poolMutex);
    if (pool->availCCtx < pool->totalCCtx)
        pool->cctx[pool->availCCtx++] = cctx;
    else {
        /* pool overflow : should not happen, since totalCCtx==nbWorkers */
        DEBUGLOG(4, "CCtx pool overflow : free cctx");
        ZSTD_freeCCtx(cctx);
    }
    ZSTD_pthread_mutex_unlock(&pool->poolMutex);
}

/* ====   Serial State   ==== */

typedef struct {
    void const* start;
    size_t size;
} range_t;

typedef struct {
    /* All variables in the struct are protected by mutex. */
    ZSTD_pthread_mutex_t mutex;
    ZSTD_pthread_cond_t cond;
    ZSTD_CCtx_params params;
    ldmState_t ldmState;
    XXH64_state_t xxhState;
    unsigned nextJobID;
    /* Protects ldmWindow.
     * Must be acquired after the main mutex when acquiring both.
     */
    ZSTD_pthread_mutex_t ldmWindowMutex;
    ZSTD_pthread_cond_t ldmWindowCond;   /* Signaled when ldmWindow is updated */
    ZSTD_window_t ldmWindow;             /* A thread-safe copy of ldmState.window */
} serialState_t;
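/* Lock-order sketch (inferred from the comment above and from
 * ZSTDMT_serialState_ensureFinished() below) : when both locks are needed,
 * take mutex first, then ldmWindowMutex; taking them in the opposite order
 * could deadlock against a thread doing the same. */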

static int
ZSTDMT_serialState_reset(serialState_t* serialState,
                         ZSTDMT_seqPool* seqPool,
                         ZSTD_CCtx_params params,
                         size_t jobSize,
                         const void* dict, size_t const dictSize,
                         ZSTD_dictContentType_e dictContentType)
{
    /* Adjust parameters */
    if (params.ldmParams.enableLdm == ZSTD_ps_enable) {
        DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10);
        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
        assert(params.ldmParams.hashRateLog < 32);
    } else {
        ZSTD_memset(&params.ldmParams, 0, sizeof(params.ldmParams));
    }
    serialState->nextJobID = 0;
    if (params.fParams.checksumFlag)
        XXH64_reset(&serialState->xxhState, 0);
    if (params.ldmParams.enableLdm == ZSTD_ps_enable) {
        ZSTD_customMem cMem = params.customMem;
        unsigned const hashLog = params.ldmParams.hashLog;
        size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
        unsigned const bucketLog =
            params.ldmParams.hashLog - params.ldmParams.bucketSizeLog;
        unsigned const prevBucketLog =
            serialState->params.ldmParams.hashLog -
            serialState->params.ldmParams.bucketSizeLog;
        size_t const numBuckets = (size_t)1 << bucketLog;
        /* Size the seq pool tables */
        ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize));
        /* Reset the window */
        ZSTD_window_init(&serialState->ldmState.window);
        /* Resize tables and output space if necessary. */
        if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) {
            ZSTD_customFree(serialState->ldmState.hashTable, cMem);
            serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc(hashSize, cMem);
        }
        if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) {
            ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
            serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_customMalloc(numBuckets, cMem);
        }
        if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets)
            return 1;
        /* Zero the tables */
        ZSTD_memset(serialState->ldmState.hashTable, 0, hashSize);
        ZSTD_memset(serialState->ldmState.bucketOffsets, 0, numBuckets);

        /* Update window state and fill hash table with dict */
        serialState->ldmState.loadedDictEnd = 0;
        if (dictSize > 0) {
            if (dictContentType == ZSTD_dct_rawContent) {
                BYTE const* const dictEnd = (const BYTE*)dict + dictSize;
                ZSTD_window_update(&serialState->ldmState.window, dict, dictSize, /* forceNonContiguous */ 0);
                ZSTD_ldm_fillHashTable(&serialState->ldmState, (const BYTE*)dict, dictEnd, &params.ldmParams);
                serialState->ldmState.loadedDictEnd = params.forceWindow ? 0 : (U32)(dictEnd - serialState->ldmState.window.base);
            } else {
                /* don't even load anything */
            }
        }

        /* Initialize serialState's copy of ldmWindow. */
        serialState->ldmWindow = serialState->ldmState.window;
    }

    serialState->params = params;
    serialState->params.jobSize = (U32)jobSize;
    return 0;
}

static int ZSTDMT_serialState_init(serialState_t* serialState)
{
    int initError = 0;
    ZSTD_memset(serialState, 0, sizeof(*serialState));
    initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL);
    initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL);
    initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL);
    initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL);
    return initError;
}

static void ZSTDMT_serialState_free(serialState_t* serialState)
{
    ZSTD_customMem cMem = serialState->params.customMem;
    ZSTD_pthread_mutex_destroy(&serialState->mutex);
    ZSTD_pthread_cond_destroy(&serialState->cond);
    ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex);
    ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond);
    ZSTD_customFree(serialState->ldmState.hashTable, cMem);
    ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
}

static void ZSTDMT_serialState_update(serialState_t* serialState,
                                      ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore,
                                      range_t src, unsigned jobID)
{
    /* Wait for our turn */
    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
    while (serialState->nextJobID < jobID) {
        DEBUGLOG(5, "wait for serialState->cond");
        ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex);
    }
    /* A future job may error and skip our job */
    if (serialState->nextJobID == jobID) {
        /* It is now our turn, do any processing necessary */
        if (serialState->params.ldmParams.enableLdm == ZSTD_ps_enable) {
            size_t error;
            assert(seqStore.seq != NULL && seqStore.pos == 0 &&
                   seqStore.size == 0 && seqStore.capacity > 0);
            assert(src.size <= serialState->params.jobSize);
            ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, /* forceNonContiguous */ 0);
            error = ZSTD_ldm_generateSequences(
                &serialState->ldmState, &seqStore,
                &serialState->params.ldmParams, src.start, src.size);
            /* We provide a large enough buffer to never fail. */
            assert(!ZSTD_isError(error)); (void)error;
            /* Update ldmWindow to match the ldmState.window and signal the main
             * thread if it is waiting for a buffer.
             */
            ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
            serialState->ldmWindow = serialState->ldmState.window;
            ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
            ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
        }
        if (serialState->params.fParams.checksumFlag && src.size > 0)
            XXH64_update(&serialState->xxhState, src.start, src.size);
    }
    /* Now it is the next job's turn */
    serialState->nextJobID++;
    ZSTD_pthread_cond_broadcast(&serialState->cond);
    ZSTD_pthread_mutex_unlock(&serialState->mutex);

    if (seqStore.size > 0) {
        size_t const err = ZSTD_referenceExternalSequences(
            jobCCtx, seqStore.seq, seqStore.size);
        assert(serialState->params.ldmParams.enableLdm == ZSTD_ps_enable);
        assert(!ZSTD_isError(err));
        (void)err;
    }
}

static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState,
                                              unsigned jobID, size_t cSize)
{
    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
    if (serialState->nextJobID <= jobID) {
        assert(ZSTD_isError(cSize)); (void)cSize;
        DEBUGLOG(5, "Skipping past job %u because of error", jobID);
        serialState->nextJobID = jobID + 1;
        ZSTD_pthread_cond_broadcast(&serialState->cond);

        ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
        ZSTD_window_clear(&serialState->ldmWindow);
        ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
        ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
    }
    ZSTD_pthread_mutex_unlock(&serialState->mutex);

}


/* ------------------------------------------ */
/* =====          Worker thread         ===== */
/* ------------------------------------------ */

static const range_t kNullRange = { NULL, 0 };

typedef struct {
    size_t   consumed;                 /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */
    size_t   cSize;                    /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */
    ZSTD_pthread_mutex_t job_mutex;    /* Thread-safe - used by mtctx and worker */
    ZSTD_pthread_cond_t job_cond;      /* Thread-safe - used by mtctx and worker */
    ZSTDMT_CCtxPool* cctxPool;         /* Thread-safe - used by mtctx and (all) workers */
    ZSTDMT_bufferPool* bufPool;        /* Thread-safe - used by mtctx and (all) workers */
    ZSTDMT_seqPool* seqPool;           /* Thread-safe - used by mtctx and (all) workers */
    serialState_t* serial;             /* Thread-safe - used by mtctx and (all) workers */
    buffer_t dstBuff;                  /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */
    range_t prefix;                    /* set by mtctx, then read by worker & mtctx => no barrier */
    range_t src;                       /* set by mtctx, then read by worker & mtctx => no barrier */
    unsigned jobID;                    /* set by mtctx, then read by worker => no barrier */
    unsigned firstJob;                 /* set by mtctx, then read by worker => no barrier */
    unsigned lastJob;                  /* set by mtctx, then read by worker => no barrier */
    ZSTD_CCtx_params params;           /* set by mtctx, then read by worker => no barrier */
    const ZSTD_CDict* cdict;           /* set by mtctx, then read by worker => no barrier */
    unsigned long long fullFrameSize;  /* set by mtctx, then read by worker => no barrier */
    size_t   dstFlushed;               /* used only by mtctx */
    unsigned frameChecksumNeeded;      /* used only by mtctx */
} ZSTDMT_jobDescription;

#define JOB_ERROR(e) {                          \
    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);   \
    job->cSize = e;                             \
    ZSTD_pthread_mutex_unlock(&job->job_mutex); \
    goto _endJob;                               \
}

/* ZSTDMT_compressionJob() is a POOL_function type */
static void ZSTDMT_compressionJob(void* jobDescription)
{
    ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
    ZSTD_CCtx_params jobParams = job->params;   /* do not modify job->params ! copy it, modify the copy */
    ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
    rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);
    buffer_t dstBuff = job->dstBuff;
    size_t lastCBlockSize = 0;

    /* resources */
    if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
    if (dstBuff.start == NULL) {   /* streaming job : doesn't provide a dstBuffer */
        dstBuff = ZSTDMT_getBuffer(job->bufPool);
        if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation));
        job->dstBuff = dstBuff;   /* this value can be read in ZSTDMT_flush, when it copies the whole job */
    }
    if (jobParams.ldmParams.enableLdm == ZSTD_ps_enable && rawSeqStore.seq == NULL)
        JOB_ERROR(ERROR(memory_allocation));

    /* Don't compute the checksum for chunks, since we compute it externally,
     * but write it in the header.
     */
    if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;
    /* Don't run LDM for the chunks, since we handle it externally */
    jobParams.ldmParams.enableLdm = ZSTD_ps_disable;
    /* Correct nbWorkers to 0. */
    jobParams.nbWorkers = 0;


    /* init */
    if (job->cdict) {
        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize);
        assert(job->firstJob);   /* only allowed for first job */
        if (ZSTD_isError(initError)) JOB_ERROR(initError);
    } else {   /* srcStart points at reloaded section */
        U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
        {   size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
            if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
        }
        if (!job->firstJob) {
            size_t const err = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_deterministicRefPrefix, 0);
            if (ZSTD_isError(err)) JOB_ERROR(err);
        }
        {   size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
                        job->prefix.start, job->prefix.size, ZSTD_dct_rawContent,   /* load dictionary in "content-only" mode (no header analysis) */
                        ZSTD_dtlm_fast,
                        NULL,   /*cdict*/
                        &jobParams, pledgedSrcSize);
            if (ZSTD_isError(initError)) JOB_ERROR(initError);
    }   }

    /* Perform serial step as early as possible, but after CCtx initialization */
    ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID);

    if (!job->firstJob) {   /* flush and overwrite frame header when it's not first job */
        size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0);
        if (ZSTD_isError(hSize)) JOB_ERROR(hSize);
        DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize);
        ZSTD_invalidateRepCodes(cctx);
    }

    /* compress */
    {   size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX;
        int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize);
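        /* e.g. (illustrative) : a 2 MB job with chunkSize = 4*128 KB = 512 KB yields nbChunks = 4 */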
        const BYTE* ip = (const BYTE*) job->src.start;
        BYTE* const ostart = (BYTE*)dstBuff.start;
        BYTE* op = ostart;
        BYTE* oend = op + dstBuff.capacity;
        int chunkNb;
        if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize);   /* check overflow */
        DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks);
        assert(job->cSize == 0);
        for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) {
            size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize);
            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
            ip += chunkSize;
            op += cSize; assert(op < oend);
            /* stats */
            ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
            job->cSize += cSize;
            job->consumed = chunkSize * chunkNb;
            DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)",
                        (U32)cSize, (U32)job->cSize);
            ZSTD_pthread_cond_signal(&job->job_cond);   /* warns some more data is ready to be flushed */
            ZSTD_pthread_mutex_unlock(&job->job_mutex);
        }
        /* last block */
        assert(chunkSize > 0);
        assert((chunkSize & (chunkSize - 1)) == 0);   /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */
        if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) {
            size_t const lastBlockSize1 = job->src.size & (chunkSize-1);
            size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1;
            size_t const cSize = (job->lastJob) ?
                 ZSTD_compressEnd     (cctx, op, oend-op, ip, lastBlockSize) :
                 ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize);
            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
            lastCBlockSize = cSize;
    }   }
    if (!job->firstJob) {
        /* Double check that we don't have an ext-dict, because then our
         * repcode invalidation doesn't work.
         */
        assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
    }
    ZSTD_CCtx_trace(cctx, 0);

_endJob:
    ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize);
    if (job->prefix.size > 0)
        DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start);
    DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start);
    /* release resources */
    ZSTDMT_releaseSeq(job->seqPool, rawSeqStore);
    ZSTDMT_releaseCCtx(job->cctxPool, cctx);
    /* report */
    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
    if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0);
    job->cSize += lastCBlockSize;
    job->consumed = job->src.size;   /* when job->consumed == job->src.size , compression job is presumed completed */
    ZSTD_pthread_cond_signal(&job->job_cond);
    ZSTD_pthread_mutex_unlock(&job->job_mutex);
}


/* ------------------------------------------ */
/* =====   Multi-threaded compression   ===== */
/* ------------------------------------------ */

typedef struct {
    range_t prefix;      /* read-only non-owned prefix buffer */
    buffer_t buffer;
    size_t filled;
} inBuff_t;

typedef struct {
    BYTE* buffer;     /* The round input buffer. All jobs get references
                       * to pieces of the buffer. ZSTDMT_tryGetInputRange()
                       * handles handing out job input buffers, and makes
                       * sure it doesn't overlap with any pieces still in use.
                       */
    size_t capacity;  /* The capacity of buffer. */
    size_t pos;       /* The position of the current inBuff in the round
                       * buffer. Updated past the end of the inBuff once
                       * the inBuff is sent to the worker thread.
                       * pos <= capacity.
                       */
} roundBuff_t;

static const roundBuff_t kNullRoundBuff = {NULL, 0, 0};

#define RSYNC_LENGTH 32
/* Don't create chunks smaller than the zstd block size.
 * This stops us from regressing compression ratio too much,
 * and ensures our output fits in ZSTD_compressBound().
 *
 * If this is shrunk < ZSTD_BLOCKSIZELOG_MIN then
 * ZSTD_COMPRESSBOUND() will need to be updated.
 */
#define RSYNC_MIN_BLOCK_LOG ZSTD_BLOCKSIZELOG_MAX
#define RSYNC_MIN_BLOCK_SIZE (1<<RSYNC_MIN_BLOCK_LOG)
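/* Background sketch : the rsync state below implements a rolling hash over the
 * last RSYNC_LENGTH bytes of input. primePower is the precomputed multiplier
 * used to drop the oldest byte from the hash, and hitMask controls how often
 * the hash "hits" on average, which sets the average chunk size (both are
 * initialized in ZSTDMT_initCStream_internal() below). */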

typedef struct {
  U64 hash;
  U64 hitMask;
  U64 primePower;
} rsyncState_t;

struct ZSTDMT_CCtx_s {
    POOL_ctx* factory;
    ZSTDMT_jobDescription* jobs;
    ZSTDMT_bufferPool* bufPool;
    ZSTDMT_CCtxPool* cctxPool;
    ZSTDMT_seqPool* seqPool;
    ZSTD_CCtx_params params;
    size_t targetSectionSize;
    size_t targetPrefixSize;
    int jobReady;        /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */
    inBuff_t inBuff;
    roundBuff_t roundBuff;
    serialState_t serial;
    rsyncState_t rsync;
    unsigned jobIDMask;
    unsigned doneJobID;
    unsigned nextJobID;
    unsigned frameEnded;
    unsigned allJobsCompleted;
    unsigned long long frameContentSize;
    unsigned long long consumed;
    unsigned long long produced;
    ZSTD_customMem cMem;
    ZSTD_CDict* cdictLocal;
    const ZSTD_CDict* cdict;
    unsigned providedFactory: 1;
};

static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)
{
    U32 jobNb;
    if (jobTable == NULL) return;
    for (jobNb=0; jobNb<nbJobs; jobNb++) {
        ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex);
        ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond);
    }
    ZSTD_customFree(jobTable, cMem);
}

/* ZSTDMT_allocJobsTable()
 * allocate and init a job table.
 * update *nbJobsPtr to next power of 2 value, as size of table */
static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem)
{
    U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1;
    U32 const nbJobs = 1 << nbJobsLog2;
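    /* e.g. (illustrative) : *nbJobsPtr==6 => ZSTD_highbit32(6)==2, so nbJobs == 1<<3 == 8 */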
    U32 jobNb;
    ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*)
                ZSTD_customCalloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
    int initError = 0;
    if (jobTable==NULL) return NULL;
    *nbJobsPtr = nbJobs;
    for (jobNb=0; jobNb<nbJobs; jobNb++) {
        initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL);
        initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL);
    }
    if (initError != 0) {
        ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem);
        return NULL;
    }
    return jobTable;
}

static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
    U32 nbJobs = nbWorkers + 2;
    if (nbJobs > mtctx->jobIDMask+1) {   /* need more job capacity */
        ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
        mtctx->jobIDMask = 0;
        mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem);
        if (mtctx->jobs==NULL) return ERROR(memory_allocation);
        assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0));   /* ensure nbJobs is a power of 2 */
        mtctx->jobIDMask = nbJobs - 1;
    }
    return 0;
}


/* ZSTDMT_CCtxParam_setNbWorkers():
 * Internal use only */
static size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
{
    return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers);
}

MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
{
    ZSTDMT_CCtx* mtctx;
    U32 nbJobs = nbWorkers + 2;
    int initError;
    DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers);

    if (nbWorkers < 1) return NULL;
    nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX);
    if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL))
        /* invalid custom allocator */
        return NULL;

    mtctx = (ZSTDMT_CCtx*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtx), cMem);
    if (!mtctx) return NULL;
    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
    mtctx->cMem = cMem;
    mtctx->allJobsCompleted = 1;
    if (pool != NULL) {
        mtctx->factory = pool;
        mtctx->providedFactory = 1;
    }
    else {
        mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem);
        mtctx->providedFactory = 0;
    }
    mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
    assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0);   /* ensure nbJobs is a power of 2 */
    mtctx->jobIDMask = nbJobs - 1;
    mtctx->bufPool = ZSTDMT_createBufferPool(BUF_POOL_MAX_NB_BUFFERS(nbWorkers), cMem);
    mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem);
    mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem);
    initError = ZSTDMT_serialState_init(&mtctx->serial);
    mtctx->roundBuff = kNullRoundBuff;
    if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) {
        ZSTDMT_freeCCtx(mtctx);
        return NULL;
    }
    DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers);
    return mtctx;
}

ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
{
#ifdef ZSTD_MULTITHREAD
    return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem, pool);
#else
    (void)nbWorkers;
    (void)cMem;
    (void)pool;
    return NULL;
#endif
}


/* ZSTDMT_releaseAllJobResources() :
 * note : ensure all workers are killed first ! */
static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
{
    unsigned jobID;
    DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
    for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
        /* Copy the mutex/cond out */
        ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex;
        ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond;

        DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);

        /* Clear the job description, but keep the mutex/cond */
        ZSTD_memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID]));
        mtctx->jobs[jobID].job_mutex = mutex;
        mtctx->jobs[jobID].job_cond = cond;
    }
    mtctx->inBuff.buffer = g_nullBuffer;
    mtctx->inBuff.filled = 0;
    mtctx->allJobsCompleted = 1;
}

static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)
{
    DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted");
    while (mtctx->doneJobID < mtctx->nextJobID) {
        unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask;
        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
        while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
            DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID);   /* we want to block when waiting for data to flush */
            ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
        }
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
        mtctx->doneJobID++;
    }
}

size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
{
    if (mtctx==NULL) return 0;   /* compatible with free on NULL */
    if (!mtctx->providedFactory)
        POOL_free(mtctx->factory);   /* stop and free worker threads */
    ZSTDMT_releaseAllJobResources(mtctx);   /* release job resources into pools first */
    ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
    ZSTDMT_freeBufferPool(mtctx->bufPool);
    ZSTDMT_freeCCtxPool(mtctx->cctxPool);
    ZSTDMT_freeSeqPool(mtctx->seqPool);
    ZSTDMT_serialState_free(&mtctx->serial);
    ZSTD_freeCDict(mtctx->cdictLocal);
    if (mtctx->roundBuff.buffer)
        ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
    ZSTD_customFree(mtctx, mtctx->cMem);
    return 0;
}

size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
{
    if (mtctx == NULL) return 0;   /* supports sizeof NULL */
    return sizeof(*mtctx)
            + POOL_sizeof(mtctx->factory)
            + ZSTDMT_sizeof_bufferPool(mtctx->bufPool)
            + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
            + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
            + ZSTDMT_sizeof_seqPool(mtctx->seqPool)
            + ZSTD_sizeof_CDict(mtctx->cdictLocal)
            + mtctx->roundBuff.capacity;
}


/* ZSTDMT_resize() :
 * @return : error code if fails, 0 on success */
static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
{
    if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) , "");
    mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, BUF_POOL_MAX_NB_BUFFERS(nbWorkers));
    if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
    mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
    if (mtctx->cctxPool == NULL) return ERROR(memory_allocation);
    mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers);
    if (mtctx->seqPool == NULL) return ERROR(memory_allocation);
    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
    return 0;
}


/*! ZSTDMT_updateCParams_whileCompressing() :
 *  Updates a selected set of compression parameters, remaining compatible with currently active frame.
 *  New parameters will be applied to next compression job. */
void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams)
{
    U32 const saved_wlog = mtctx->params.cParams.windowLog;   /* Do not modify windowLog while compressing */
    int const compressionLevel = cctxParams->compressionLevel;
    DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)",
                compressionLevel);
    mtctx->params.compressionLevel = compressionLevel;
    {   ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
        cParams.windowLog = saved_wlog;
        mtctx->params.cParams = cParams;
    }
}

/* ZSTDMT_getFrameProgression():
 * tells how much data has been consumed (input) and produced (output) for current frame.
 * able to count progression inside worker threads.
 * Note : mutex will be acquired during statistics collection inside workers. */
ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
{
    ZSTD_frameProgression fps;
    DEBUGLOG(5, "ZSTDMT_getFrameProgression");
    fps.ingested = mtctx->consumed + mtctx->inBuff.filled;
    fps.consumed = mtctx->consumed;
    fps.produced = fps.flushed = mtctx->produced;
    fps.currentJobID = mtctx->nextJobID;
    fps.nbActiveWorkers = 0;
    {   unsigned jobNb;
        unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
        DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
                    mtctx->doneJobID, lastJobNb, mtctx->jobReady)
        for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {
            unsigned const wJobID = jobNb & mtctx->jobIDMask;
            ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID];
            ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
            {   size_t const cResult = jobPtr->cSize;
                size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
                size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
                assert(flushed <= produced);
                fps.ingested += jobPtr->src.size;
                fps.consumed += jobPtr->consumed;
                fps.produced += produced;
                fps.flushed  += flushed;
                fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size);
            }
            ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
        }
    }
    return fps;
}


size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
{
    size_t toFlush;
    unsigned const jobID = mtctx->doneJobID;
    assert(jobID <= mtctx->nextJobID);
    if (jobID == mtctx->nextJobID) return 0;   /* no active job => nothing to flush */

    /* look into oldest non-fully-flushed job */
    {   unsigned const wJobID = jobID & mtctx->jobIDMask;
        ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID];
        ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
        {   size_t const cResult = jobPtr->cSize;
            size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
            size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
            assert(flushed <= produced);
            assert(jobPtr->consumed <= jobPtr->src.size);
            toFlush = produced - flushed;
            /* if toFlush==0, nothing is available to flush.
             * However, jobID is expected to still be active:
             * if jobID was already completed and fully flushed,
             * ZSTDMT_flushProduced() should have already moved onto next job.
             * Therefore, some input has not yet been consumed. */
            if (toFlush==0) {
                assert(jobPtr->consumed < jobPtr->src.size);
            }
        }
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
    }

    return toFlush;
}


/* ------------------------------------------ */
/* =====   Multi-threaded compression   ===== */
/* ------------------------------------------ */

static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
{
    unsigned jobLog;
    if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
        /* In Long Range Mode, the windowLog is typically oversized.
         * In which case, it's preferable to determine the jobSize
         * based on cycleLog instead. */
        jobLog = MAX(21, ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy) + 3);
    } else {
        jobLog = MAX(20, params->cParams.windowLog + 2);
    }
    return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX);
}
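/* e.g. (illustrative) : without LDM and windowLog==24, jobLog = MAX(20, 26) = 26,
 * i.e. a 64 MB target job size, subject to the ZSTDMT_JOBLOG_MAX cap. */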

static int ZSTDMT_overlapLog_default(ZSTD_strategy strat)
{
    switch(strat)
    {
        case ZSTD_btultra2:
            return 9;
        case ZSTD_btultra:
        case ZSTD_btopt:
            return 8;
        case ZSTD_btlazy2:
        case ZSTD_lazy2:
            return 7;
        case ZSTD_lazy:
        case ZSTD_greedy:
        case ZSTD_dfast:
        case ZSTD_fast:
        default:;
    }
    return 6;
}

static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat)
{
    assert(0 <= ovlog && ovlog <= 9);
    if (ovlog == 0) return ZSTDMT_overlapLog_default(strat);
    return ovlog;
}

static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)
{
    int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy);
    int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog);
    assert(0 <= overlapRLog && overlapRLog <= 8);
    if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
        /* In Long Range Mode, the windowLog is typically oversized.
         * In which case, it's preferable to determine the jobSize
         * based on chainLog instead.
         * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */
        ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
                - overlapRLog;
    }
    assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX);
    DEBUGLOG(4, "overlapLog : %i", params->overlapLog);
    DEBUGLOG(4, "overlap size : %i", 1 << ovLog);
    return (ovLog==0) ? 0 : (size_t)1 << ovLog;
}
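/* e.g. (illustrative) : overlapLog==0 with strategy ZSTD_btopt defaults to 8,
 * so overlapRLog==1 and the overlap is 2^(windowLog-1), i.e. half the window. */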

/* ====================================== */
/* =======      Streaming API     ======= */
/* ====================================== */

size_t ZSTDMT_initCStream_internal(
        ZSTDMT_CCtx* mtctx,
        const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
        const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
        unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)",
                (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx);

    /* params are supposed to be fully validated at this point */
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    assert(!((dict) && (cdict)));   /* either dict or cdict, not both */

    /* init */
    if (params.nbWorkers != mtctx->params.nbWorkers)
        FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) , "");

    if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
    if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX;

    DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);

    if (mtctx->allJobsCompleted == 0) {   /* previous compression not correctly finished */
        ZSTDMT_waitForAllJobsCompleted(mtctx);
        ZSTDMT_releaseAllJobResources(mtctx);
        mtctx->allJobsCompleted = 1;
    }

    mtctx->params = params;
    mtctx->frameContentSize = pledgedSrcSize;
    if (dict) {
        ZSTD_freeCDict(mtctx->cdictLocal);
        mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
                                                      ZSTD_dlm_byCopy, dictContentType,   /* note : a loadPrefix becomes an internal CDict */
                                                      params.cParams, mtctx->cMem);
        mtctx->cdict = mtctx->cdictLocal;
        if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation);
    } else {
        ZSTD_freeCDict(mtctx->cdictLocal);
        mtctx->cdictLocal = NULL;
        mtctx->cdict = cdict;
    }

    mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&params);
    DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));
    mtctx->targetSectionSize = params.jobSize;
    if (mtctx->targetSectionSize == 0) {
        mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(&params);
    }
    assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX);

    if (params.rsyncable) {
        /* Aim for the targetSectionSize as the average job size. */
        U32 const jobSizeKB = (U32)(mtctx->targetSectionSize >> 10);
        U32 const rsyncBits = (assert(jobSizeKB >= 1), ZSTD_highbit32(jobSizeKB) + 10);
        /* We refuse to create jobs < RSYNC_MIN_BLOCK_SIZE bytes, so make sure our
         * expected job size is at least 4x larger. */
        assert(rsyncBits >= RSYNC_MIN_BLOCK_LOG + 2);
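        /* e.g. (illustrative) : a 2 MB target job size gives jobSizeKB==2048,
         * so rsyncBits == 11 + 10 == 21, and a synchronization point is
         * expected every 2^21 bytes == 2 MB on average. */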
        DEBUGLOG(4, "rsyncLog = %u", rsyncBits);
        mtctx->rsync.hash = 0;
        mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;
        mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH);
    }
    if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize;   /* job size must be >= overlap size */
    DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize);
    DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10));
    ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));
    {
        /* If ldm is enabled we need windowSize space. */
        size_t const windowSize = mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable ? (1U << mtctx->params.cParams.windowLog) : 0;
        /* Two buffers of slack, plus extra space for the overlap
         * This is the minimum slack that LDM works with. One extra because
         * flush might waste up to targetSectionSize-1 bytes. Another extra
         * for the overlap (if > 0), then one to fill which doesn't overlap
         * with the LDM window.
         */
        size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0);
        size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers;
        /* Compute the total size, and always have enough slack */
        size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1);
        size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers;
        size_t const capacity = MAX(windowSize, sectionsSize) + slackSize;
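        /* e.g. (illustrative) : 4 workers, 2 MB sections, overlap enabled, no LDM :
         * sectionsSize == 8 MB, nbSlackBuffers == 3, slackSize == 6 MB => capacity == 14 MB */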
1299 if (mtctx->roundBuff.capacity < capacity) {
1300 if (mtctx->roundBuff.buffer)
1301 ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
1302 mtctx->roundBuff.buffer = (BYTE*)ZSTD_customMalloc(capacity, mtctx->cMem);
1303 if (mtctx->roundBuff.buffer == NULL) {
1304 mtctx->roundBuff.capacity = 0;
1305 return ERROR(memory_allocation);
1306 }
1307 mtctx->roundBuff.capacity = capacity;
1308 }
1309 }
1310 DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10));
1311 mtctx->roundBuff.pos = 0;
1312 mtctx->inBuff.buffer = g_nullBuffer;
1313 mtctx->inBuff.filled = 0;
1314 mtctx->inBuff.prefix = kNullRange;
1315 mtctx->doneJobID = 0;
1316 mtctx->nextJobID = 0;
1317 mtctx->frameEnded = 0;
1318 mtctx->allJobsCompleted = 0;
1319 mtctx->consumed = 0;
1320 mtctx->produced = 0;
1321 if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize,
1322 dict, dictSize, dictContentType))
1323 return ERROR(memory_allocation);
1324 return 0;
1325 }
1326
1327
1328 /* ZSTDMT_writeLastEmptyBlock()
1329 * Write a single empty block with an end-of-frame to finish a frame.
1330 * Job must be created from streaming variant.
1331 * This function is always successful if expected conditions are fulfilled.
1332 */
ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription * job)1333 static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
1334 {
1335 assert(job->lastJob == 1);
1336 assert(job->src.size == 0); /* last job is empty -> will be simplified into a last empty block */
1337 assert(job->firstJob == 0); /* cannot be first job, as it also needs to create frame header */
1338 assert(job->dstBuff.start == NULL); /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */
1339 job->dstBuff = ZSTDMT_getBuffer(job->bufPool);
1340 if (job->dstBuff.start == NULL) {
1341 job->cSize = ERROR(memory_allocation);
1342 return;
1343 }
1344 assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize); /* no buffer should ever be that small */
1345 job->src = kNullRange;
1346 job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity);
1347 assert(!ZSTD_isError(job->cSize));
1348 assert(job->consumed == 0);
1349 }
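
/* Illustrative sketch (not compiled) : assuming the standard zstd block
 * header layout (3 bytes : bit 0 = lastBlock flag, bits 1-2 = block type,
 * bits 3-23 = block size), the last empty block written above amounts to
 * a raw, empty, last block header. */
#if 0
static void exampleLastEmptyBlock(void* dst)
{
    U32 const cBlockHeader24 = 1 /* lastBlock */ | (0 /* bt_raw */ << 1) | (0 /* size */ << 3);
    MEM_writeLE24(dst, cBlockHeader24);   /* emits bytes 01 00 00 */
}
#endif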

static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp)
{
    unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask;
    int const endFrame = (endOp == ZSTD_e_end);

    if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) {
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full");
        assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask));
        return 0;
    }
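    /* Worked example (hypothetical numbers) : with 4 job slots, jobIDMask == 3.
     * If doneJobID == 3 and nextJobID == 7, then nextJobID > doneJobID + jobIDMask
     * (7 > 6), so all 4 slots still hold jobs that are not fully flushed and no
     * new job can be created until doneJobID advances ; note (7 & 3) == (3 & 3),
     * matching the assert above. */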

    if (!mtctx->jobReady) {
        BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start;
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ",
                    mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size);
        mtctx->jobs[jobID].src.start = src;
        mtctx->jobs[jobID].src.size = srcSize;
        assert(mtctx->inBuff.filled >= srcSize);
        mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix;
        mtctx->jobs[jobID].consumed = 0;
        mtctx->jobs[jobID].cSize = 0;
        mtctx->jobs[jobID].params = mtctx->params;
        mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? mtctx->cdict : NULL;
        mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize;
        mtctx->jobs[jobID].dstBuff = g_nullBuffer;
        mtctx->jobs[jobID].cctxPool = mtctx->cctxPool;
        mtctx->jobs[jobID].bufPool = mtctx->bufPool;
        mtctx->jobs[jobID].seqPool = mtctx->seqPool;
        mtctx->jobs[jobID].serial = &mtctx->serial;
        mtctx->jobs[jobID].jobID = mtctx->nextJobID;
        mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0);
        mtctx->jobs[jobID].lastJob = endFrame;
        mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0);
        mtctx->jobs[jobID].dstFlushed = 0;

        /* Update the round buffer pos and clear the input buffer to be reset */
        mtctx->roundBuff.pos += srcSize;
        mtctx->inBuff.buffer = g_nullBuffer;
        mtctx->inBuff.filled = 0;
        /* Set the prefix */
        if (!endFrame) {
            size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize);
            mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize;
            mtctx->inBuff.prefix.size = newPrefixSize;
        } else {   /* endFrame==1 => no need for another input buffer */
            mtctx->inBuff.prefix = kNullRange;
            mtctx->frameEnded = endFrame;
            if (mtctx->nextJobID == 0) {
                /* single job exception : checksum is already calculated directly within worker thread */
                mtctx->params.fParams.checksumFlag = 0;
        }   }

        if ( (srcSize == 0)
          && (mtctx->nextJobID>0)/*single job must also write frame header*/ ) {
            DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame");
            assert(endOp == ZSTD_e_end);  /* only possible case : need to end the frame with an empty last block */
            ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID);
            mtctx->nextJobID++;
            return 0;
        }
    }

    DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes  (end:%u, jobNb == %u (mod:%u))",
                mtctx->nextJobID,
                (U32)mtctx->jobs[jobID].src.size,
                mtctx->jobs[jobID].lastJob,
                mtctx->nextJobID,
                jobID);
    if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) {
        mtctx->nextJobID++;
        mtctx->jobReady = 0;
    } else {
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID);
        mtctx->jobReady = 1;
    }
    return 0;
}


/*! ZSTDMT_flushProduced() :
 *  flush whatever data has been produced but not yet flushed in current job.
 *  move to next job if current one is fully flushed.
 * `output` : `pos` will be updated with amount of data flushed.
 * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush.
 * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */
static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end)
{
    unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask;
    DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)",
                blockToFlush, mtctx->doneJobID, mtctx->nextJobID);
    assert(output->size >= output->pos);

    ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
    if (  blockToFlush
      && (mtctx->doneJobID < mtctx->nextJobID) ) {
        assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize);
        while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) {  /* nothing to flush */
            if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) {
                DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none",
                            mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size);
                break;
            }
            DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)",
                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
            ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex);  /* block when nothing to flush but some to come */
    }   }

    /* try to flush something */
    {   size_t cSize = mtctx->jobs[wJobID].cSize;                  /* shared */
        size_t const srcConsumed = mtctx->jobs[wJobID].consumed;   /* shared */
        size_t const srcSize = mtctx->jobs[wJobID].src.size;       /* read-only, could be done after mutex lock, but no-declaration-after-statement */
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
        if (ZSTD_isError(cSize)) {
            DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s",
                        mtctx->doneJobID, ZSTD_getErrorName(cSize));
            ZSTDMT_waitForAllJobsCompleted(mtctx);
            ZSTDMT_releaseAllJobResources(mtctx);
            return cSize;
        }
        /* add frame checksum if necessary (can only happen once) */
        assert(srcConsumed <= srcSize);
        if ( (srcConsumed == srcSize)   /* job completed -> worker no longer active */
          && mtctx->jobs[wJobID].frameChecksumNeeded ) {
            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
            DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum);
            MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum);
            cSize += 4;
            mtctx->jobs[wJobID].cSize += 4;  /* can write this shared value, as worker is no longer active */
            mtctx->jobs[wJobID].frameChecksumNeeded = 0;
        }

        if (cSize > 0) {   /* compression is ongoing or completed */
            size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);
            DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)",
                        (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize);
            assert(mtctx->doneJobID < mtctx->nextJobID);
            assert(cSize >= mtctx->jobs[wJobID].dstFlushed);
            assert(mtctx->jobs[wJobID].dstBuff.start != NULL);
            if (toFlush > 0) {
                ZSTD_memcpy((char*)output->dst + output->pos,
                            (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed,
                            toFlush);
            }
            output->pos += toFlush;
            mtctx->jobs[wJobID].dstFlushed += toFlush;   /* can write : this value is only used by mtctx */

            if ( (srcConsumed == srcSize)    /* job is completed */
              && (mtctx->jobs[wJobID].dstFlushed == cSize) ) {   /* output buffer fully flushed => free this job position */
                DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one",
                            mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
                ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff);
                DEBUGLOG(5, "dstBuffer released");
                mtctx->jobs[wJobID].dstBuff = g_nullBuffer;
                mtctx->jobs[wJobID].cSize = 0;   /* ensure this job slot is considered "not started" in future check */
                mtctx->consumed += srcSize;
                mtctx->produced += cSize;
                mtctx->doneJobID++;
        }   }

        /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */
        if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed);
        if (srcSize > srcConsumed) return 1;   /* current job not completely compressed */
    }
    if (mtctx->doneJobID < mtctx->nextJobID) return 1;   /* some more jobs ongoing */
    if (mtctx->jobReady) return 1;   /* one job is ready to push, just not yet in the list */
    if (mtctx->inBuff.filled > 0) return 1;   /* input is not empty, and still needs to be converted into a job */
    mtctx->allJobsCompleted = mtctx->frameEnded;   /* all jobs are entirely flushed => if this one is last one, frame is completed */
    if (end == ZSTD_e_end) return !mtctx->frameEnded;  /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */
    return 0;   /* internal buffers fully flushed */
}
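
/* Minimal caller sketch (illustrative, not compiled) : how the return
 * convention above is typically consumed. exampleDrain() is a hypothetical
 * helper that keeps flushing until the frame completes or output is full. */
#if 0
static size_t exampleDrain(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
{
    size_t remaining = 1;
    while (remaining != 0 && output->pos < output->size) {
        remaining = ZSTDMT_flushProduced(mtctx, output, 1 /* blockToFlush */, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;   /* a worker reported a compression error */
    }
    return remaining;   /* 0 : frame completed ; >0 : more output space needed */
}
#endif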

/**
 * Returns the range of data used by the earliest job that is not yet complete.
 * If the data of the first job is broken up into two segments, we cover both
 * sections.
 */
static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
{
    unsigned const firstJobID = mtctx->doneJobID;
    unsigned const lastJobID = mtctx->nextJobID;
    unsigned jobID;

    for (jobID = firstJobID; jobID < lastJobID; ++jobID) {
        unsigned const wJobID = jobID & mtctx->jobIDMask;
        size_t consumed;

        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
        consumed = mtctx->jobs[wJobID].consumed;
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);

        if (consumed < mtctx->jobs[wJobID].src.size) {
            range_t range = mtctx->jobs[wJobID].prefix;
            if (range.size == 0) {
                /* Empty prefix */
                range = mtctx->jobs[wJobID].src;
            }
            /* Job source in multiple segments not supported yet */
            assert(range.start <= mtctx->jobs[wJobID].src.start);
            return range;
        }
    }
    return kNullRange;
}
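
/* Illustrative note : in the round buffer, a job's prefix (when present) sits
 * immediately before its src, so returning the prefix range marks the earliest
 * byte still in use by that job. A source genuinely split into two disjoint
 * segments is not supported yet, which is what the assert above documents. */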

/**
 * Returns non-zero iff buffer and range overlap.
 */
static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
{
    BYTE const* const bufferStart = (BYTE const*)buffer.start;
    BYTE const* const rangeStart = (BYTE const*)range.start;

    if (rangeStart == NULL || bufferStart == NULL)
        return 0;

    {
        BYTE const* const bufferEnd = bufferStart + buffer.capacity;
        BYTE const* const rangeEnd = rangeStart + range.size;

        /* Empty ranges cannot overlap */
        if (bufferStart == bufferEnd || rangeStart == rangeEnd)
            return 0;

        return bufferStart < rangeEnd && rangeStart < bufferEnd;
    }
}
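
/* Worked example (hypothetical addresses) : buffer = [100, 200) and
 * range = [180, 240) overlap, since 100 < 240 && 180 < 200.
 * buffer = [100, 200) and range = [200, 240) do not : 200 < 200 fails.
 * This is the standard half-open interval intersection test. */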

static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
{
    range_t extDict;
    range_t prefix;

    DEBUGLOG(5, "ZSTDMT_doesOverlapWindow");
    extDict.start = window.dictBase + window.lowLimit;
    extDict.size = window.dictLimit - window.lowLimit;

    prefix.start = window.base + window.dictLimit;
    prefix.size = window.nextSrc - (window.base + window.dictLimit);
    DEBUGLOG(5, "extDict [0x%zx, 0x%zx)",
                (size_t)extDict.start,
                (size_t)extDict.start + extDict.size);
    DEBUGLOG(5, "prefix [0x%zx, 0x%zx)",
                (size_t)prefix.start,
                (size_t)prefix.start + prefix.size);

    return ZSTDMT_isOverlapped(buffer, extDict)
        || ZSTDMT_isOverlapped(buffer, prefix);
}

static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer)
{
    if (mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable) {
        ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
        DEBUGLOG(5, "ZSTDMT_waitForLdmComplete");
        DEBUGLOG(5, "source [0x%zx, 0x%zx)",
                    (size_t)buffer.start,
                    (size_t)buffer.start + buffer.capacity);
        ZSTD_PTHREAD_MUTEX_LOCK(mutex);
        while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) {
            DEBUGLOG(5, "Waiting for LDM to finish...");
            ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex);
        }
        DEBUGLOG(6, "Done waiting for LDM to finish");
        ZSTD_pthread_mutex_unlock(mutex);
    }
}

/**
 * Attempts to set the inBuff to the next section to fill.
 * If any part of the new section is still in use we give up.
 * Returns non-zero if the buffer is filled.
 */
static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
{
    range_t const inUse = ZSTDMT_getInputDataInUse(mtctx);
    size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos;
    size_t const target = mtctx->targetSectionSize;
    buffer_t buffer;

    DEBUGLOG(5, "ZSTDMT_tryGetInputRange");
    assert(mtctx->inBuff.buffer.start == NULL);
    assert(mtctx->roundBuff.capacity >= target);

    if (spaceLeft < target) {
        /* ZSTD_invalidateRepCodes() doesn't work for extDict variants.
         * Simply copy the prefix to the beginning in that case.
         */
        BYTE* const start = (BYTE*)mtctx->roundBuff.buffer;
        size_t const prefixSize = mtctx->inBuff.prefix.size;

        buffer.start = start;
        buffer.capacity = prefixSize;
        if (ZSTDMT_isOverlapped(buffer, inUse)) {
            DEBUGLOG(5, "Waiting for buffer...");
            return 0;
        }
        ZSTDMT_waitForLdmComplete(mtctx, buffer);
        ZSTD_memmove(start, mtctx->inBuff.prefix.start, prefixSize);
        mtctx->inBuff.prefix.start = start;
        mtctx->roundBuff.pos = prefixSize;
    }
    buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos;
    buffer.capacity = target;

    if (ZSTDMT_isOverlapped(buffer, inUse)) {
        DEBUGLOG(5, "Waiting for buffer...");
        return 0;
    }
    assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix));

    ZSTDMT_waitForLdmComplete(mtctx, buffer);

    DEBUGLOG(5, "Using prefix range [%zx, %zx)",
                (size_t)mtctx->inBuff.prefix.start,
                (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size);
    DEBUGLOG(5, "Using source range [%zx, %zx)",
                (size_t)buffer.start,
                (size_t)buffer.start + buffer.capacity);


    mtctx->inBuff.buffer = buffer;
    mtctx->inBuff.filled = 0;
    assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity);
    return 1;
}
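
/* Illustrative wrap-around (hypothetical sizes) : with capacity = 22 MB,
 * target = 2 MB and pos = 21 MB, spaceLeft = 1 MB < target, so the current
 * prefix is moved to offset 0 and pos is reset to prefixSize before carving
 * out the next section :
 *   before : [ ..................... | prefix | 1 MB free ]
 *   after  : [ prefix | next section (2 MB) | ........... ]
 */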

typedef struct {
    size_t toLoad;  /* The number of bytes to load from the input. */
    int flush;      /* Boolean declaring if we must flush because we found a synchronization point. */
} syncPoint_t;

/**
 * Searches through the input for a synchronization point. If one is found, we
 * will instruct the caller to flush, and return the number of bytes to load.
 * Otherwise, we will load as many bytes as possible and instruct the caller
 * to continue as normal.
 */
static syncPoint_t
findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
{
    BYTE const* const istart = (BYTE const*)input.src + input.pos;
    U64 const primePower = mtctx->rsync.primePower;
    U64 const hitMask = mtctx->rsync.hitMask;

    syncPoint_t syncPoint;
    U64 hash;
    BYTE const* prev;
    size_t pos;

    syncPoint.toLoad = MIN(input.size - input.pos, mtctx->targetSectionSize - mtctx->inBuff.filled);
    syncPoint.flush = 0;
    if (!mtctx->params.rsyncable)
        /* Rsync is disabled. */
        return syncPoint;
    if (mtctx->inBuff.filled + input.size - input.pos < RSYNC_MIN_BLOCK_SIZE)
        /* We don't emit synchronization points if it would produce too small blocks.
         * We don't have enough input to find a synchronization point, so don't look.
         */
        return syncPoint;
    if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH)
        /* Not enough to compute the hash.
         * We will miss any synchronization points in this RSYNC_LENGTH byte
         * window. However, since it depends only on the internal buffers, if the
         * state is already synchronized, we will remain synchronized.
         * Additionally, the probability that we miss a synchronization point is
         * low: RSYNC_LENGTH / targetSectionSize.
         */
        return syncPoint;
    /* Initialize the loop variables. */
    if (mtctx->inBuff.filled < RSYNC_MIN_BLOCK_SIZE) {
        /* We don't need to scan the first RSYNC_MIN_BLOCK_SIZE positions
         * because they can't possibly be a sync point. So we can start
         * part way through the input buffer.
         */
        pos = RSYNC_MIN_BLOCK_SIZE - mtctx->inBuff.filled;
        if (pos >= RSYNC_LENGTH) {
            prev = istart + pos - RSYNC_LENGTH;
            hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
        } else {
            assert(mtctx->inBuff.filled >= RSYNC_LENGTH);
            prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
            hash = ZSTD_rollingHash_compute(prev + pos, (RSYNC_LENGTH - pos));
            hash = ZSTD_rollingHash_append(hash, istart, pos);
        }
    } else {
        /* We have enough bytes buffered to initialize the hash,
         * and have processed enough bytes to find a sync point.
         * Start scanning at the beginning of the input.
         */
        assert(mtctx->inBuff.filled >= RSYNC_MIN_BLOCK_SIZE);
        assert(RSYNC_MIN_BLOCK_SIZE >= RSYNC_LENGTH);
        pos = 0;
        prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
        hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
        if ((hash & hitMask) == hitMask) {
            /* We're already at a sync point so don't load any more until
             * we're able to flush this sync point.
             * This likely happened because the job table was full so we
             * couldn't add our job.
             */
            syncPoint.toLoad = 0;
            syncPoint.flush = 1;
            return syncPoint;
        }
    }
    /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll
     * through the input. If we hit a synchronization point, then cut the
     * job off, and tell the compressor to flush the job. Otherwise, load
     * all the bytes and continue as normal.
     * If we go too long without a synchronization point (targetSectionSize)
     * then a block will be emitted anyways, but this is okay, since if we
     * are already synchronized we will remain synchronized.
     */
    for (; pos < syncPoint.toLoad; ++pos) {
        BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH];
        assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash);
        hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower);
        assert(mtctx->inBuff.filled + pos >= RSYNC_MIN_BLOCK_SIZE);
        if ((hash & hitMask) == hitMask) {
            syncPoint.toLoad = pos + 1;
            syncPoint.flush = 1;
            break;
        }
    }
    return syncPoint;
}
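
/* Minimal sketch (not compiled) of the rolling-hash property the scan above
 * relies on : rotating out the oldest byte and in the newest must equal a
 * fresh hash of the shifted window. Assumes the ZSTD_rollingHash_* helpers
 * and RSYNC_LENGTH visible in this translation unit. */
#if 0
static void exampleRollingHashCheck(const BYTE* data, size_t size)
{
    U64 const primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH);
    U64 hash = ZSTD_rollingHash_compute(data, RSYNC_LENGTH);
    size_t pos;
    for (pos = RSYNC_LENGTH; pos < size; ++pos) {
        /* window moves from [pos-RSYNC_LENGTH, pos) to [pos-RSYNC_LENGTH+1, pos+1) */
        hash = ZSTD_rollingHash_rotate(hash, data[pos - RSYNC_LENGTH], data[pos], primePower);
        assert(hash == ZSTD_rollingHash_compute(data + pos - RSYNC_LENGTH + 1, RSYNC_LENGTH));
    }
}
#endif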

size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx)
{
    size_t hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled;
    if (hintInSize==0) hintInSize = mtctx->targetSectionSize;
    return hintInSize;
}

/** ZSTDMT_compressStream_generic() :
 *  internal use only - exposed to be invoked from zstd_compress.c
 *  assumption : output and input are valid (pos <= size)
 * @return : minimum amount of data remaining to flush, 0 if none */
size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
                                     ZSTD_outBuffer* output,
                                     ZSTD_inBuffer* input,
                                     ZSTD_EndDirective endOp)
{
    unsigned forwardInputProgress = 0;
    DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)",
                (U32)endOp, (U32)(input->size - input->pos));
    assert(output->pos <= output->size);
    assert(input->pos <= input->size);

    if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
        /* current frame being ended. Only flush/end are allowed */
        return ERROR(stage_wrong);
    }

    /* fill input buffer */
    if ( (!mtctx->jobReady)
      && (input->size > input->pos) ) {   /* support NULL input */
        if (mtctx->inBuff.buffer.start == NULL) {
            assert(mtctx->inBuff.filled == 0);  /* Can't fill an empty buffer */
            if (!ZSTDMT_tryGetInputRange(mtctx)) {
                /* It is only possible for this operation to fail if there are
                 * still compression jobs ongoing.
                 */
                DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed");
                assert(mtctx->doneJobID != mtctx->nextJobID);
            } else
                DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start);
        }
        if (mtctx->inBuff.buffer.start != NULL) {
            syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input);
            if (syncPoint.flush && endOp == ZSTD_e_continue) {
                endOp = ZSTD_e_flush;
            }
            assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);
            DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
                        (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
            ZSTD_memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad);
            input->pos += syncPoint.toLoad;
            mtctx->inBuff.filled += syncPoint.toLoad;
            forwardInputProgress = syncPoint.toLoad>0;
        }
    }
    if ((input->pos < input->size) && (endOp == ZSTD_e_end)) {
        /* Can't end yet because the input is not fully consumed.
         * We are in one of these cases:
         * - mtctx->inBuff is NULL & empty: we couldn't get an input buffer so don't create a new job.
         * - We filled the input buffer: flush this job but don't end the frame.
         * - We hit a synchronization point: flush this job but don't end the frame.
         */
        assert(mtctx->inBuff.filled == 0 || mtctx->inBuff.filled == mtctx->targetSectionSize || mtctx->params.rsyncable);
        endOp = ZSTD_e_flush;
    }

    if ( (mtctx->jobReady)
      || (mtctx->inBuff.filled >= mtctx->targetSectionSize)  /* filled enough : let's compress */
      || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0))  /* something to flush : let's go */
      || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) {   /* must finish the frame with a zero-size block */
        size_t const jobSize = mtctx->inBuff.filled;
        assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) , "");
    }

    /* check for potential compressed data ready to be flushed */
    {   size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp);  /* block if there was no forward input progress */
        if (input->pos < input->size) return MAX(remainingToFlush, 1);  /* input not consumed : do not end flush yet */
        DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = %u", (U32)remainingToFlush);
        return remainingToFlush;
    }
}
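
/* Minimal caller sketch (illustrative, not compiled) : the usual driving
 * pattern is to feed input with ZSTD_e_continue, then repeat ZSTD_e_end
 * until 0 is returned. exampleEndStream() is a hypothetical helper. */
#if 0
static size_t exampleEndStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
    for (;;) {
        size_t const remaining = ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_end);
        if (ZSTD_isError(remaining)) return remaining;
        if (remaining == 0) return 0;                         /* frame fully flushed */
        if (output->pos == output->size) return remaining;    /* caller must provide more room */
    }
}
#endif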