1 /* 2 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. 3 * All rights reserved. 4 * 5 * This source code is licensed under both the BSD-style license (found in the 6 * LICENSE file in the root directory of this source tree) and the GPLv2 (found 7 * in the COPYING file in the root directory of this source tree). 8 * You may select, at your option, one of the above-listed licenses. 9 */ 10 11 12 /* ====== Compiler specifics ====== */ 13 #if defined(_MSC_VER) 14 # pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ 15 #endif 16 17 18 /* ====== Constants ====== */ 19 #define ZSTDMT_OVERLAPLOG_DEFAULT 0 20 21 22 /* ====== Dependencies ====== */ 23 #include <string.h> /* memcpy, memset */ 24 #include <limits.h> /* INT_MAX, UINT_MAX */ 25 #include "mem.h" /* MEM_STATIC */ 26 #include "pool.h" /* threadpool */ 27 #include "threading.h" /* mutex */ 28 #include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */ 29 #include "zstd_ldm.h" 30 #include "zstdmt_compress.h" 31 32 /* Guards code to support resizing the SeqPool. 33 * We will want to resize the SeqPool to save memory in the future. 34 * Until then, comment the code out since it is unused. 35 */ 36 #define ZSTD_RESIZE_SEQPOOL 0 37 38 /* ====== Debug ====== */ 39 #if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \ 40 && !defined(_MSC_VER) \ 41 && !defined(__MINGW32__) 42 43 # include <stdio.h> 44 # include <unistd.h> 45 # include <sys/times.h> 46 47 # define DEBUG_PRINTHEX(l,p,n) { \ 48 unsigned debug_u; \ 49 for (debug_u=0; debug_u<(n); debug_u++) \ 50 RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \ 51 RAWLOG(l, " \n"); \ 52 } 53 54 static unsigned long long GetCurrentClockTimeMicroseconds(void) 55 { 56 static clock_t _ticksPerSecond = 0; 57 if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK); 58 59 { struct tms junk; clock_t newTicks = (clock_t) times(&junk); 60 return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond); 61 } } 62 63 #define MUTEX_WAIT_TIME_DLEVEL 6 64 #define ZSTD_PTHREAD_MUTEX_LOCK(mutex) { \ 65 if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) { \ 66 unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \ 67 ZSTD_pthread_mutex_lock(mutex); \ 68 { unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \ 69 unsigned long long const elapsedTime = (afterTime-beforeTime); \ 70 if (elapsedTime > 1000) { /* or whatever threshold you like; I'm using 1 millisecond here */ \ 71 DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \ 72 elapsedTime, #mutex); \ 73 } } \ 74 } else { \ 75 ZSTD_pthread_mutex_lock(mutex); \ 76 } \ 77 } 78 79 #else 80 81 # define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m) 82 # define DEBUG_PRINTHEX(l,p,n) {} 83 84 #endif 85 86 87 /* ===== Buffer Pool ===== */ 88 /* a single Buffer Pool can be invoked from multiple threads in parallel */ 89 90 typedef struct buffer_s { 91 void* start; 92 size_t capacity; 93 } buffer_t; 94 95 static const buffer_t g_nullBuffer = { NULL, 0 }; 96 97 typedef struct ZSTDMT_bufferPool_s { 98 ZSTD_pthread_mutex_t poolMutex; 99 size_t bufferSize; 100 unsigned totalBuffers; 101 unsigned nbBuffers; 102 ZSTD_customMem cMem; 103 buffer_t bTable[1]; /* variable size */ 104 } ZSTDMT_bufferPool; 105 106 static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem) 107 { 108 unsigned const maxNbBuffers = 2*nbWorkers + 3; 109 ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc( 110 sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem); 111 if (bufPool==NULL) return NULL; 112 if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) { 113 ZSTD_free(bufPool, cMem); 114 return NULL; 115 } 116 bufPool->bufferSize = 64 KB; 117 bufPool->totalBuffers = maxNbBuffers; 118 bufPool->nbBuffers = 0; 119 bufPool->cMem = cMem; 120 return bufPool; 121 } 122 123 static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool) 124 { 125 unsigned u; 126 DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool); 127 if (!bufPool) return; /* compatibility with free on NULL */ 128 for (u=0; u<bufPool->totalBuffers; u++) { 129 DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start); 130 ZSTD_free(bufPool->bTable[u].start, bufPool->cMem); 131 } 132 ZSTD_pthread_mutex_destroy(&bufPool->poolMutex); 133 ZSTD_free(bufPool, bufPool->cMem); 134 } 135 136 /* only works at initialization, not during compression */ 137 static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool) 138 { 139 size_t const poolSize = sizeof(*bufPool) 140 + (bufPool->totalBuffers - 1) * sizeof(buffer_t); 141 unsigned u; 142 size_t totalBufferSize = 0; 143 ZSTD_pthread_mutex_lock(&bufPool->poolMutex); 144 for (u=0; u<bufPool->totalBuffers; u++) 145 totalBufferSize += bufPool->bTable[u].capacity; 146 ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); 147 148 return poolSize + totalBufferSize; 149 } 150 151 /* ZSTDMT_setBufferSize() : 152 * all future buffers provided by this buffer pool will have _at least_ this size 153 * note : it's better for all buffers to have same size, 154 * as they become freely interchangeable, reducing malloc/free usages and memory fragmentation */ 155 static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize) 156 { 157 ZSTD_pthread_mutex_lock(&bufPool->poolMutex); 158 DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", (U32)bSize); 159 bufPool->bufferSize = bSize; 160 ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); 161 } 162 163 164 static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, U32 nbWorkers) 165 { 166 unsigned const maxNbBuffers = 2*nbWorkers + 3; 167 if (srcBufPool==NULL) return NULL; 168 if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */ 169 return srcBufPool; 170 /* need a larger buffer pool */ 171 { ZSTD_customMem const cMem = srcBufPool->cMem; 172 size_t const bSize = srcBufPool->bufferSize; /* forward parameters */ 173 ZSTDMT_bufferPool* newBufPool; 174 ZSTDMT_freeBufferPool(srcBufPool); 175 newBufPool = ZSTDMT_createBufferPool(nbWorkers, cMem); 176 if (newBufPool==NULL) return newBufPool; 177 ZSTDMT_setBufferSize(newBufPool, bSize); 178 return newBufPool; 179 } 180 } 181 182 /** ZSTDMT_getBuffer() : 183 * assumption : bufPool must be valid 184 * @return : a buffer, with start pointer and size 185 * note: allocation may fail, in this case, start==NULL and size==0 */ 186 static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool) 187 { 188 size_t const bSize = bufPool->bufferSize; 189 DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize); 190 ZSTD_pthread_mutex_lock(&bufPool->poolMutex); 191 if (bufPool->nbBuffers) { /* try to use an existing buffer */ 192 buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)]; 193 size_t const availBufferSize = buf.capacity; 194 bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer; 195 if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) { 196 /* large enough, but not too much */ 197 DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u", 198 bufPool->nbBuffers, (U32)buf.capacity); 199 ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); 200 return buf; 201 } 202 /* size conditions not respected : scratch this buffer, create new one */ 203 DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing"); 204 ZSTD_free(buf.start, bufPool->cMem); 205 } 206 ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); 207 /* create new buffer */ 208 DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer"); 209 { buffer_t buffer; 210 void* const start = ZSTD_malloc(bSize, bufPool->cMem); 211 buffer.start = start; /* note : start can be NULL if malloc fails ! */ 212 buffer.capacity = (start==NULL) ? 0 : bSize; 213 if (start==NULL) { 214 DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!"); 215 } else { 216 DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize); 217 } 218 return buffer; 219 } 220 } 221 222 #if ZSTD_RESIZE_SEQPOOL 223 /** ZSTDMT_resizeBuffer() : 224 * assumption : bufPool must be valid 225 * @return : a buffer that is at least the buffer pool buffer size. 226 * If a reallocation happens, the data in the input buffer is copied. 227 */ 228 static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer) 229 { 230 size_t const bSize = bufPool->bufferSize; 231 if (buffer.capacity < bSize) { 232 void* const start = ZSTD_malloc(bSize, bufPool->cMem); 233 buffer_t newBuffer; 234 newBuffer.start = start; 235 newBuffer.capacity = start == NULL ? 0 : bSize; 236 if (start != NULL) { 237 assert(newBuffer.capacity >= buffer.capacity); 238 memcpy(newBuffer.start, buffer.start, buffer.capacity); 239 DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize); 240 return newBuffer; 241 } 242 DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!"); 243 } 244 return buffer; 245 } 246 #endif 247 248 /* store buffer for later re-use, up to pool capacity */ 249 static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf) 250 { 251 DEBUGLOG(5, "ZSTDMT_releaseBuffer"); 252 if (buf.start == NULL) return; /* compatible with release on NULL */ 253 ZSTD_pthread_mutex_lock(&bufPool->poolMutex); 254 if (bufPool->nbBuffers < bufPool->totalBuffers) { 255 bufPool->bTable[bufPool->nbBuffers++] = buf; /* stored for later use */ 256 DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u", 257 (U32)buf.capacity, (U32)(bufPool->nbBuffers-1)); 258 ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); 259 return; 260 } 261 ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); 262 /* Reached bufferPool capacity (should not happen) */ 263 DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing "); 264 ZSTD_free(buf.start, bufPool->cMem); 265 } 266 267 268 /* ===== Seq Pool Wrapper ====== */ 269 270 static rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0}; 271 272 typedef ZSTDMT_bufferPool ZSTDMT_seqPool; 273 274 static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool) 275 { 276 return ZSTDMT_sizeof_bufferPool(seqPool); 277 } 278 279 static rawSeqStore_t bufferToSeq(buffer_t buffer) 280 { 281 rawSeqStore_t seq = {NULL, 0, 0, 0}; 282 seq.seq = (rawSeq*)buffer.start; 283 seq.capacity = buffer.capacity / sizeof(rawSeq); 284 return seq; 285 } 286 287 static buffer_t seqToBuffer(rawSeqStore_t seq) 288 { 289 buffer_t buffer; 290 buffer.start = seq.seq; 291 buffer.capacity = seq.capacity * sizeof(rawSeq); 292 return buffer; 293 } 294 295 static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool) 296 { 297 if (seqPool->bufferSize == 0) { 298 return kNullRawSeqStore; 299 } 300 return bufferToSeq(ZSTDMT_getBuffer(seqPool)); 301 } 302 303 #if ZSTD_RESIZE_SEQPOOL 304 static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq) 305 { 306 return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq))); 307 } 308 #endif 309 310 static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq) 311 { 312 ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq)); 313 } 314 315 static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq) 316 { 317 ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq)); 318 } 319 320 static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem) 321 { 322 ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem); 323 if (seqPool == NULL) return NULL; 324 ZSTDMT_setNbSeq(seqPool, 0); 325 return seqPool; 326 } 327 328 static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool) 329 { 330 ZSTDMT_freeBufferPool(seqPool); 331 } 332 333 static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers) 334 { 335 return ZSTDMT_expandBufferPool(pool, nbWorkers); 336 } 337 338 339 /* ===== CCtx Pool ===== */ 340 /* a single CCtx Pool can be invoked from multiple threads in parallel */ 341 342 typedef struct { 343 ZSTD_pthread_mutex_t poolMutex; 344 int totalCCtx; 345 int availCCtx; 346 ZSTD_customMem cMem; 347 ZSTD_CCtx* cctx[1]; /* variable size */ 348 } ZSTDMT_CCtxPool; 349 350 /* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */ 351 static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) 352 { 353 int cid; 354 for (cid=0; cid<pool->totalCCtx; cid++) 355 ZSTD_freeCCtx(pool->cctx[cid]); /* note : compatible with free on NULL */ 356 ZSTD_pthread_mutex_destroy(&pool->poolMutex); 357 ZSTD_free(pool, pool->cMem); 358 } 359 360 /* ZSTDMT_createCCtxPool() : 361 * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */ 362 static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers, 363 ZSTD_customMem cMem) 364 { 365 ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc( 366 sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem); 367 assert(nbWorkers > 0); 368 if (!cctxPool) return NULL; 369 if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) { 370 ZSTD_free(cctxPool, cMem); 371 return NULL; 372 } 373 cctxPool->cMem = cMem; 374 cctxPool->totalCCtx = nbWorkers; 375 cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */ 376 cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem); 377 if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; } 378 DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers); 379 return cctxPool; 380 } 381 382 static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool, 383 int nbWorkers) 384 { 385 if (srcPool==NULL) return NULL; 386 if (nbWorkers <= srcPool->totalCCtx) return srcPool; /* good enough */ 387 /* need a larger cctx pool */ 388 { ZSTD_customMem const cMem = srcPool->cMem; 389 ZSTDMT_freeCCtxPool(srcPool); 390 return ZSTDMT_createCCtxPool(nbWorkers, cMem); 391 } 392 } 393 394 /* only works during initialization phase, not during compression */ 395 static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool) 396 { 397 ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); 398 { unsigned const nbWorkers = cctxPool->totalCCtx; 399 size_t const poolSize = sizeof(*cctxPool) 400 + (nbWorkers-1) * sizeof(ZSTD_CCtx*); 401 unsigned u; 402 size_t totalCCtxSize = 0; 403 for (u=0; u<nbWorkers; u++) { 404 totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]); 405 } 406 ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); 407 assert(nbWorkers > 0); 408 return poolSize + totalCCtxSize; 409 } 410 } 411 412 static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool) 413 { 414 DEBUGLOG(5, "ZSTDMT_getCCtx"); 415 ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); 416 if (cctxPool->availCCtx) { 417 cctxPool->availCCtx--; 418 { ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx]; 419 ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); 420 return cctx; 421 } } 422 ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); 423 DEBUGLOG(5, "create one more CCtx"); 424 return ZSTD_createCCtx_advanced(cctxPool->cMem); /* note : can be NULL, when creation fails ! */ 425 } 426 427 static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx) 428 { 429 if (cctx==NULL) return; /* compatibility with release on NULL */ 430 ZSTD_pthread_mutex_lock(&pool->poolMutex); 431 if (pool->availCCtx < pool->totalCCtx) 432 pool->cctx[pool->availCCtx++] = cctx; 433 else { 434 /* pool overflow : should not happen, since totalCCtx==nbWorkers */ 435 DEBUGLOG(4, "CCtx pool overflow : free cctx"); 436 ZSTD_freeCCtx(cctx); 437 } 438 ZSTD_pthread_mutex_unlock(&pool->poolMutex); 439 } 440 441 /* ==== Serial State ==== */ 442 443 typedef struct { 444 void const* start; 445 size_t size; 446 } range_t; 447 448 typedef struct { 449 /* All variables in the struct are protected by mutex. */ 450 ZSTD_pthread_mutex_t mutex; 451 ZSTD_pthread_cond_t cond; 452 ZSTD_CCtx_params params; 453 ldmState_t ldmState; 454 XXH64_state_t xxhState; 455 unsigned nextJobID; 456 /* Protects ldmWindow. 457 * Must be acquired after the main mutex when acquiring both. 458 */ 459 ZSTD_pthread_mutex_t ldmWindowMutex; 460 ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is updated */ 461 ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */ 462 } serialState_t; 463 464 static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params, size_t jobSize) 465 { 466 /* Adjust parameters */ 467 if (params.ldmParams.enableLdm) { 468 DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10); 469 ZSTD_ldm_adjustParameters(¶ms.ldmParams, ¶ms.cParams); 470 assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog); 471 assert(params.ldmParams.hashRateLog < 32); 472 serialState->ldmState.hashPower = 473 ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength); 474 } else { 475 memset(¶ms.ldmParams, 0, sizeof(params.ldmParams)); 476 } 477 serialState->nextJobID = 0; 478 if (params.fParams.checksumFlag) 479 XXH64_reset(&serialState->xxhState, 0); 480 if (params.ldmParams.enableLdm) { 481 ZSTD_customMem cMem = params.customMem; 482 unsigned const hashLog = params.ldmParams.hashLog; 483 size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t); 484 unsigned const bucketLog = 485 params.ldmParams.hashLog - params.ldmParams.bucketSizeLog; 486 size_t const bucketSize = (size_t)1 << bucketLog; 487 unsigned const prevBucketLog = 488 serialState->params.ldmParams.hashLog - 489 serialState->params.ldmParams.bucketSizeLog; 490 /* Size the seq pool tables */ 491 ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize)); 492 /* Reset the window */ 493 ZSTD_window_clear(&serialState->ldmState.window); 494 serialState->ldmWindow = serialState->ldmState.window; 495 /* Resize tables and output space if necessary. */ 496 if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) { 497 ZSTD_free(serialState->ldmState.hashTable, cMem); 498 serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_malloc(hashSize, cMem); 499 } 500 if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) { 501 ZSTD_free(serialState->ldmState.bucketOffsets, cMem); 502 serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_malloc(bucketSize, cMem); 503 } 504 if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets) 505 return 1; 506 /* Zero the tables */ 507 memset(serialState->ldmState.hashTable, 0, hashSize); 508 memset(serialState->ldmState.bucketOffsets, 0, bucketSize); 509 } 510 serialState->params = params; 511 serialState->params.jobSize = (U32)jobSize; 512 return 0; 513 } 514 515 static int ZSTDMT_serialState_init(serialState_t* serialState) 516 { 517 int initError = 0; 518 memset(serialState, 0, sizeof(*serialState)); 519 initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL); 520 initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL); 521 initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL); 522 initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL); 523 return initError; 524 } 525 526 static void ZSTDMT_serialState_free(serialState_t* serialState) 527 { 528 ZSTD_customMem cMem = serialState->params.customMem; 529 ZSTD_pthread_mutex_destroy(&serialState->mutex); 530 ZSTD_pthread_cond_destroy(&serialState->cond); 531 ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex); 532 ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond); 533 ZSTD_free(serialState->ldmState.hashTable, cMem); 534 ZSTD_free(serialState->ldmState.bucketOffsets, cMem); 535 } 536 537 static void ZSTDMT_serialState_update(serialState_t* serialState, 538 ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore, 539 range_t src, unsigned jobID) 540 { 541 /* Wait for our turn */ 542 ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex); 543 while (serialState->nextJobID < jobID) { 544 DEBUGLOG(5, "wait for serialState->cond"); 545 ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex); 546 } 547 /* A future job may error and skip our job */ 548 if (serialState->nextJobID == jobID) { 549 /* It is now our turn, do any processing necessary */ 550 if (serialState->params.ldmParams.enableLdm) { 551 size_t error; 552 assert(seqStore.seq != NULL && seqStore.pos == 0 && 553 seqStore.size == 0 && seqStore.capacity > 0); 554 assert(src.size <= serialState->params.jobSize); 555 ZSTD_window_update(&serialState->ldmState.window, src.start, src.size); 556 error = ZSTD_ldm_generateSequences( 557 &serialState->ldmState, &seqStore, 558 &serialState->params.ldmParams, src.start, src.size); 559 /* We provide a large enough buffer to never fail. */ 560 assert(!ZSTD_isError(error)); (void)error; 561 /* Update ldmWindow to match the ldmState.window and signal the main 562 * thread if it is waiting for a buffer. 563 */ 564 ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex); 565 serialState->ldmWindow = serialState->ldmState.window; 566 ZSTD_pthread_cond_signal(&serialState->ldmWindowCond); 567 ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex); 568 } 569 if (serialState->params.fParams.checksumFlag && src.size > 0) 570 XXH64_update(&serialState->xxhState, src.start, src.size); 571 } 572 /* Now it is the next jobs turn */ 573 serialState->nextJobID++; 574 ZSTD_pthread_cond_broadcast(&serialState->cond); 575 ZSTD_pthread_mutex_unlock(&serialState->mutex); 576 577 if (seqStore.size > 0) { 578 size_t const err = ZSTD_referenceExternalSequences( 579 jobCCtx, seqStore.seq, seqStore.size); 580 assert(serialState->params.ldmParams.enableLdm); 581 assert(!ZSTD_isError(err)); 582 (void)err; 583 } 584 } 585 586 static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState, 587 unsigned jobID, size_t cSize) 588 { 589 ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex); 590 if (serialState->nextJobID <= jobID) { 591 assert(ZSTD_isError(cSize)); (void)cSize; 592 DEBUGLOG(5, "Skipping past job %u because of error", jobID); 593 serialState->nextJobID = jobID + 1; 594 ZSTD_pthread_cond_broadcast(&serialState->cond); 595 596 ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex); 597 ZSTD_window_clear(&serialState->ldmWindow); 598 ZSTD_pthread_cond_signal(&serialState->ldmWindowCond); 599 ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex); 600 } 601 ZSTD_pthread_mutex_unlock(&serialState->mutex); 602 603 } 604 605 606 /* ------------------------------------------ */ 607 /* ===== Worker thread ===== */ 608 /* ------------------------------------------ */ 609 610 static const range_t kNullRange = { NULL, 0 }; 611 612 typedef struct { 613 size_t consumed; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */ 614 size_t cSize; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */ 615 ZSTD_pthread_mutex_t job_mutex; /* Thread-safe - used by mtctx and worker */ 616 ZSTD_pthread_cond_t job_cond; /* Thread-safe - used by mtctx and worker */ 617 ZSTDMT_CCtxPool* cctxPool; /* Thread-safe - used by mtctx and (all) workers */ 618 ZSTDMT_bufferPool* bufPool; /* Thread-safe - used by mtctx and (all) workers */ 619 ZSTDMT_seqPool* seqPool; /* Thread-safe - used by mtctx and (all) workers */ 620 serialState_t* serial; /* Thread-safe - used by mtctx and (all) workers */ 621 buffer_t dstBuff; /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */ 622 range_t prefix; /* set by mtctx, then read by worker & mtctx => no barrier */ 623 range_t src; /* set by mtctx, then read by worker & mtctx => no barrier */ 624 unsigned jobID; /* set by mtctx, then read by worker => no barrier */ 625 unsigned firstJob; /* set by mtctx, then read by worker => no barrier */ 626 unsigned lastJob; /* set by mtctx, then read by worker => no barrier */ 627 ZSTD_CCtx_params params; /* set by mtctx, then read by worker => no barrier */ 628 const ZSTD_CDict* cdict; /* set by mtctx, then read by worker => no barrier */ 629 unsigned long long fullFrameSize; /* set by mtctx, then read by worker => no barrier */ 630 size_t dstFlushed; /* used only by mtctx */ 631 unsigned frameChecksumNeeded; /* used only by mtctx */ 632 } ZSTDMT_jobDescription; 633 634 #define JOB_ERROR(e) { \ 635 ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); \ 636 job->cSize = e; \ 637 ZSTD_pthread_mutex_unlock(&job->job_mutex); \ 638 goto _endJob; \ 639 } 640 641 /* ZSTDMT_compressionJob() is a POOL_function type */ 642 static void ZSTDMT_compressionJob(void* jobDescription) 643 { 644 ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription; 645 ZSTD_CCtx_params jobParams = job->params; /* do not modify job->params ! copy it, modify the copy */ 646 ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool); 647 rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool); 648 buffer_t dstBuff = job->dstBuff; 649 size_t lastCBlockSize = 0; 650 651 /* resources */ 652 if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation)); 653 if (dstBuff.start == NULL) { /* streaming job : doesn't provide a dstBuffer */ 654 dstBuff = ZSTDMT_getBuffer(job->bufPool); 655 if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation)); 656 job->dstBuff = dstBuff; /* this value can be read in ZSTDMT_flush, when it copies the whole job */ 657 } 658 if (jobParams.ldmParams.enableLdm && rawSeqStore.seq == NULL) 659 JOB_ERROR(ERROR(memory_allocation)); 660 661 /* Don't compute the checksum for chunks, since we compute it externally, 662 * but write it in the header. 663 */ 664 if (job->jobID != 0) jobParams.fParams.checksumFlag = 0; 665 /* Don't run LDM for the chunks, since we handle it externally */ 666 jobParams.ldmParams.enableLdm = 0; 667 668 669 /* init */ 670 if (job->cdict) { 671 size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, jobParams, job->fullFrameSize); 672 assert(job->firstJob); /* only allowed for first job */ 673 if (ZSTD_isError(initError)) JOB_ERROR(initError); 674 } else { /* srcStart points at reloaded section */ 675 U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size; 676 { size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob); 677 if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError); 678 } 679 { size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, 680 job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */ 681 ZSTD_dtlm_fast, 682 NULL, /*cdict*/ 683 jobParams, pledgedSrcSize); 684 if (ZSTD_isError(initError)) JOB_ERROR(initError); 685 } } 686 687 /* Perform serial step as early as possible, but after CCtx initialization */ 688 ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID); 689 690 if (!job->firstJob) { /* flush and overwrite frame header when it's not first job */ 691 size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0); 692 if (ZSTD_isError(hSize)) JOB_ERROR(hSize); 693 DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize); 694 ZSTD_invalidateRepCodes(cctx); 695 } 696 697 /* compress */ 698 { size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX; 699 int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize); 700 const BYTE* ip = (const BYTE*) job->src.start; 701 BYTE* const ostart = (BYTE*)dstBuff.start; 702 BYTE* op = ostart; 703 BYTE* oend = op + dstBuff.capacity; 704 int chunkNb; 705 if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize); /* check overflow */ 706 DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks); 707 assert(job->cSize == 0); 708 for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) { 709 size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize); 710 if (ZSTD_isError(cSize)) JOB_ERROR(cSize); 711 ip += chunkSize; 712 op += cSize; assert(op < oend); 713 /* stats */ 714 ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); 715 job->cSize += cSize; 716 job->consumed = chunkSize * chunkNb; 717 DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)", 718 (U32)cSize, (U32)job->cSize); 719 ZSTD_pthread_cond_signal(&job->job_cond); /* warns some more data is ready to be flushed */ 720 ZSTD_pthread_mutex_unlock(&job->job_mutex); 721 } 722 /* last block */ 723 assert(chunkSize > 0); 724 assert((chunkSize & (chunkSize - 1)) == 0); /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */ 725 if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) { 726 size_t const lastBlockSize1 = job->src.size & (chunkSize-1); 727 size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1; 728 size_t const cSize = (job->lastJob) ? 729 ZSTD_compressEnd (cctx, op, oend-op, ip, lastBlockSize) : 730 ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize); 731 if (ZSTD_isError(cSize)) JOB_ERROR(cSize); 732 lastCBlockSize = cSize; 733 } } 734 735 _endJob: 736 ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize); 737 if (job->prefix.size > 0) 738 DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start); 739 DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start); 740 /* release resources */ 741 ZSTDMT_releaseSeq(job->seqPool, rawSeqStore); 742 ZSTDMT_releaseCCtx(job->cctxPool, cctx); 743 /* report */ 744 ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); 745 if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0); 746 job->cSize += lastCBlockSize; 747 job->consumed = job->src.size; /* when job->consumed == job->src.size , compression job is presumed completed */ 748 ZSTD_pthread_cond_signal(&job->job_cond); 749 ZSTD_pthread_mutex_unlock(&job->job_mutex); 750 } 751 752 753 /* ------------------------------------------ */ 754 /* ===== Multi-threaded compression ===== */ 755 /* ------------------------------------------ */ 756 757 typedef struct { 758 range_t prefix; /* read-only non-owned prefix buffer */ 759 buffer_t buffer; 760 size_t filled; 761 } inBuff_t; 762 763 typedef struct { 764 BYTE* buffer; /* The round input buffer. All jobs get references 765 * to pieces of the buffer. ZSTDMT_tryGetInputRange() 766 * handles handing out job input buffers, and makes 767 * sure it doesn't overlap with any pieces still in use. 768 */ 769 size_t capacity; /* The capacity of buffer. */ 770 size_t pos; /* The position of the current inBuff in the round 771 * buffer. Updated past the end if the inBuff once 772 * the inBuff is sent to the worker thread. 773 * pos <= capacity. 774 */ 775 } roundBuff_t; 776 777 static const roundBuff_t kNullRoundBuff = {NULL, 0, 0}; 778 779 #define RSYNC_LENGTH 32 780 781 typedef struct { 782 U64 hash; 783 U64 hitMask; 784 U64 primePower; 785 } rsyncState_t; 786 787 struct ZSTDMT_CCtx_s { 788 POOL_ctx* factory; 789 ZSTDMT_jobDescription* jobs; 790 ZSTDMT_bufferPool* bufPool; 791 ZSTDMT_CCtxPool* cctxPool; 792 ZSTDMT_seqPool* seqPool; 793 ZSTD_CCtx_params params; 794 size_t targetSectionSize; 795 size_t targetPrefixSize; 796 int jobReady; /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */ 797 inBuff_t inBuff; 798 roundBuff_t roundBuff; 799 serialState_t serial; 800 rsyncState_t rsync; 801 unsigned singleBlockingThread; 802 unsigned jobIDMask; 803 unsigned doneJobID; 804 unsigned nextJobID; 805 unsigned frameEnded; 806 unsigned allJobsCompleted; 807 unsigned long long frameContentSize; 808 unsigned long long consumed; 809 unsigned long long produced; 810 ZSTD_customMem cMem; 811 ZSTD_CDict* cdictLocal; 812 const ZSTD_CDict* cdict; 813 }; 814 815 static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem) 816 { 817 U32 jobNb; 818 if (jobTable == NULL) return; 819 for (jobNb=0; jobNb<nbJobs; jobNb++) { 820 ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex); 821 ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond); 822 } 823 ZSTD_free(jobTable, cMem); 824 } 825 826 /* ZSTDMT_allocJobsTable() 827 * allocate and init a job table. 828 * update *nbJobsPtr to next power of 2 value, as size of table */ 829 static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem) 830 { 831 U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1; 832 U32 const nbJobs = 1 << nbJobsLog2; 833 U32 jobNb; 834 ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*) 835 ZSTD_calloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem); 836 int initError = 0; 837 if (jobTable==NULL) return NULL; 838 *nbJobsPtr = nbJobs; 839 for (jobNb=0; jobNb<nbJobs; jobNb++) { 840 initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL); 841 initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL); 842 } 843 if (initError != 0) { 844 ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem); 845 return NULL; 846 } 847 return jobTable; 848 } 849 850 static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) { 851 U32 nbJobs = nbWorkers + 2; 852 if (nbJobs > mtctx->jobIDMask+1) { /* need more job capacity */ 853 ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem); 854 mtctx->jobIDMask = 0; 855 mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem); 856 if (mtctx->jobs==NULL) return ERROR(memory_allocation); 857 assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0)); /* ensure nbJobs is a power of 2 */ 858 mtctx->jobIDMask = nbJobs - 1; 859 } 860 return 0; 861 } 862 863 864 /* ZSTDMT_CCtxParam_setNbWorkers(): 865 * Internal use only */ 866 size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers) 867 { 868 return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers); 869 } 870 871 MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem) 872 { 873 ZSTDMT_CCtx* mtctx; 874 U32 nbJobs = nbWorkers + 2; 875 int initError; 876 DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers); 877 878 if (nbWorkers < 1) return NULL; 879 nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX); 880 if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL)) 881 /* invalid custom allocator */ 882 return NULL; 883 884 mtctx = (ZSTDMT_CCtx*) ZSTD_calloc(sizeof(ZSTDMT_CCtx), cMem); 885 if (!mtctx) return NULL; 886 ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers); 887 mtctx->cMem = cMem; 888 mtctx->allJobsCompleted = 1; 889 mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem); 890 mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem); 891 assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0); /* ensure nbJobs is a power of 2 */ 892 mtctx->jobIDMask = nbJobs - 1; 893 mtctx->bufPool = ZSTDMT_createBufferPool(nbWorkers, cMem); 894 mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem); 895 mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem); 896 initError = ZSTDMT_serialState_init(&mtctx->serial); 897 mtctx->roundBuff = kNullRoundBuff; 898 if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) { 899 ZSTDMT_freeCCtx(mtctx); 900 return NULL; 901 } 902 DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers); 903 return mtctx; 904 } 905 906 ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem) 907 { 908 #ifdef ZSTD_MULTITHREAD 909 return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem); 910 #else 911 (void)nbWorkers; 912 (void)cMem; 913 return NULL; 914 #endif 915 } 916 917 ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers) 918 { 919 return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem); 920 } 921 922 923 /* ZSTDMT_releaseAllJobResources() : 924 * note : ensure all workers are killed first ! */ 925 static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx) 926 { 927 unsigned jobID; 928 DEBUGLOG(3, "ZSTDMT_releaseAllJobResources"); 929 for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) { 930 DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start); 931 ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); 932 mtctx->jobs[jobID].dstBuff = g_nullBuffer; 933 mtctx->jobs[jobID].cSize = 0; 934 } 935 memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription)); 936 mtctx->inBuff.buffer = g_nullBuffer; 937 mtctx->inBuff.filled = 0; 938 mtctx->allJobsCompleted = 1; 939 } 940 941 static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx) 942 { 943 DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted"); 944 while (mtctx->doneJobID < mtctx->nextJobID) { 945 unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask; 946 ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex); 947 while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) { 948 DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID); /* we want to block when waiting for data to flush */ 949 ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex); 950 } 951 ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex); 952 mtctx->doneJobID++; 953 } 954 } 955 956 size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx) 957 { 958 if (mtctx==NULL) return 0; /* compatible with free on NULL */ 959 POOL_free(mtctx->factory); /* stop and free worker threads */ 960 ZSTDMT_releaseAllJobResources(mtctx); /* release job resources into pools first */ 961 ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem); 962 ZSTDMT_freeBufferPool(mtctx->bufPool); 963 ZSTDMT_freeCCtxPool(mtctx->cctxPool); 964 ZSTDMT_freeSeqPool(mtctx->seqPool); 965 ZSTDMT_serialState_free(&mtctx->serial); 966 ZSTD_freeCDict(mtctx->cdictLocal); 967 if (mtctx->roundBuff.buffer) 968 ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem); 969 ZSTD_free(mtctx, mtctx->cMem); 970 return 0; 971 } 972 973 size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx) 974 { 975 if (mtctx == NULL) return 0; /* supports sizeof NULL */ 976 return sizeof(*mtctx) 977 + POOL_sizeof(mtctx->factory) 978 + ZSTDMT_sizeof_bufferPool(mtctx->bufPool) 979 + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription) 980 + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool) 981 + ZSTDMT_sizeof_seqPool(mtctx->seqPool) 982 + ZSTD_sizeof_CDict(mtctx->cdictLocal) 983 + mtctx->roundBuff.capacity; 984 } 985 986 /* Internal only */ 987 size_t 988 ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, 989 ZSTDMT_parameter parameter, 990 int value) 991 { 992 DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter"); 993 switch(parameter) 994 { 995 case ZSTDMT_p_jobSize : 996 DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %i", value); 997 return ZSTD_CCtxParams_setParameter(params, ZSTD_c_jobSize, value); 998 case ZSTDMT_p_overlapLog : 999 DEBUGLOG(4, "ZSTDMT_p_overlapLog : %i", value); 1000 return ZSTD_CCtxParams_setParameter(params, ZSTD_c_overlapLog, value); 1001 case ZSTDMT_p_rsyncable : 1002 DEBUGLOG(4, "ZSTD_p_rsyncable : %i", value); 1003 return ZSTD_CCtxParams_setParameter(params, ZSTD_c_rsyncable, value); 1004 default : 1005 return ERROR(parameter_unsupported); 1006 } 1007 } 1008 1009 size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value) 1010 { 1011 DEBUGLOG(4, "ZSTDMT_setMTCtxParameter"); 1012 return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value); 1013 } 1014 1015 size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value) 1016 { 1017 switch (parameter) { 1018 case ZSTDMT_p_jobSize: 1019 return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_jobSize, value); 1020 case ZSTDMT_p_overlapLog: 1021 return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_overlapLog, value); 1022 case ZSTDMT_p_rsyncable: 1023 return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_rsyncable, value); 1024 default: 1025 return ERROR(parameter_unsupported); 1026 } 1027 } 1028 1029 /* Sets parameters relevant to the compression job, 1030 * initializing others to default values. */ 1031 static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params) 1032 { 1033 ZSTD_CCtx_params jobParams = params; 1034 /* Clear parameters related to multithreading */ 1035 jobParams.forceWindow = 0; 1036 jobParams.nbWorkers = 0; 1037 jobParams.jobSize = 0; 1038 jobParams.overlapLog = 0; 1039 jobParams.rsyncable = 0; 1040 memset(&jobParams.ldmParams, 0, sizeof(ldmParams_t)); 1041 memset(&jobParams.customMem, 0, sizeof(ZSTD_customMem)); 1042 return jobParams; 1043 } 1044 1045 1046 /* ZSTDMT_resize() : 1047 * @return : error code if fails, 0 on success */ 1048 static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers) 1049 { 1050 if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation); 1051 FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) ); 1052 mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers); 1053 if (mtctx->bufPool == NULL) return ERROR(memory_allocation); 1054 mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers); 1055 if (mtctx->cctxPool == NULL) return ERROR(memory_allocation); 1056 mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers); 1057 if (mtctx->seqPool == NULL) return ERROR(memory_allocation); 1058 ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers); 1059 return 0; 1060 } 1061 1062 1063 /*! ZSTDMT_updateCParams_whileCompressing() : 1064 * Updates a selected set of compression parameters, remaining compatible with currently active frame. 1065 * New parameters will be applied to next compression job. */ 1066 void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams) 1067 { 1068 U32 const saved_wlog = mtctx->params.cParams.windowLog; /* Do not modify windowLog while compressing */ 1069 int const compressionLevel = cctxParams->compressionLevel; 1070 DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)", 1071 compressionLevel); 1072 mtctx->params.compressionLevel = compressionLevel; 1073 { ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, 0, 0); 1074 cParams.windowLog = saved_wlog; 1075 mtctx->params.cParams = cParams; 1076 } 1077 } 1078 1079 /* ZSTDMT_getFrameProgression(): 1080 * tells how much data has been consumed (input) and produced (output) for current frame. 1081 * able to count progression inside worker threads. 1082 * Note : mutex will be acquired during statistics collection inside workers. */ 1083 ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx) 1084 { 1085 ZSTD_frameProgression fps; 1086 DEBUGLOG(5, "ZSTDMT_getFrameProgression"); 1087 fps.ingested = mtctx->consumed + mtctx->inBuff.filled; 1088 fps.consumed = mtctx->consumed; 1089 fps.produced = fps.flushed = mtctx->produced; 1090 fps.currentJobID = mtctx->nextJobID; 1091 fps.nbActiveWorkers = 0; 1092 { unsigned jobNb; 1093 unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1); 1094 DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)", 1095 mtctx->doneJobID, lastJobNb, mtctx->jobReady) 1096 for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) { 1097 unsigned const wJobID = jobNb & mtctx->jobIDMask; 1098 ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID]; 1099 ZSTD_pthread_mutex_lock(&jobPtr->job_mutex); 1100 { size_t const cResult = jobPtr->cSize; 1101 size_t const produced = ZSTD_isError(cResult) ? 0 : cResult; 1102 size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed; 1103 assert(flushed <= produced); 1104 fps.ingested += jobPtr->src.size; 1105 fps.consumed += jobPtr->consumed; 1106 fps.produced += produced; 1107 fps.flushed += flushed; 1108 fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size); 1109 } 1110 ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); 1111 } 1112 } 1113 return fps; 1114 } 1115 1116 1117 size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx) 1118 { 1119 size_t toFlush; 1120 unsigned const jobID = mtctx->doneJobID; 1121 assert(jobID <= mtctx->nextJobID); 1122 if (jobID == mtctx->nextJobID) return 0; /* no active job => nothing to flush */ 1123 1124 /* look into oldest non-fully-flushed job */ 1125 { unsigned const wJobID = jobID & mtctx->jobIDMask; 1126 ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID]; 1127 ZSTD_pthread_mutex_lock(&jobPtr->job_mutex); 1128 { size_t const cResult = jobPtr->cSize; 1129 size_t const produced = ZSTD_isError(cResult) ? 0 : cResult; 1130 size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed; 1131 assert(flushed <= produced); 1132 toFlush = produced - flushed; 1133 if (toFlush==0 && (jobPtr->consumed >= jobPtr->src.size)) { 1134 /* doneJobID is not-fully-flushed, but toFlush==0 : doneJobID should be compressing some more data */ 1135 assert(jobPtr->consumed < jobPtr->src.size); 1136 } 1137 } 1138 ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); 1139 } 1140 1141 return toFlush; 1142 } 1143 1144 1145 /* ------------------------------------------ */ 1146 /* ===== Multi-threaded compression ===== */ 1147 /* ------------------------------------------ */ 1148 1149 static unsigned ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params const params) 1150 { 1151 if (params.ldmParams.enableLdm) 1152 /* In Long Range Mode, the windowLog is typically oversized. 1153 * In which case, it's preferable to determine the jobSize 1154 * based on chainLog instead. */ 1155 return MAX(21, params.cParams.chainLog + 4); 1156 return MAX(20, params.cParams.windowLog + 2); 1157 } 1158 1159 static int ZSTDMT_overlapLog_default(ZSTD_strategy strat) 1160 { 1161 switch(strat) 1162 { 1163 case ZSTD_btultra2: 1164 return 9; 1165 case ZSTD_btultra: 1166 case ZSTD_btopt: 1167 return 8; 1168 case ZSTD_btlazy2: 1169 case ZSTD_lazy2: 1170 return 7; 1171 case ZSTD_lazy: 1172 case ZSTD_greedy: 1173 case ZSTD_dfast: 1174 case ZSTD_fast: 1175 default:; 1176 } 1177 return 6; 1178 } 1179 1180 static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat) 1181 { 1182 assert(0 <= ovlog && ovlog <= 9); 1183 if (ovlog == 0) return ZSTDMT_overlapLog_default(strat); 1184 return ovlog; 1185 } 1186 1187 static size_t ZSTDMT_computeOverlapSize(ZSTD_CCtx_params const params) 1188 { 1189 int const overlapRLog = 9 - ZSTDMT_overlapLog(params.overlapLog, params.cParams.strategy); 1190 int ovLog = (overlapRLog >= 8) ? 0 : (params.cParams.windowLog - overlapRLog); 1191 assert(0 <= overlapRLog && overlapRLog <= 8); 1192 if (params.ldmParams.enableLdm) { 1193 /* In Long Range Mode, the windowLog is typically oversized. 1194 * In which case, it's preferable to determine the jobSize 1195 * based on chainLog instead. 1196 * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */ 1197 ovLog = MIN(params.cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2) 1198 - overlapRLog; 1199 } 1200 assert(0 <= ovLog && ovLog <= 30); 1201 DEBUGLOG(4, "overlapLog : %i", params.overlapLog); 1202 DEBUGLOG(4, "overlap size : %i", 1 << ovLog); 1203 return (ovLog==0) ? 0 : (size_t)1 << ovLog; 1204 } 1205 1206 static unsigned 1207 ZSTDMT_computeNbJobs(ZSTD_CCtx_params params, size_t srcSize, unsigned nbWorkers) 1208 { 1209 assert(nbWorkers>0); 1210 { size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params); 1211 size_t const jobMaxSize = jobSizeTarget << 2; 1212 size_t const passSizeMax = jobMaxSize * nbWorkers; 1213 unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1; 1214 unsigned const nbJobsLarge = multiplier * nbWorkers; 1215 unsigned const nbJobsMax = (unsigned)(srcSize / jobSizeTarget) + 1; 1216 unsigned const nbJobsSmall = MIN(nbJobsMax, nbWorkers); 1217 return (multiplier>1) ? nbJobsLarge : nbJobsSmall; 1218 } } 1219 1220 /* ZSTDMT_compress_advanced_internal() : 1221 * This is a blocking function : it will only give back control to caller after finishing its compression job. 1222 */ 1223 static size_t ZSTDMT_compress_advanced_internal( 1224 ZSTDMT_CCtx* mtctx, 1225 void* dst, size_t dstCapacity, 1226 const void* src, size_t srcSize, 1227 const ZSTD_CDict* cdict, 1228 ZSTD_CCtx_params params) 1229 { 1230 ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(params); 1231 size_t const overlapSize = ZSTDMT_computeOverlapSize(params); 1232 unsigned const nbJobs = ZSTDMT_computeNbJobs(params, srcSize, params.nbWorkers); 1233 size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs; 1234 size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize; /* avoid too small last block */ 1235 const char* const srcStart = (const char*)src; 1236 size_t remainingSrcSize = srcSize; 1237 unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? nbJobs : (unsigned)(dstCapacity / ZSTD_compressBound(avgJobSize)); /* presumes avgJobSize >= 256 KB, which should be the case */ 1238 size_t frameStartPos = 0, dstBufferPos = 0; 1239 assert(jobParams.nbWorkers == 0); 1240 assert(mtctx->cctxPool->totalCCtx == params.nbWorkers); 1241 1242 params.jobSize = (U32)avgJobSize; 1243 DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: nbJobs=%2u (rawSize=%u bytes; fixedSize=%u) ", 1244 nbJobs, (U32)proposedJobSize, (U32)avgJobSize); 1245 1246 if ((nbJobs==1) | (params.nbWorkers<=1)) { /* fallback to single-thread mode : this is a blocking invocation anyway */ 1247 ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0]; 1248 DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: fallback to single-thread mode"); 1249 if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams); 1250 return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, jobParams); 1251 } 1252 1253 assert(avgJobSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */ 1254 ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgJobSize) ); 1255 if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize)) 1256 return ERROR(memory_allocation); 1257 1258 FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) ); /* only expands if necessary */ 1259 1260 { unsigned u; 1261 for (u=0; u<nbJobs; u++) { 1262 size_t const jobSize = MIN(remainingSrcSize, avgJobSize); 1263 size_t const dstBufferCapacity = ZSTD_compressBound(jobSize); 1264 buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity }; 1265 buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : g_nullBuffer; 1266 size_t dictSize = u ? overlapSize : 0; 1267 1268 mtctx->jobs[u].prefix.start = srcStart + frameStartPos - dictSize; 1269 mtctx->jobs[u].prefix.size = dictSize; 1270 mtctx->jobs[u].src.start = srcStart + frameStartPos; 1271 mtctx->jobs[u].src.size = jobSize; assert(jobSize > 0); /* avoid job.src.size == 0 */ 1272 mtctx->jobs[u].consumed = 0; 1273 mtctx->jobs[u].cSize = 0; 1274 mtctx->jobs[u].cdict = (u==0) ? cdict : NULL; 1275 mtctx->jobs[u].fullFrameSize = srcSize; 1276 mtctx->jobs[u].params = jobParams; 1277 /* do not calculate checksum within sections, but write it in header for first section */ 1278 mtctx->jobs[u].dstBuff = dstBuffer; 1279 mtctx->jobs[u].cctxPool = mtctx->cctxPool; 1280 mtctx->jobs[u].bufPool = mtctx->bufPool; 1281 mtctx->jobs[u].seqPool = mtctx->seqPool; 1282 mtctx->jobs[u].serial = &mtctx->serial; 1283 mtctx->jobs[u].jobID = u; 1284 mtctx->jobs[u].firstJob = (u==0); 1285 mtctx->jobs[u].lastJob = (u==nbJobs-1); 1286 1287 DEBUGLOG(5, "ZSTDMT_compress_advanced_internal: posting job %u (%u bytes)", u, (U32)jobSize); 1288 DEBUG_PRINTHEX(6, mtctx->jobs[u].prefix.start, 12); 1289 POOL_add(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[u]); 1290 1291 frameStartPos += jobSize; 1292 dstBufferPos += dstBufferCapacity; 1293 remainingSrcSize -= jobSize; 1294 } } 1295 1296 /* collect result */ 1297 { size_t error = 0, dstPos = 0; 1298 unsigned jobID; 1299 for (jobID=0; jobID<nbJobs; jobID++) { 1300 DEBUGLOG(5, "waiting for job %u ", jobID); 1301 ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex); 1302 while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) { 1303 DEBUGLOG(5, "waiting for jobCompleted signal from job %u", jobID); 1304 ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex); 1305 } 1306 ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex); 1307 DEBUGLOG(5, "ready to write job %u ", jobID); 1308 1309 { size_t const cSize = mtctx->jobs[jobID].cSize; 1310 if (ZSTD_isError(cSize)) error = cSize; 1311 if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall); 1312 if (jobID) { /* note : job 0 is written directly at dst, which is correct position */ 1313 if (!error) 1314 memmove((char*)dst + dstPos, mtctx->jobs[jobID].dstBuff.start, cSize); /* may overlap when job compressed within dst */ 1315 if (jobID >= compressWithinDst) { /* job compressed into its own buffer, which must be released */ 1316 DEBUGLOG(5, "releasing buffer %u>=%u", jobID, compressWithinDst); 1317 ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); 1318 } } 1319 mtctx->jobs[jobID].dstBuff = g_nullBuffer; 1320 mtctx->jobs[jobID].cSize = 0; 1321 dstPos += cSize ; 1322 } 1323 } /* for (jobID=0; jobID<nbJobs; jobID++) */ 1324 1325 DEBUGLOG(4, "checksumFlag : %u ", params.fParams.checksumFlag); 1326 if (params.fParams.checksumFlag) { 1327 U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState); 1328 if (dstPos + 4 > dstCapacity) { 1329 error = ERROR(dstSize_tooSmall); 1330 } else { 1331 DEBUGLOG(4, "writing checksum : %08X \n", checksum); 1332 MEM_writeLE32((char*)dst + dstPos, checksum); 1333 dstPos += 4; 1334 } } 1335 1336 if (!error) DEBUGLOG(4, "compressed size : %u ", (U32)dstPos); 1337 return error ? error : dstPos; 1338 } 1339 } 1340 1341 size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx, 1342 void* dst, size_t dstCapacity, 1343 const void* src, size_t srcSize, 1344 const ZSTD_CDict* cdict, 1345 ZSTD_parameters params, 1346 int overlapLog) 1347 { 1348 ZSTD_CCtx_params cctxParams = mtctx->params; 1349 cctxParams.cParams = params.cParams; 1350 cctxParams.fParams = params.fParams; 1351 assert(ZSTD_OVERLAPLOG_MIN <= overlapLog && overlapLog <= ZSTD_OVERLAPLOG_MAX); 1352 cctxParams.overlapLog = overlapLog; 1353 return ZSTDMT_compress_advanced_internal(mtctx, 1354 dst, dstCapacity, 1355 src, srcSize, 1356 cdict, cctxParams); 1357 } 1358 1359 1360 size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx, 1361 void* dst, size_t dstCapacity, 1362 const void* src, size_t srcSize, 1363 int compressionLevel) 1364 { 1365 ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0); 1366 int const overlapLog = ZSTDMT_overlapLog_default(params.cParams.strategy); 1367 params.fParams.contentSizeFlag = 1; 1368 return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog); 1369 } 1370 1371 1372 /* ====================================== */ 1373 /* ======= Streaming API ======= */ 1374 /* ====================================== */ 1375 1376 size_t ZSTDMT_initCStream_internal( 1377 ZSTDMT_CCtx* mtctx, 1378 const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, 1379 const ZSTD_CDict* cdict, ZSTD_CCtx_params params, 1380 unsigned long long pledgedSrcSize) 1381 { 1382 DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)", 1383 (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx); 1384 1385 /* params supposed partially fully validated at this point */ 1386 assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); 1387 assert(!((dict) && (cdict))); /* either dict or cdict, not both */ 1388 1389 /* init */ 1390 if (params.nbWorkers != mtctx->params.nbWorkers) 1391 FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) ); 1392 1393 if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN; 1394 if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = ZSTDMT_JOBSIZE_MAX; 1395 1396 mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN); /* do not trigger multi-threading when srcSize is too small */ 1397 if (mtctx->singleBlockingThread) { 1398 ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(params); 1399 DEBUGLOG(5, "ZSTDMT_initCStream_internal: switch to single blocking thread mode"); 1400 assert(singleThreadParams.nbWorkers == 0); 1401 return ZSTD_initCStream_internal(mtctx->cctxPool->cctx[0], 1402 dict, dictSize, cdict, 1403 singleThreadParams, pledgedSrcSize); 1404 } 1405 1406 DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers); 1407 1408 if (mtctx->allJobsCompleted == 0) { /* previous compression not correctly finished */ 1409 ZSTDMT_waitForAllJobsCompleted(mtctx); 1410 ZSTDMT_releaseAllJobResources(mtctx); 1411 mtctx->allJobsCompleted = 1; 1412 } 1413 1414 mtctx->params = params; 1415 mtctx->frameContentSize = pledgedSrcSize; 1416 if (dict) { 1417 ZSTD_freeCDict(mtctx->cdictLocal); 1418 mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 1419 ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */ 1420 params.cParams, mtctx->cMem); 1421 mtctx->cdict = mtctx->cdictLocal; 1422 if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation); 1423 } else { 1424 ZSTD_freeCDict(mtctx->cdictLocal); 1425 mtctx->cdictLocal = NULL; 1426 mtctx->cdict = cdict; 1427 } 1428 1429 mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(params); 1430 DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10)); 1431 mtctx->targetSectionSize = params.jobSize; 1432 if (mtctx->targetSectionSize == 0) { 1433 mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(params); 1434 } 1435 if (params.rsyncable) { 1436 /* Aim for the targetsectionSize as the average job size. */ 1437 U32 const jobSizeMB = (U32)(mtctx->targetSectionSize >> 20); 1438 U32 const rsyncBits = ZSTD_highbit32(jobSizeMB) + 20; 1439 assert(jobSizeMB >= 1); 1440 DEBUGLOG(4, "rsyncLog = %u", rsyncBits); 1441 mtctx->rsync.hash = 0; 1442 mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1; 1443 mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH); 1444 } 1445 if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize; /* job size must be >= overlap size */ 1446 DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize); 1447 DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10)); 1448 ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize)); 1449 { 1450 /* If ldm is enabled we need windowSize space. */ 1451 size_t const windowSize = mtctx->params.ldmParams.enableLdm ? (1U << mtctx->params.cParams.windowLog) : 0; 1452 /* Two buffers of slack, plus extra space for the overlap 1453 * This is the minimum slack that LDM works with. One extra because 1454 * flush might waste up to targetSectionSize-1 bytes. Another extra 1455 * for the overlap (if > 0), then one to fill which doesn't overlap 1456 * with the LDM window. 1457 */ 1458 size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0); 1459 size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers; 1460 /* Compute the total size, and always have enough slack */ 1461 size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1); 1462 size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers; 1463 size_t const capacity = MAX(windowSize, sectionsSize) + slackSize; 1464 if (mtctx->roundBuff.capacity < capacity) { 1465 if (mtctx->roundBuff.buffer) 1466 ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem); 1467 mtctx->roundBuff.buffer = (BYTE*)ZSTD_malloc(capacity, mtctx->cMem); 1468 if (mtctx->roundBuff.buffer == NULL) { 1469 mtctx->roundBuff.capacity = 0; 1470 return ERROR(memory_allocation); 1471 } 1472 mtctx->roundBuff.capacity = capacity; 1473 } 1474 } 1475 DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10)); 1476 mtctx->roundBuff.pos = 0; 1477 mtctx->inBuff.buffer = g_nullBuffer; 1478 mtctx->inBuff.filled = 0; 1479 mtctx->inBuff.prefix = kNullRange; 1480 mtctx->doneJobID = 0; 1481 mtctx->nextJobID = 0; 1482 mtctx->frameEnded = 0; 1483 mtctx->allJobsCompleted = 0; 1484 mtctx->consumed = 0; 1485 mtctx->produced = 0; 1486 if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize)) 1487 return ERROR(memory_allocation); 1488 return 0; 1489 } 1490 1491 size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx, 1492 const void* dict, size_t dictSize, 1493 ZSTD_parameters params, 1494 unsigned long long pledgedSrcSize) 1495 { 1496 ZSTD_CCtx_params cctxParams = mtctx->params; /* retrieve sticky params */ 1497 DEBUGLOG(4, "ZSTDMT_initCStream_advanced (pledgedSrcSize=%u)", (U32)pledgedSrcSize); 1498 cctxParams.cParams = params.cParams; 1499 cctxParams.fParams = params.fParams; 1500 return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, ZSTD_dct_auto, NULL, 1501 cctxParams, pledgedSrcSize); 1502 } 1503 1504 size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx, 1505 const ZSTD_CDict* cdict, 1506 ZSTD_frameParameters fParams, 1507 unsigned long long pledgedSrcSize) 1508 { 1509 ZSTD_CCtx_params cctxParams = mtctx->params; 1510 if (cdict==NULL) return ERROR(dictionary_wrong); /* method incompatible with NULL cdict */ 1511 cctxParams.cParams = ZSTD_getCParamsFromCDict(cdict); 1512 cctxParams.fParams = fParams; 1513 return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dct_auto, cdict, 1514 cctxParams, pledgedSrcSize); 1515 } 1516 1517 1518 /* ZSTDMT_resetCStream() : 1519 * pledgedSrcSize can be zero == unknown (for the time being) 1520 * prefer using ZSTD_CONTENTSIZE_UNKNOWN, 1521 * as `0` might mean "empty" in the future */ 1522 size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize) 1523 { 1524 if (!pledgedSrcSize) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; 1525 return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, 0, mtctx->params, 1526 pledgedSrcSize); 1527 } 1528 1529 size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel) { 1530 ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0); 1531 ZSTD_CCtx_params cctxParams = mtctx->params; /* retrieve sticky params */ 1532 DEBUGLOG(4, "ZSTDMT_initCStream (cLevel=%i)", compressionLevel); 1533 cctxParams.cParams = params.cParams; 1534 cctxParams.fParams = params.fParams; 1535 return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN); 1536 } 1537 1538 1539 /* ZSTDMT_writeLastEmptyBlock() 1540 * Write a single empty block with an end-of-frame to finish a frame. 1541 * Job must be created from streaming variant. 1542 * This function is always successful if expected conditions are fulfilled. 1543 */ 1544 static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job) 1545 { 1546 assert(job->lastJob == 1); 1547 assert(job->src.size == 0); /* last job is empty -> will be simplified into a last empty block */ 1548 assert(job->firstJob == 0); /* cannot be first job, as it also needs to create frame header */ 1549 assert(job->dstBuff.start == NULL); /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */ 1550 job->dstBuff = ZSTDMT_getBuffer(job->bufPool); 1551 if (job->dstBuff.start == NULL) { 1552 job->cSize = ERROR(memory_allocation); 1553 return; 1554 } 1555 assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize); /* no buffer should ever be that small */ 1556 job->src = kNullRange; 1557 job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity); 1558 assert(!ZSTD_isError(job->cSize)); 1559 assert(job->consumed == 0); 1560 } 1561 1562 static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp) 1563 { 1564 unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask; 1565 int const endFrame = (endOp == ZSTD_e_end); 1566 1567 if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) { 1568 DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full"); 1569 assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask)); 1570 return 0; 1571 } 1572 1573 if (!mtctx->jobReady) { 1574 BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start; 1575 DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ", 1576 mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size); 1577 mtctx->jobs[jobID].src.start = src; 1578 mtctx->jobs[jobID].src.size = srcSize; 1579 assert(mtctx->inBuff.filled >= srcSize); 1580 mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix; 1581 mtctx->jobs[jobID].consumed = 0; 1582 mtctx->jobs[jobID].cSize = 0; 1583 mtctx->jobs[jobID].params = mtctx->params; 1584 mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? mtctx->cdict : NULL; 1585 mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize; 1586 mtctx->jobs[jobID].dstBuff = g_nullBuffer; 1587 mtctx->jobs[jobID].cctxPool = mtctx->cctxPool; 1588 mtctx->jobs[jobID].bufPool = mtctx->bufPool; 1589 mtctx->jobs[jobID].seqPool = mtctx->seqPool; 1590 mtctx->jobs[jobID].serial = &mtctx->serial; 1591 mtctx->jobs[jobID].jobID = mtctx->nextJobID; 1592 mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0); 1593 mtctx->jobs[jobID].lastJob = endFrame; 1594 mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0); 1595 mtctx->jobs[jobID].dstFlushed = 0; 1596 1597 /* Update the round buffer pos and clear the input buffer to be reset */ 1598 mtctx->roundBuff.pos += srcSize; 1599 mtctx->inBuff.buffer = g_nullBuffer; 1600 mtctx->inBuff.filled = 0; 1601 /* Set the prefix */ 1602 if (!endFrame) { 1603 size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize); 1604 mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize; 1605 mtctx->inBuff.prefix.size = newPrefixSize; 1606 } else { /* endFrame==1 => no need for another input buffer */ 1607 mtctx->inBuff.prefix = kNullRange; 1608 mtctx->frameEnded = endFrame; 1609 if (mtctx->nextJobID == 0) { 1610 /* single job exception : checksum is already calculated directly within worker thread */ 1611 mtctx->params.fParams.checksumFlag = 0; 1612 } } 1613 1614 if ( (srcSize == 0) 1615 && (mtctx->nextJobID>0)/*single job must also write frame header*/ ) { 1616 DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame"); 1617 assert(endOp == ZSTD_e_end); /* only possible case : need to end the frame with an empty last block */ 1618 ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID); 1619 mtctx->nextJobID++; 1620 return 0; 1621 } 1622 } 1623 1624 DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes (end:%u, jobNb == %u (mod:%u))", 1625 mtctx->nextJobID, 1626 (U32)mtctx->jobs[jobID].src.size, 1627 mtctx->jobs[jobID].lastJob, 1628 mtctx->nextJobID, 1629 jobID); 1630 if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) { 1631 mtctx->nextJobID++; 1632 mtctx->jobReady = 0; 1633 } else { 1634 DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID); 1635 mtctx->jobReady = 1; 1636 } 1637 return 0; 1638 } 1639 1640 1641 /*! ZSTDMT_flushProduced() : 1642 * flush whatever data has been produced but not yet flushed in current job. 1643 * move to next job if current one is fully flushed. 1644 * `output` : `pos` will be updated with amount of data flushed . 1645 * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush . 1646 * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */ 1647 static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end) 1648 { 1649 unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask; 1650 DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)", 1651 blockToFlush, mtctx->doneJobID, mtctx->nextJobID); 1652 assert(output->size >= output->pos); 1653 1654 ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex); 1655 if ( blockToFlush 1656 && (mtctx->doneJobID < mtctx->nextJobID) ) { 1657 assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize); 1658 while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) { /* nothing to flush */ 1659 if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) { 1660 DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none", 1661 mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size); 1662 break; 1663 } 1664 DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)", 1665 mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed); 1666 ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex); /* block when nothing to flush but some to come */ 1667 } } 1668 1669 /* try to flush something */ 1670 { size_t cSize = mtctx->jobs[wJobID].cSize; /* shared */ 1671 size_t const srcConsumed = mtctx->jobs[wJobID].consumed; /* shared */ 1672 size_t const srcSize = mtctx->jobs[wJobID].src.size; /* read-only, could be done after mutex lock, but no-declaration-after-statement */ 1673 ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); 1674 if (ZSTD_isError(cSize)) { 1675 DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s", 1676 mtctx->doneJobID, ZSTD_getErrorName(cSize)); 1677 ZSTDMT_waitForAllJobsCompleted(mtctx); 1678 ZSTDMT_releaseAllJobResources(mtctx); 1679 return cSize; 1680 } 1681 /* add frame checksum if necessary (can only happen once) */ 1682 assert(srcConsumed <= srcSize); 1683 if ( (srcConsumed == srcSize) /* job completed -> worker no longer active */ 1684 && mtctx->jobs[wJobID].frameChecksumNeeded ) { 1685 U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState); 1686 DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum); 1687 MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum); 1688 cSize += 4; 1689 mtctx->jobs[wJobID].cSize += 4; /* can write this shared value, as worker is no longer active */ 1690 mtctx->jobs[wJobID].frameChecksumNeeded = 0; 1691 } 1692 1693 if (cSize > 0) { /* compression is ongoing or completed */ 1694 size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos); 1695 DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)", 1696 (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize); 1697 assert(mtctx->doneJobID < mtctx->nextJobID); 1698 assert(cSize >= mtctx->jobs[wJobID].dstFlushed); 1699 assert(mtctx->jobs[wJobID].dstBuff.start != NULL); 1700 memcpy((char*)output->dst + output->pos, 1701 (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed, 1702 toFlush); 1703 output->pos += toFlush; 1704 mtctx->jobs[wJobID].dstFlushed += toFlush; /* can write : this value is only used by mtctx */ 1705 1706 if ( (srcConsumed == srcSize) /* job is completed */ 1707 && (mtctx->jobs[wJobID].dstFlushed == cSize) ) { /* output buffer fully flushed => free this job position */ 1708 DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one", 1709 mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed); 1710 ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff); 1711 DEBUGLOG(5, "dstBuffer released"); 1712 mtctx->jobs[wJobID].dstBuff = g_nullBuffer; 1713 mtctx->jobs[wJobID].cSize = 0; /* ensure this job slot is considered "not started" in future check */ 1714 mtctx->consumed += srcSize; 1715 mtctx->produced += cSize; 1716 mtctx->doneJobID++; 1717 } } 1718 1719 /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */ 1720 if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed); 1721 if (srcSize > srcConsumed) return 1; /* current job not completely compressed */ 1722 } 1723 if (mtctx->doneJobID < mtctx->nextJobID) return 1; /* some more jobs ongoing */ 1724 if (mtctx->jobReady) return 1; /* one job is ready to push, just not yet in the list */ 1725 if (mtctx->inBuff.filled > 0) return 1; /* input is not empty, and still needs to be converted into a job */ 1726 mtctx->allJobsCompleted = mtctx->frameEnded; /* all jobs are entirely flushed => if this one is last one, frame is completed */ 1727 if (end == ZSTD_e_end) return !mtctx->frameEnded; /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */ 1728 return 0; /* internal buffers fully flushed */ 1729 } 1730 1731 /** 1732 * Returns the range of data used by the earliest job that is not yet complete. 1733 * If the data of the first job is broken up into two segments, we cover both 1734 * sections. 1735 */ 1736 static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx) 1737 { 1738 unsigned const firstJobID = mtctx->doneJobID; 1739 unsigned const lastJobID = mtctx->nextJobID; 1740 unsigned jobID; 1741 1742 for (jobID = firstJobID; jobID < lastJobID; ++jobID) { 1743 unsigned const wJobID = jobID & mtctx->jobIDMask; 1744 size_t consumed; 1745 1746 ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex); 1747 consumed = mtctx->jobs[wJobID].consumed; 1748 ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); 1749 1750 if (consumed < mtctx->jobs[wJobID].src.size) { 1751 range_t range = mtctx->jobs[wJobID].prefix; 1752 if (range.size == 0) { 1753 /* Empty prefix */ 1754 range = mtctx->jobs[wJobID].src; 1755 } 1756 /* Job source in multiple segments not supported yet */ 1757 assert(range.start <= mtctx->jobs[wJobID].src.start); 1758 return range; 1759 } 1760 } 1761 return kNullRange; 1762 } 1763 1764 /** 1765 * Returns non-zero iff buffer and range overlap. 1766 */ 1767 static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range) 1768 { 1769 BYTE const* const bufferStart = (BYTE const*)buffer.start; 1770 BYTE const* const bufferEnd = bufferStart + buffer.capacity; 1771 BYTE const* const rangeStart = (BYTE const*)range.start; 1772 BYTE const* const rangeEnd = rangeStart + range.size; 1773 1774 if (rangeStart == NULL || bufferStart == NULL) 1775 return 0; 1776 /* Empty ranges cannot overlap */ 1777 if (bufferStart == bufferEnd || rangeStart == rangeEnd) 1778 return 0; 1779 1780 return bufferStart < rangeEnd && rangeStart < bufferEnd; 1781 } 1782 1783 static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window) 1784 { 1785 range_t extDict; 1786 range_t prefix; 1787 1788 DEBUGLOG(5, "ZSTDMT_doesOverlapWindow"); 1789 extDict.start = window.dictBase + window.lowLimit; 1790 extDict.size = window.dictLimit - window.lowLimit; 1791 1792 prefix.start = window.base + window.dictLimit; 1793 prefix.size = window.nextSrc - (window.base + window.dictLimit); 1794 DEBUGLOG(5, "extDict [0x%zx, 0x%zx)", 1795 (size_t)extDict.start, 1796 (size_t)extDict.start + extDict.size); 1797 DEBUGLOG(5, "prefix [0x%zx, 0x%zx)", 1798 (size_t)prefix.start, 1799 (size_t)prefix.start + prefix.size); 1800 1801 return ZSTDMT_isOverlapped(buffer, extDict) 1802 || ZSTDMT_isOverlapped(buffer, prefix); 1803 } 1804 1805 static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer) 1806 { 1807 if (mtctx->params.ldmParams.enableLdm) { 1808 ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex; 1809 DEBUGLOG(5, "ZSTDMT_waitForLdmComplete"); 1810 DEBUGLOG(5, "source [0x%zx, 0x%zx)", 1811 (size_t)buffer.start, 1812 (size_t)buffer.start + buffer.capacity); 1813 ZSTD_PTHREAD_MUTEX_LOCK(mutex); 1814 while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) { 1815 DEBUGLOG(5, "Waiting for LDM to finish..."); 1816 ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex); 1817 } 1818 DEBUGLOG(6, "Done waiting for LDM to finish"); 1819 ZSTD_pthread_mutex_unlock(mutex); 1820 } 1821 } 1822 1823 /** 1824 * Attempts to set the inBuff to the next section to fill. 1825 * If any part of the new section is still in use we give up. 1826 * Returns non-zero if the buffer is filled. 1827 */ 1828 static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx) 1829 { 1830 range_t const inUse = ZSTDMT_getInputDataInUse(mtctx); 1831 size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos; 1832 size_t const target = mtctx->targetSectionSize; 1833 buffer_t buffer; 1834 1835 DEBUGLOG(5, "ZSTDMT_tryGetInputRange"); 1836 assert(mtctx->inBuff.buffer.start == NULL); 1837 assert(mtctx->roundBuff.capacity >= target); 1838 1839 if (spaceLeft < target) { 1840 /* ZSTD_invalidateRepCodes() doesn't work for extDict variants. 1841 * Simply copy the prefix to the beginning in that case. 1842 */ 1843 BYTE* const start = (BYTE*)mtctx->roundBuff.buffer; 1844 size_t const prefixSize = mtctx->inBuff.prefix.size; 1845 1846 buffer.start = start; 1847 buffer.capacity = prefixSize; 1848 if (ZSTDMT_isOverlapped(buffer, inUse)) { 1849 DEBUGLOG(5, "Waiting for buffer..."); 1850 return 0; 1851 } 1852 ZSTDMT_waitForLdmComplete(mtctx, buffer); 1853 memmove(start, mtctx->inBuff.prefix.start, prefixSize); 1854 mtctx->inBuff.prefix.start = start; 1855 mtctx->roundBuff.pos = prefixSize; 1856 } 1857 buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos; 1858 buffer.capacity = target; 1859 1860 if (ZSTDMT_isOverlapped(buffer, inUse)) { 1861 DEBUGLOG(5, "Waiting for buffer..."); 1862 return 0; 1863 } 1864 assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix)); 1865 1866 ZSTDMT_waitForLdmComplete(mtctx, buffer); 1867 1868 DEBUGLOG(5, "Using prefix range [%zx, %zx)", 1869 (size_t)mtctx->inBuff.prefix.start, 1870 (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size); 1871 DEBUGLOG(5, "Using source range [%zx, %zx)", 1872 (size_t)buffer.start, 1873 (size_t)buffer.start + buffer.capacity); 1874 1875 1876 mtctx->inBuff.buffer = buffer; 1877 mtctx->inBuff.filled = 0; 1878 assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity); 1879 return 1; 1880 } 1881 1882 typedef struct { 1883 size_t toLoad; /* The number of bytes to load from the input. */ 1884 int flush; /* Boolean declaring if we must flush because we found a synchronization point. */ 1885 } syncPoint_t; 1886 1887 /** 1888 * Searches through the input for a synchronization point. If one is found, we 1889 * will instruct the caller to flush, and return the number of bytes to load. 1890 * Otherwise, we will load as many bytes as possible and instruct the caller 1891 * to continue as normal. 1892 */ 1893 static syncPoint_t 1894 findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) 1895 { 1896 BYTE const* const istart = (BYTE const*)input.src + input.pos; 1897 U64 const primePower = mtctx->rsync.primePower; 1898 U64 const hitMask = mtctx->rsync.hitMask; 1899 1900 syncPoint_t syncPoint; 1901 U64 hash; 1902 BYTE const* prev; 1903 size_t pos; 1904 1905 syncPoint.toLoad = MIN(input.size - input.pos, mtctx->targetSectionSize - mtctx->inBuff.filled); 1906 syncPoint.flush = 0; 1907 if (!mtctx->params.rsyncable) 1908 /* Rsync is disabled. */ 1909 return syncPoint; 1910 if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH) 1911 /* Not enough to compute the hash. 1912 * We will miss any synchronization points in this RSYNC_LENGTH byte 1913 * window. However, since it depends only in the internal buffers, if the 1914 * state is already synchronized, we will remain synchronized. 1915 * Additionally, the probability that we miss a synchronization point is 1916 * low: RSYNC_LENGTH / targetSectionSize. 1917 */ 1918 return syncPoint; 1919 /* Initialize the loop variables. */ 1920 if (mtctx->inBuff.filled >= RSYNC_LENGTH) { 1921 /* We have enough bytes buffered to initialize the hash. 1922 * Start scanning at the beginning of the input. 1923 */ 1924 pos = 0; 1925 prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH; 1926 hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH); 1927 } else { 1928 /* We don't have enough bytes buffered to initialize the hash, but 1929 * we know we have at least RSYNC_LENGTH bytes total. 1930 * Start scanning after the first RSYNC_LENGTH bytes less the bytes 1931 * already buffered. 1932 */ 1933 pos = RSYNC_LENGTH - mtctx->inBuff.filled; 1934 prev = (BYTE const*)mtctx->inBuff.buffer.start - pos; 1935 hash = ZSTD_rollingHash_compute(mtctx->inBuff.buffer.start, mtctx->inBuff.filled); 1936 hash = ZSTD_rollingHash_append(hash, istart, pos); 1937 } 1938 /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll 1939 * through the input. If we hit a synchronization point, then cut the 1940 * job off, and tell the compressor to flush the job. Otherwise, load 1941 * all the bytes and continue as normal. 1942 * If we go too long without a synchronization point (targetSectionSize) 1943 * then a block will be emitted anyways, but this is okay, since if we 1944 * are already synchronized we will remain synchronized. 1945 */ 1946 for (; pos < syncPoint.toLoad; ++pos) { 1947 BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH]; 1948 /* if (pos >= RSYNC_LENGTH) assert(ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); */ 1949 hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower); 1950 if ((hash & hitMask) == hitMask) { 1951 syncPoint.toLoad = pos + 1; 1952 syncPoint.flush = 1; 1953 break; 1954 } 1955 } 1956 return syncPoint; 1957 } 1958 1959 size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx) 1960 { 1961 size_t hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled; 1962 if (hintInSize==0) hintInSize = mtctx->targetSectionSize; 1963 return hintInSize; 1964 } 1965 1966 /** ZSTDMT_compressStream_generic() : 1967 * internal use only - exposed to be invoked from zstd_compress.c 1968 * assumption : output and input are valid (pos <= size) 1969 * @return : minimum amount of data remaining to flush, 0 if none */ 1970 size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, 1971 ZSTD_outBuffer* output, 1972 ZSTD_inBuffer* input, 1973 ZSTD_EndDirective endOp) 1974 { 1975 unsigned forwardInputProgress = 0; 1976 DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)", 1977 (U32)endOp, (U32)(input->size - input->pos)); 1978 assert(output->pos <= output->size); 1979 assert(input->pos <= input->size); 1980 1981 if (mtctx->singleBlockingThread) { /* delegate to single-thread (synchronous) */ 1982 return ZSTD_compressStream2(mtctx->cctxPool->cctx[0], output, input, endOp); 1983 } 1984 1985 if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) { 1986 /* current frame being ended. Only flush/end are allowed */ 1987 return ERROR(stage_wrong); 1988 } 1989 1990 /* single-pass shortcut (note : synchronous-mode) */ 1991 if ( (!mtctx->params.rsyncable) /* rsyncable mode is disabled */ 1992 && (mtctx->nextJobID == 0) /* just started */ 1993 && (mtctx->inBuff.filled == 0) /* nothing buffered */ 1994 && (!mtctx->jobReady) /* no job already created */ 1995 && (endOp == ZSTD_e_end) /* end order */ 1996 && (output->size - output->pos >= ZSTD_compressBound(input->size - input->pos)) ) { /* enough space in dst */ 1997 size_t const cSize = ZSTDMT_compress_advanced_internal(mtctx, 1998 (char*)output->dst + output->pos, output->size - output->pos, 1999 (const char*)input->src + input->pos, input->size - input->pos, 2000 mtctx->cdict, mtctx->params); 2001 if (ZSTD_isError(cSize)) return cSize; 2002 input->pos = input->size; 2003 output->pos += cSize; 2004 mtctx->allJobsCompleted = 1; 2005 mtctx->frameEnded = 1; 2006 return 0; 2007 } 2008 2009 /* fill input buffer */ 2010 if ( (!mtctx->jobReady) 2011 && (input->size > input->pos) ) { /* support NULL input */ 2012 if (mtctx->inBuff.buffer.start == NULL) { 2013 assert(mtctx->inBuff.filled == 0); /* Can't fill an empty buffer */ 2014 if (!ZSTDMT_tryGetInputRange(mtctx)) { 2015 /* It is only possible for this operation to fail if there are 2016 * still compression jobs ongoing. 2017 */ 2018 DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed"); 2019 assert(mtctx->doneJobID != mtctx->nextJobID); 2020 } else 2021 DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start); 2022 } 2023 if (mtctx->inBuff.buffer.start != NULL) { 2024 syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input); 2025 if (syncPoint.flush && endOp == ZSTD_e_continue) { 2026 endOp = ZSTD_e_flush; 2027 } 2028 assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize); 2029 DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u", 2030 (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize); 2031 memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad); 2032 input->pos += syncPoint.toLoad; 2033 mtctx->inBuff.filled += syncPoint.toLoad; 2034 forwardInputProgress = syncPoint.toLoad>0; 2035 } 2036 if ((input->pos < input->size) && (endOp == ZSTD_e_end)) 2037 endOp = ZSTD_e_flush; /* can't end now : not all input consumed */ 2038 } 2039 2040 if ( (mtctx->jobReady) 2041 || (mtctx->inBuff.filled >= mtctx->targetSectionSize) /* filled enough : let's compress */ 2042 || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0)) /* something to flush : let's go */ 2043 || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) { /* must finish the frame with a zero-size block */ 2044 size_t const jobSize = mtctx->inBuff.filled; 2045 assert(mtctx->inBuff.filled <= mtctx->targetSectionSize); 2046 FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) ); 2047 } 2048 2049 /* check for potential compressed data ready to be flushed */ 2050 { size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp); /* block if there was no forward input progress */ 2051 if (input->pos < input->size) return MAX(remainingToFlush, 1); /* input not consumed : do not end flush yet */ 2052 DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = %u", (U32)remainingToFlush); 2053 return remainingToFlush; 2054 } 2055 } 2056 2057 2058 size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input) 2059 { 2060 FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) ); 2061 2062 /* recommended next input size : fill current input buffer */ 2063 return mtctx->targetSectionSize - mtctx->inBuff.filled; /* note : could be zero when input buffer is fully filled and no more availability to create new job */ 2064 } 2065 2066 2067 static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_EndDirective endFrame) 2068 { 2069 size_t const srcSize = mtctx->inBuff.filled; 2070 DEBUGLOG(5, "ZSTDMT_flushStream_internal"); 2071 2072 if ( mtctx->jobReady /* one job ready for a worker to pick up */ 2073 || (srcSize > 0) /* still some data within input buffer */ 2074 || ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) { /* need a last 0-size block to end frame */ 2075 DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)", 2076 (U32)srcSize, (U32)endFrame); 2077 FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) ); 2078 } 2079 2080 /* check if there is any data available to flush */ 2081 return ZSTDMT_flushProduced(mtctx, output, 1 /* blockToFlush */, endFrame); 2082 } 2083 2084 2085 size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output) 2086 { 2087 DEBUGLOG(5, "ZSTDMT_flushStream"); 2088 if (mtctx->singleBlockingThread) 2089 return ZSTD_flushStream(mtctx->cctxPool->cctx[0], output); 2090 return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_flush); 2091 } 2092 2093 size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output) 2094 { 2095 DEBUGLOG(4, "ZSTDMT_endStream"); 2096 if (mtctx->singleBlockingThread) 2097 return ZSTD_endStream(mtctx->cctxPool->cctx[0], output); 2098 return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_end); 2099 } 2100