/*
 * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 */


/* *************************************
*  Includes
***************************************/
#include "util.h"        /* Compiler options, UTIL_GetFileSize, UTIL_sleep */
#include <stdlib.h>      /* malloc, free */
#include <string.h>      /* memset */
#include <stdio.h>       /* fprintf, fopen, ftello64 */
#include <time.h>        /* clock_t, clock, CLOCKS_PER_SEC */
#include <ctype.h>       /* toupper */
#include <errno.h>       /* errno */

#include "timefn.h"      /* UTIL_time_t, UTIL_getTime, UTIL_clockSpanMicro, UTIL_waitForNextTick */
#include "mem.h"
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"
#include "datagen.h"     /* RDG_genBuffer */
#include "xxhash.h"

#include "zstd_zlibwrapper.h"



/*-************************************
*  Tuning parameters
**************************************/
#ifndef ZSTDCLI_CLEVEL_DEFAULT
#  define ZSTDCLI_CLEVEL_DEFAULT 3
#endif


/*-************************************
*  Constants
**************************************/
#define COMPRESSOR_NAME "Zstandard wrapper for zlib command line interface"
#ifndef ZSTD_VERSION
#  define ZSTD_VERSION "v" ZSTD_VERSION_STRING
#endif
#define AUTHOR "Yann Collet"
#define WELCOME_MESSAGE "*** %s %i-bits %s, by %s ***\n", COMPRESSOR_NAME, (int)(sizeof(size_t)*8), ZSTD_VERSION, AUTHOR

#ifndef ZSTD_GIT_COMMIT
#  define ZSTD_GIT_COMMIT_STRING ""
#else
#  define ZSTD_GIT_COMMIT_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_GIT_COMMIT)
#endif

#define NBLOOPS               3
#define TIMELOOP_MICROSEC     1*1000000ULL  /* 1 second */
#define ACTIVEPERIOD_MICROSEC 70*1000000ULL /* 70 seconds */
#define COOLPERIOD_SEC        10

#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

static const size_t maxMemory = (sizeof(size_t)==4) ? (2 GB - 64 MB) : (size_t)(1ULL << ((sizeof(size_t)*8)-31));

static U32 g_compressibilityDefault = 50;


/* *************************************
*  console display
***************************************/
#define DEFAULT_DISPLAY_LEVEL 2
#define DISPLAY(...)         fprintf(displayOut, __VA_ARGS__)
#define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); }
static unsigned g_displayLevel = DEFAULT_DISPLAY_LEVEL;   /* 0 : no display;  1: errors;  2 : + result + interaction + warnings;  3 : + progression;  4 : + information */
static FILE* displayOut;

#define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \
            if ((clock() - g_time > refreshRate) || (g_displayLevel>=4)) \
            { g_time = clock(); DISPLAY(__VA_ARGS__); \
            if (g_displayLevel>=4) fflush(displayOut); } }
static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
static clock_t g_time = 0;


/* *************************************
*  Exceptions
***************************************/
#ifndef DEBUG
#  define DEBUG 0
#endif
#define DEBUGOUTPUT(...) { if (DEBUG) DISPLAY(__VA_ARGS__); }
#define EXM_THROW(error, ...)                                             \
{                                                                         \
    DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \
    DISPLAYLEVEL(1, "Error %i : ", error);                                \
    DISPLAYLEVEL(1, __VA_ARGS__);                                         \
    DISPLAYLEVEL(1, "\n");                                                \
    exit(error);                                                          \
}


/* *************************************
*  Benchmark Parameters
***************************************/
static unsigned g_nbIterations = NBLOOPS;
static size_t g_blockSize = 0;
int g_additionalParam = 0;

void BMK_setNotificationLevel(unsigned level) { g_displayLevel=level; }

void BMK_setAdditionalParam(int additionalParam) { g_additionalParam=additionalParam; }

void BMK_SetNbIterations(unsigned nbLoops)
{
    g_nbIterations = nbLoops;
    DISPLAYLEVEL(3, "- test >= %u seconds per compression / decompression -\n", g_nbIterations);
}

void BMK_SetBlockSize(size_t blockSize)
{
    g_blockSize = blockSize;
    DISPLAYLEVEL(2, "using blocks of size %u KB \n", (unsigned)(blockSize>>10));
}


/* ********************************************************
*  Bench functions
**********************************************************/
#undef MIN
#undef MAX
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))

typedef struct
{
    z_const char* srcPtr;   /* start of the source block */
    size_t srcSize;         /* size of the source block */
    char* cPtr;             /* destination for the compressed block */
    size_t cRoom;           /* capacity reserved at cPtr */
    size_t cSize;           /* compressed size actually produced */
    char* resPtr;           /* destination for the regenerated (decompressed) block */
    size_t resSize;         /* regenerated size actually produced */
} blockParam_t;

typedef enum { BMK_ZSTD, BMK_ZSTD_STREAM, BMK_ZLIB, BMK_ZWRAP_ZLIB, BMK_ZWRAP_ZSTD, BMK_ZLIB_REUSE, BMK_ZWRAP_ZLIB_REUSE, BMK_ZWRAP_ZSTD_REUSE } BMK_compressor;

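/*! BMK_benchMem() :
 *  Benchmarks one (compression level, backend) combination on `srcBuffer`.
 *  The input is cut into blocks (see BMK_SetBlockSize()); each block is
 *  compressed then decompressed with the backend selected by `compressor`,
 *  each phase being repeated for at least TIMELOOP_MICROSEC per iteration.
 *  The fastest iteration is reported as the speed, and the round-trip result
 *  is verified against the original content with XXH64. */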
static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
                        const char* displayName, int cLevel,
                        const size_t* fileSizes, U32 nbFiles,
                        const void* dictBuffer, size_t dictBufferSize, BMK_compressor compressor)
{
    size_t const blockSize = (g_blockSize>=32 ? g_blockSize : srcSize) + (!srcSize) /* avoid div by 0 */ ;
    size_t const avgSize = MIN(g_blockSize, (srcSize / nbFiles));
    U32 const maxNbBlocks = (U32) ((srcSize + (blockSize-1)) / blockSize) + nbFiles;
    blockParam_t* const blockTable = (blockParam_t*) malloc(maxNbBlocks * sizeof(blockParam_t));
    size_t const maxCompressedSize = ZSTD_compressBound(srcSize) + (maxNbBlocks * 1024);   /* add some room for safety */
    void* const compressedBuffer = malloc(maxCompressedSize);
    void* const resultBuffer = malloc(srcSize);
    ZSTD_CCtx* const ctx = ZSTD_createCCtx();
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    U32 nbBlocks;

    /* checks */
    if (!compressedBuffer || !resultBuffer || !blockTable || !ctx || !dctx)
        EXM_THROW(31, "allocation error : not enough memory");

    /* init */
    if (strlen(displayName)>17) displayName += strlen(displayName)-17;   /* can only display 17 characters */

    /* Init blockTable data */
    {   z_const char* srcPtr = (z_const char*)srcBuffer;
        char* cPtr = (char*)compressedBuffer;
        char* resPtr = (char*)resultBuffer;
        U32 fileNb;
        for (nbBlocks=0, fileNb=0; fileNb<nbFiles; fileNb++) {
            size_t remaining = fileSizes[fileNb];
            U32 const nbBlocksforThisFile = (U32)((remaining + (blockSize-1)) / blockSize);
            U32 const blockEnd = nbBlocks + nbBlocksforThisFile;
            for ( ; nbBlocks<blockEnd; nbBlocks++) {
                size_t const thisBlockSize = MIN(remaining, blockSize);
                blockTable[nbBlocks].srcPtr = srcPtr;
                blockTable[nbBlocks].cPtr = cPtr;
                blockTable[nbBlocks].resPtr = resPtr;
                blockTable[nbBlocks].srcSize = thisBlockSize;
                blockTable[nbBlocks].cRoom = ZSTD_compressBound(thisBlockSize);
                srcPtr += thisBlockSize;
                cPtr += blockTable[nbBlocks].cRoom;
                resPtr += thisBlockSize;
                remaining -= thisBlockSize;
    }   }   }

    /* warming up memory */
    RDG_genBuffer(compressedBuffer, maxCompressedSize, 0.10, 0.50, 1);

    /* Bench */
    {   U64 fastestC = (U64)(-1LL), fastestD = (U64)(-1LL);
        U64 const crcOrig = XXH64(srcBuffer, srcSize, 0);
        UTIL_time_t coolTime;
        U64 const maxTime = (g_nbIterations * TIMELOOP_MICROSEC) + 100;
        U64 totalCTime=0, totalDTime=0;
        U32 cCompleted=0, dCompleted=0;
#       define NB_MARKS 4
        const char* const marks[NB_MARKS] = { " |", " /", " =", "\\" };
        U32 markNb = 0;
        size_t cSize = 0;
        double ratio = 0.;

        coolTime = UTIL_getTime();
        DISPLAYLEVEL(2, "\r%79s\r", "");
        while (!cCompleted | !dCompleted) {
            UTIL_time_t clockStart;
            U64 clockLoop = g_nbIterations ? TIMELOOP_MICROSEC : 1;

            /* overheat protection */
            if (UTIL_clockSpanMicro(coolTime) > ACTIVEPERIOD_MICROSEC) {
                DISPLAYLEVEL(2, "\rcooling down ... \r");
                UTIL_sleep(COOLPERIOD_SEC);
                coolTime = UTIL_getTime();
            }

            /* Compression */
            DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->\r", marks[markNb], displayName, (unsigned)srcSize);
            if (!cCompleted) memset(compressedBuffer, 0xE5, maxCompressedSize);   /* warm up and erase result buffer */

            UTIL_sleepMilli(1);   /* give processor time to other processes */
            UTIL_waitForNextTick();
            clockStart = UTIL_getTime();

            if (!cCompleted) {   /* still some time to do compression tests */
                U32 nbLoops = 0;
                if (compressor == BMK_ZSTD) {
                    ZSTD_parameters const zparams = ZSTD_getParams(cLevel, avgSize, dictBufferSize);
                    ZSTD_customMem const cmem = { NULL, NULL, NULL };
                    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dct_auto, zparams.cParams, cmem);
                    if (cdict==NULL) EXM_THROW(1, "ZSTD_createCDict_advanced() allocation failure");

                    do {
                        U32 blockNb;
                        size_t rSize;
                        for (blockNb=0; blockNb<nbBlocks; blockNb++) {
                            if (dictBufferSize) {
                                rSize = ZSTD_compress_usingCDict(ctx,
                                                blockTable[blockNb].cPtr, blockTable[blockNb].cRoom,
                                                blockTable[blockNb].srcPtr,blockTable[blockNb].srcSize,
                                                cdict);
                            } else {
                                rSize = ZSTD_compressCCtx (ctx,
                                                blockTable[blockNb].cPtr, blockTable[blockNb].cRoom,
                                                blockTable[blockNb].srcPtr,blockTable[blockNb].srcSize, cLevel);
                            }
                            if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_compress_usingCDict() failed : %s", ZSTD_getErrorName(rSize));
                            blockTable[blockNb].cSize = rSize;
                        }
                        nbLoops++;
                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                    ZSTD_freeCDict(cdict);
                } else if (compressor == BMK_ZSTD_STREAM) {
                    ZSTD_parameters const zparams = ZSTD_getParams(cLevel, avgSize, dictBufferSize);
                    ZSTD_inBuffer inBuffer;
                    ZSTD_outBuffer outBuffer;
                    ZSTD_CStream* zbc = ZSTD_createCStream();
                    size_t rSize;
                    if (zbc == NULL) EXM_THROW(1, "ZSTD_createCStream() allocation failure");
                    rSize = ZSTD_initCStream_advanced(zbc, dictBuffer, dictBufferSize, zparams, avgSize);
                    if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_initCStream_advanced() failed : %s", ZSTD_getErrorName(rSize));
                    do {
                        U32 blockNb;
                        for (blockNb=0; blockNb<nbBlocks; blockNb++) {
                            rSize = ZSTD_resetCStream(zbc, blockTable[blockNb].srcSize);
                            if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_resetCStream() failed : %s", ZSTD_getErrorName(rSize));
                            inBuffer.src = blockTable[blockNb].srcPtr;
                            inBuffer.size = blockTable[blockNb].srcSize;
                            inBuffer.pos = 0;
                            outBuffer.dst = blockTable[blockNb].cPtr;
                            outBuffer.size = blockTable[blockNb].cRoom;
                            outBuffer.pos = 0;
                            rSize = ZSTD_compressStream(zbc, &outBuffer, &inBuffer);
                            if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_compressStream() failed : %s", ZSTD_getErrorName(rSize));
                            rSize = ZSTD_endStream(zbc, &outBuffer);
                            if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_endStream() failed : %s", ZSTD_getErrorName(rSize));
                            blockTable[blockNb].cSize = outBuffer.pos;
                        }
                        nbLoops++;
                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                    ZSTD_freeCStream(zbc);
                } else if (compressor == BMK_ZWRAP_ZLIB_REUSE || compressor == BMK_ZWRAP_ZSTD_REUSE || compressor == BMK_ZLIB_REUSE) {
                    z_stream def;
                    int ret;
                    int useSetDict = (dictBuffer != NULL);
                    if (compressor == BMK_ZLIB_REUSE || compressor == BMK_ZWRAP_ZLIB_REUSE) ZWRAP_useZSTDcompression(0);
                    else ZWRAP_useZSTDcompression(1);
                    def.zalloc = Z_NULL;
                    def.zfree = Z_NULL;
                    def.opaque = Z_NULL;
                    ret = deflateInit(&def, cLevel);
                    if (ret != Z_OK) EXM_THROW(1, "deflateInit failure");
                 /* if (ZWRAP_isUsingZSTDcompression()) {
                        ret = ZWRAP_setPledgedSrcSize(&def, avgSize);
                        if (ret != Z_OK) EXM_THROW(1, "ZWRAP_setPledgedSrcSize failure");
                    } */
                    do {
                        U32 blockNb;
                        for (blockNb=0; blockNb<nbBlocks; blockNb++) {
                            if (ZWRAP_isUsingZSTDcompression())
                                ret = ZWRAP_deflateReset_keepDict(&def);   /* reuse dictionary to make compression faster */
                            else
                                ret = deflateReset(&def);
                            if (ret != Z_OK) EXM_THROW(1, "deflateReset failure");
                            if (useSetDict) {
                                ret = deflateSetDictionary(&def, (const z_Bytef*)dictBuffer, dictBufferSize);
                                if (ret != Z_OK) EXM_THROW(1, "deflateSetDictionary failure");
                                if (ZWRAP_isUsingZSTDcompression()) useSetDict = 0;   /* zstd doesn't require deflateSetDictionary after ZWRAP_deflateReset_keepDict */
                            }
                            def.next_in = (z_const z_Bytef*) blockTable[blockNb].srcPtr;
                            def.avail_in = (uInt)blockTable[blockNb].srcSize;
                            def.total_in = 0;
                            def.next_out = (z_Bytef*) blockTable[blockNb].cPtr;
                            def.avail_out = (uInt)blockTable[blockNb].cRoom;
                            def.total_out = 0;
                            ret = deflate(&def, Z_FINISH);
                            if (ret != Z_STREAM_END) EXM_THROW(1, "deflate failure ret=%d srcSize=%d" , ret, (int)blockTable[blockNb].srcSize);
                            blockTable[blockNb].cSize = def.total_out;
                        }
                        nbLoops++;
                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                    ret = deflateEnd(&def);
                    if (ret != Z_OK) EXM_THROW(1, "deflateEnd failure");
                } else {
                    z_stream def;
                    if (compressor == BMK_ZLIB || compressor == BMK_ZWRAP_ZLIB) ZWRAP_useZSTDcompression(0);
                    else ZWRAP_useZSTDcompression(1);
                    do {
                        U32 blockNb;
                        for (blockNb=0; blockNb<nbBlocks; blockNb++) {
                            int ret;
                            def.zalloc = Z_NULL;
                            def.zfree = Z_NULL;
                            def.opaque = Z_NULL;
                            ret = deflateInit(&def, cLevel);
                            if (ret != Z_OK) EXM_THROW(1, "deflateInit failure");
                            if (dictBuffer) {
                                ret = deflateSetDictionary(&def, (const z_Bytef*)dictBuffer, dictBufferSize);
                                if (ret != Z_OK) EXM_THROW(1, "deflateSetDictionary failure");
                            }
                            def.next_in = (z_const z_Bytef*) blockTable[blockNb].srcPtr;
                            def.avail_in = (uInt)blockTable[blockNb].srcSize;
                            def.total_in = 0;
                            def.next_out = (z_Bytef*) blockTable[blockNb].cPtr;
                            def.avail_out = (uInt)blockTable[blockNb].cRoom;
                            def.total_out = 0;
                            ret = deflate(&def, Z_FINISH);
                            if (ret != Z_STREAM_END) EXM_THROW(1, "deflate failure");
                            ret = deflateEnd(&def);
                            if (ret != Z_OK) EXM_THROW(1, "deflateEnd failure");
                            blockTable[blockNb].cSize = def.total_out;
                        }
                        nbLoops++;
                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                }
                {   U64 const clockSpan = UTIL_clockSpanMicro(clockStart);
                    if (clockSpan < fastestC*nbLoops) fastestC = clockSpan / nbLoops;
                    totalCTime += clockSpan;
                    cCompleted = totalCTime>maxTime;
            }   }

            cSize = 0;
            { U32 blockNb; for (blockNb=0; blockNb<nbBlocks; blockNb++) cSize += blockTable[blockNb].cSize; }
            ratio = (double)srcSize / (double)cSize;
            markNb = (markNb+1) % NB_MARKS;
            DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.3f),%6.1f MB/s\r",
                    marks[markNb], displayName, (unsigned)srcSize, (unsigned)cSize, ratio,
                    (double)srcSize / fastestC );

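            /* note : fastestC and fastestD hold the fastest per-loop time in microseconds
             * to process srcSize bytes, so (double)srcSize / fastest is bytes per microsecond,
             * which prints directly as MB/s (1 byte/us == 1 MB/s with decimal megabytes). */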
            (void)fastestD; (void)crcOrig;   /* unused when decompression disabled */
#if 1
            /* Decompression */
            if (!dCompleted) memset(resultBuffer, 0xD6, srcSize);   /* warm result buffer */

            UTIL_sleepMilli(1);   /* give processor time to other processes */
            UTIL_waitForNextTick();
            clockStart = UTIL_getTime();

            if (!dCompleted) {
                U32 nbLoops = 0;
                if (compressor == BMK_ZSTD) {
                    ZSTD_DDict* ddict = ZSTD_createDDict(dictBuffer, dictBufferSize);
                    if (!ddict) EXM_THROW(2, "ZSTD_createDDict() allocation failure");
                    do {
                        unsigned blockNb;
                        for (blockNb=0; blockNb<nbBlocks; blockNb++) {
                            size_t const regenSize = ZSTD_decompress_usingDDict(dctx,
                                blockTable[blockNb].resPtr, blockTable[blockNb].srcSize,
                                blockTable[blockNb].cPtr, blockTable[blockNb].cSize,
                                ddict);
                            if (ZSTD_isError(regenSize)) {
                                DISPLAY("ZSTD_decompress_usingDDict() failed on block %u : %s \n",
                                        blockNb, ZSTD_getErrorName(regenSize));
                                clockLoop = 0;   /* force immediate test end */
                                break;
                            }
                            blockTable[blockNb].resSize = regenSize;
                        }
                        nbLoops++;
                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                    ZSTD_freeDDict(ddict);
                } else if (compressor == BMK_ZSTD_STREAM) {
                    ZSTD_inBuffer inBuffer;
                    ZSTD_outBuffer outBuffer;
                    ZSTD_DStream* zbd = ZSTD_createDStream();
                    size_t rSize;
                    if (zbd == NULL) EXM_THROW(1, "ZSTD_createDStream() allocation failure");
                    rSize = ZSTD_initDStream_usingDict(zbd, dictBuffer, dictBufferSize);
                    if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_initDStream() failed : %s", ZSTD_getErrorName(rSize));
                    do {
                        U32 blockNb;
                        for (blockNb=0; blockNb<nbBlocks; blockNb++) {
                            rSize = ZSTD_resetDStream(zbd);
                            if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_resetDStream() failed : %s", ZSTD_getErrorName(rSize));
                            inBuffer.src = blockTable[blockNb].cPtr;
                            inBuffer.size = blockTable[blockNb].cSize;
                            inBuffer.pos = 0;
                            outBuffer.dst = blockTable[blockNb].resPtr;
                            outBuffer.size = blockTable[blockNb].srcSize;
                            outBuffer.pos = 0;
                            rSize = ZSTD_decompressStream(zbd, &outBuffer, &inBuffer);
                            if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_decompressStream() failed : %s", ZSTD_getErrorName(rSize));
                            blockTable[blockNb].resSize = outBuffer.pos;
                        }
                        nbLoops++;
                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                    ZSTD_freeDStream(zbd);
                } else if (compressor == BMK_ZWRAP_ZLIB_REUSE || compressor == BMK_ZWRAP_ZSTD_REUSE || compressor == BMK_ZLIB_REUSE) {
                    z_stream inf;
                    int ret;
                    if (compressor == BMK_ZLIB_REUSE) ZWRAP_setDecompressionType(ZWRAP_FORCE_ZLIB);
                    else ZWRAP_setDecompressionType(ZWRAP_AUTO);
                    inf.zalloc = Z_NULL;
                    inf.zfree = Z_NULL;
                    inf.opaque = Z_NULL;
                    ret = inflateInit(&inf);
                    if (ret != Z_OK) EXM_THROW(1, "inflateInit failure");
                    do {
                        U32 blockNb;
                        for (blockNb=0; blockNb<nbBlocks; blockNb++) {
                            if (ZWRAP_isUsingZSTDdecompression(&inf))
                                ret = ZWRAP_inflateReset_keepDict(&inf);   /* reuse dictionary to make decompression faster; inflate will return Z_NEED_DICT only for the first time */
                            else
                                ret = inflateReset(&inf);
                            if (ret != Z_OK) EXM_THROW(1, "inflateReset failure");
                            inf.next_in = (z_const z_Bytef*) blockTable[blockNb].cPtr;
                            inf.avail_in = (uInt)blockTable[blockNb].cSize;
                            inf.total_in = 0;
                            inf.next_out = (z_Bytef*) blockTable[blockNb].resPtr;
                            inf.avail_out = (uInt)blockTable[blockNb].srcSize;
                            inf.total_out = 0;
                            ret = inflate(&inf, Z_FINISH);
                            if (ret == Z_NEED_DICT) {
                                ret = inflateSetDictionary(&inf, (const z_Bytef*)dictBuffer, dictBufferSize);
                                if (ret != Z_OK) EXM_THROW(1, "inflateSetDictionary failure");
                                ret = inflate(&inf, Z_FINISH);
                            }
                            if (ret != Z_STREAM_END) EXM_THROW(1, "inflate failure");
                            blockTable[blockNb].resSize = inf.total_out;
                        }
                        nbLoops++;
                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                    ret = inflateEnd(&inf);
                    if (ret != Z_OK) EXM_THROW(1, "inflateEnd failure");
                } else {
                    z_stream inf;
                    if (compressor == BMK_ZLIB) ZWRAP_setDecompressionType(ZWRAP_FORCE_ZLIB);
                    else ZWRAP_setDecompressionType(ZWRAP_AUTO);
                    do {
                        U32 blockNb;
                        for (blockNb=0; blockNb<nbBlocks; blockNb++) {
                            int ret;
                            inf.zalloc = Z_NULL;
                            inf.zfree = Z_NULL;
                            inf.opaque = Z_NULL;
                            ret = inflateInit(&inf);
                            if (ret != Z_OK) EXM_THROW(1, "inflateInit failure");
                            inf.next_in = (z_const z_Bytef*) blockTable[blockNb].cPtr;
                            inf.avail_in = (uInt)blockTable[blockNb].cSize;
                            inf.total_in = 0;
                            inf.next_out = (z_Bytef*) blockTable[blockNb].resPtr;
                            inf.avail_out = (uInt)blockTable[blockNb].srcSize;
                            inf.total_out = 0;
                            ret = inflate(&inf, Z_FINISH);
                            if (ret == Z_NEED_DICT) {
                                ret = inflateSetDictionary(&inf, (const z_Bytef*) dictBuffer, dictBufferSize);
                                if (ret != Z_OK) EXM_THROW(1, "inflateSetDictionary failure");
                                ret = inflate(&inf, Z_FINISH);
                            }
                            if (ret != Z_STREAM_END) EXM_THROW(1, "inflate failure");
                            ret = inflateEnd(&inf);
                            if (ret != Z_OK) EXM_THROW(1, "inflateEnd failure");
                            blockTable[blockNb].resSize = inf.total_out;
                        }
                        nbLoops++;
                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                }
                {   U64 const clockSpan = UTIL_clockSpanMicro(clockStart);
                    if (clockSpan < fastestD*nbLoops) fastestD = clockSpan / nbLoops;
                    totalDTime += clockSpan;
                    dCompleted = totalDTime>maxTime;
            }   }

            markNb = (markNb+1) % NB_MARKS;
            DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.3f),%6.1f MB/s ,%6.1f MB/s\r",
                    marks[markNb], displayName, (unsigned)srcSize, (unsigned)cSize, ratio,
                    (double)srcSize / fastestC,
                    (double)srcSize / fastestD );

            /* CRC Checking */
            {   U64 const crcCheck = XXH64(resultBuffer, srcSize, 0);
                if (crcOrig!=crcCheck) {
                    size_t u;
                    DISPLAY("!!! WARNING !!! %14s : Invalid Checksum : %x != %x \n", displayName, (unsigned)crcOrig, (unsigned)crcCheck);
                    for (u=0; u<srcSize; u++) {
                        if (((const BYTE*)srcBuffer)[u] != ((const BYTE*)resultBuffer)[u]) {
                            unsigned segNb, bNb, pos;
                            size_t bacc = 0;
                            DISPLAY("Decoding error at pos %u ", (unsigned)u);
                            for (segNb = 0; segNb < nbBlocks; segNb++) {
                                if (bacc + blockTable[segNb].srcSize > u) break;
                                bacc += blockTable[segNb].srcSize;
                            }
                            pos = (U32)(u - bacc);
                            bNb = pos / (128 KB);
                            DISPLAY("(block %u, sub %u, pos %u) \n", segNb, bNb, pos);
                            break;
                        }
                        if (u==srcSize-1) {   /* should never happen */
                            DISPLAY("no difference detected\n");
                    }   }
                    break;
            }   }   /* CRC Checking */
#endif
        }   /* while (!cCompleted | !dCompleted) */

        if (g_displayLevel == 1) {
            double cSpeed = (double)srcSize / fastestC;
            double dSpeed = (double)srcSize / fastestD;
            if (g_additionalParam)
                DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s (param=%d)\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName, g_additionalParam);
            else
                DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s %s\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName);
        }
        DISPLAYLEVEL(2, "%2i#\n", cLevel);
    }   /* Bench */

    /* clean up */
    free(blockTable);
    free(compressedBuffer);
    free(resultBuffer);
    ZSTD_freeCCtx(ctx);
    ZSTD_freeDCtx(dctx);
    return 0;
}


/*! BMK_findMaxMem() :
 *  Estimates how large a buffer can safely be allocated for benchmarking :
 *  starts from `requiredMem` (rounded up to a 64 MB multiple, capped at maxMemory)
 *  and steps down by 64 MB until a trial malloc() succeeds. */
static size_t BMK_findMaxMem(U64 requiredMem)
{
    size_t const step = 64 MB;
    BYTE* testmem = NULL;

    requiredMem = (((requiredMem >> 26) + 1) << 26);
    requiredMem += step;
    if (requiredMem > maxMemory) requiredMem = maxMemory;

    do {
        testmem = (BYTE*)malloc((size_t)requiredMem);
        requiredMem -= step;
    } while (!testmem && requiredMem);   /* do not allocate zero bytes */

    free(testmem);
    return (size_t)(requiredMem+1);   /* avoid zero */
}

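/*! BMK_benchCLevel() :
 *  Runs BMK_benchMem() on the same buffer for every backend under test
 *  (zstd streaming API, zstd block API, zstd and zlib through the zlibWrapper,
 *  with and without context reuse, and raw zlib), for each compression level
 *  from cLevel to cLevelLast. zlib runs are clamped to Z_BEST_COMPRESSION. */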
static void BMK_benchCLevel(void* srcBuffer, size_t benchedSize,
                            const char* displayName, int cLevel, int cLevelLast,
                            const size_t* fileSizes, unsigned nbFiles,
                            const void* dictBuffer, size_t dictBufferSize)
{
    int l;

    const char* pch = strrchr(displayName, '\\');   /* Windows */
    if (!pch) pch = strrchr(displayName, '/');      /* Linux */
    if (pch) displayName = pch+1;

    SET_REALTIME_PRIORITY;

    if (g_displayLevel == 1 && !g_additionalParam)
        DISPLAY("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n",
                ZSTD_VERSION_STRING, ZSTD_GIT_COMMIT_STRING,
                (unsigned)benchedSize, g_nbIterations, (unsigned)(g_blockSize>>10));

    if (cLevelLast < cLevel) cLevelLast = cLevel;

    DISPLAY("benchmarking zstd %s (using ZSTD_CStream)\n", ZSTD_VERSION_STRING);
    for (l=cLevel; l <= cLevelLast; l++) {
        BMK_benchMem(srcBuffer, benchedSize,
                     displayName, l,
                     fileSizes, nbFiles,
                     dictBuffer, dictBufferSize, BMK_ZSTD_STREAM);
    }

    DISPLAY("benchmarking zstd %s (using ZSTD_CCtx)\n", ZSTD_VERSION_STRING);
    for (l=cLevel; l <= cLevelLast; l++) {
        BMK_benchMem(srcBuffer, benchedSize,
                     displayName, l,
                     fileSizes, nbFiles,
                     dictBuffer, dictBufferSize, BMK_ZSTD);
    }

    DISPLAY("benchmarking zstd %s (using zlibWrapper)\n", ZSTD_VERSION_STRING);
    for (l=cLevel; l <= cLevelLast; l++) {
        BMK_benchMem(srcBuffer, benchedSize,
                     displayName, l,
                     fileSizes, nbFiles,
                     dictBuffer, dictBufferSize, BMK_ZWRAP_ZSTD_REUSE);
    }

    DISPLAY("benchmarking zstd %s (zlibWrapper not reusing a context)\n", ZSTD_VERSION_STRING);
    for (l=cLevel; l <= cLevelLast; l++) {
        BMK_benchMem(srcBuffer, benchedSize,
                     displayName, l,
                     fileSizes, nbFiles,
                     dictBuffer, dictBufferSize, BMK_ZWRAP_ZSTD);
    }


    if (cLevelLast > Z_BEST_COMPRESSION) cLevelLast = Z_BEST_COMPRESSION;

    DISPLAY("\n");
    DISPLAY("benchmarking zlib %s\n", ZLIB_VERSION);
    for (l=cLevel; l <= cLevelLast; l++) {
        BMK_benchMem(srcBuffer, benchedSize,
                     displayName, l,
                     fileSizes, nbFiles,
                     dictBuffer, dictBufferSize, BMK_ZLIB_REUSE);
    }

    DISPLAY("benchmarking zlib %s (zlib not reusing a context)\n", ZLIB_VERSION);
    for (l=cLevel; l <= cLevelLast; l++) {
        BMK_benchMem(srcBuffer, benchedSize,
                     displayName, l,
                     fileSizes, nbFiles,
                     dictBuffer, dictBufferSize, BMK_ZLIB);
    }

    DISPLAY("benchmarking zlib %s (using zlibWrapper)\n", ZLIB_VERSION);
    for (l=cLevel; l <= cLevelLast; l++) {
        BMK_benchMem(srcBuffer, benchedSize,
                     displayName, l,
                     fileSizes, nbFiles,
                     dictBuffer, dictBufferSize, BMK_ZWRAP_ZLIB_REUSE);
    }

    DISPLAY("benchmarking zlib %s (zlibWrapper not reusing a context)\n", ZLIB_VERSION);
    for (l=cLevel; l <= cLevelLast; l++) {
        BMK_benchMem(srcBuffer, benchedSize,
                     displayName, l,
                     fileSizes, nbFiles,
                     dictBuffer, dictBufferSize, BMK_ZWRAP_ZLIB);
    }
}


/*! BMK_loadFiles() :
    Loads `buffer` with content of files listed within `fileNamesTable`.
    At most, fills `buffer` entirely */
static void BMK_loadFiles(void* buffer, size_t bufferSize,
                          size_t* fileSizes,
                          const char** fileNamesTable, unsigned nbFiles)
{
    size_t pos = 0, totalSize = 0;
    unsigned n;
    for (n=0; n<nbFiles; n++) {
        FILE* f;
        U64 fileSize = UTIL_getFileSize(fileNamesTable[n]);
        if (UTIL_isDirectory(fileNamesTable[n])) {
            DISPLAYLEVEL(2, "Ignoring %s directory... \n", fileNamesTable[n]);
            fileSizes[n] = 0;
            continue;
        }
        if (fileSize == UTIL_FILESIZE_UNKNOWN) {
            DISPLAYLEVEL(2, "Cannot determine size of %s ... \n", fileNamesTable[n]);
            fileSizes[n] = 0;
            continue;
        }
        f = fopen(fileNamesTable[n], "rb");
        if (f==NULL) EXM_THROW(10, "impossible to open file %s", fileNamesTable[n]);
        DISPLAYUPDATE(2, "Loading %s... \r", fileNamesTable[n]);
        if (fileSize > bufferSize-pos) fileSize = bufferSize-pos, nbFiles=n;   /* buffer too small - stop after this file */
        {   size_t const readSize = fread(((char*)buffer)+pos, 1, (size_t)fileSize, f);
            if (readSize != (size_t)fileSize) EXM_THROW(11, "could not read %s", fileNamesTable[n]);
            pos += readSize;  }
        fileSizes[n] = (size_t)fileSize;
        totalSize += (size_t)fileSize;
        fclose(f);
    }

    if (totalSize == 0) EXM_THROW(12, "no data to bench");
}

static void BMK_benchFileTable(const char** fileNamesTable, unsigned nbFiles,
                               const char* dictFileName, int cLevel, int cLevelLast)
{
    void* srcBuffer;
    size_t benchedSize;
    void* dictBuffer = NULL;
    size_t dictBufferSize = 0;
    size_t* fileSizes = (size_t*)malloc(nbFiles * sizeof(size_t));
    U64 const totalSizeToLoad = UTIL_getTotalFileSize(fileNamesTable, nbFiles);
    char mfName[20] = {0};

    if (!fileSizes) EXM_THROW(12, "not enough memory for fileSizes");

    /* Load dictionary */
    if (dictFileName != NULL) {
        U64 const dictFileSize = UTIL_getFileSize(dictFileName);
        if (dictFileSize > 64 MB)
            EXM_THROW(10, "dictionary file %s too large", dictFileName);
        dictBufferSize = (size_t)dictFileSize;
        dictBuffer = malloc(dictBufferSize);
        if (dictBuffer==NULL)
            EXM_THROW(11, "not enough memory for dictionary (%u bytes)", (unsigned)dictBufferSize);
        BMK_loadFiles(dictBuffer, dictBufferSize, fileSizes, &dictFileName, 1);
    }

    /* Memory allocation & restrictions */
    benchedSize = BMK_findMaxMem(totalSizeToLoad * 3) / 3;
    if ((U64)benchedSize > totalSizeToLoad) benchedSize = (size_t)totalSizeToLoad;
    if (benchedSize < totalSizeToLoad)
        DISPLAY("Not enough memory; testing %u MB only...\n", (unsigned)(benchedSize >> 20));
    srcBuffer = malloc(benchedSize + !benchedSize);
    if (!srcBuffer) EXM_THROW(12, "not enough memory");

    /* Load input buffer */
    BMK_loadFiles(srcBuffer, benchedSize, fileSizes, fileNamesTable, nbFiles);

    /* Bench */
    snprintf (mfName, sizeof(mfName), " %u files", nbFiles);
    {   const char* displayName = (nbFiles > 1) ? mfName : fileNamesTable[0];
        BMK_benchCLevel(srcBuffer, benchedSize,
                        displayName, cLevel, cLevelLast,
                        fileSizes, nbFiles,
                        dictBuffer, dictBufferSize);
    }

    /* clean up */
    free(srcBuffer);
    free(dictBuffer);
    free(fileSizes);
}


static void BMK_syntheticTest(int cLevel, int cLevelLast, double compressibility)
{
    char name[20] = {0};
    size_t benchedSize = 10000000;
    void* const srcBuffer = malloc(benchedSize);

    /* Memory allocation */
    if (!srcBuffer) EXM_THROW(21, "not enough memory");

    /* Fill input buffer */
    RDG_genBuffer(srcBuffer, benchedSize, compressibility, 0.0, 0);

    /* Bench */
    snprintf (name, sizeof(name), "Synthetic %2u%%", (unsigned)(compressibility*100));
    BMK_benchCLevel(srcBuffer, benchedSize, name, cLevel, cLevelLast, &benchedSize, 1, NULL, 0);

    /* clean up */
    free(srcBuffer);
}


int BMK_benchFiles(const char** fileNamesTable, unsigned nbFiles,
                   const char* dictFileName, int cLevel, int cLevelLast)
{
    double const compressibility = (double)g_compressibilityDefault / 100;

    if (nbFiles == 0)
        BMK_syntheticTest(cLevel, cLevelLast, compressibility);
    else
        BMK_benchFileTable(fileNamesTable, nbFiles, dictFileName, cLevel, cLevelLast);
    return 0;
}




/*-************************************
*  Command Line
**************************************/
static int usage(const char* programName)
{
    DISPLAY(WELCOME_MESSAGE);
    DISPLAY( "Usage :\n");
    DISPLAY( " %s [args] [FILE(s)] [-o file]\n", programName);
    DISPLAY( "\n");
    DISPLAY( "FILE : a filename\n");
    DISPLAY( " with no FILE, or when FILE is - , read standard input\n");
    DISPLAY( "Arguments :\n");
    DISPLAY( " -D file: use `file` as Dictionary \n");
    DISPLAY( " -h/-H : display help/long help and exit\n");
    DISPLAY( " -V : display Version number and exit\n");
    DISPLAY( " -v : verbose mode; specify multiple times to increase log level (default:%d)\n", DEFAULT_DISPLAY_LEVEL);
    DISPLAY( " -q : suppress warnings; specify twice to suppress errors too\n");
#ifdef UTIL_HAS_CREATEFILELIST
    DISPLAY( " -r : operate recursively on directories\n");
#endif
    DISPLAY( "\n");
    DISPLAY( "Benchmark arguments :\n");
    DISPLAY( " -b# : benchmark file(s), using # compression level (default : %d) \n", ZSTDCLI_CLEVEL_DEFAULT);
    DISPLAY( " -e# : test all compression levels from -bX to # (default: %d)\n", ZSTDCLI_CLEVEL_DEFAULT);
    DISPLAY( " -i# : minimum evaluation time in seconds (default : 3s)\n");
    DISPLAY( " -B# : cut file into independent blocks of size # (default: no block)\n");
    return 0;
}

static int badusage(const char* programName)
{
    DISPLAYLEVEL(1, "Incorrect parameters\n");
    if (g_displayLevel >= 1) usage(programName);
    return 1;
}

static void waitEnter(void)
{
    int unused;
    DISPLAY("Press enter to continue...\n");
    unused = getchar();
    (void)unused;
}

/*! readU32FromChar() :
    @return : unsigned integer value read from input in `char` format
    Will also modify `*stringPtr`, advancing it to position where it stopped reading.
    Note : this function can overflow if digit string > MAX_UINT */
static unsigned readU32FromChar(const char** stringPtr)
{
    unsigned result = 0;
    while ((**stringPtr >='0') && (**stringPtr <='9'))
        result *= 10, result += (unsigned)(**stringPtr - '0'), (*stringPtr)++ ;
    return result;
}


#define CLEAN_RETURN(i) { operationResult = (i); goto _end; }

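/* Example invocations (the binary name below is illustrative and depends on the build
 * target; options are documented in usage() above) :
 *   ./zwrapbench -b1 -e9 file.txt   : bench compression levels 1 to 9 on file.txt
 *   ./zwrapbench -b3 -B64K -i5 file : level 3, 64 KB independent blocks, >= 5 s per measurement
 *   ./zwrapbench -b5 -D dict file   : use `dict` as dictionary
 *   ./zwrapbench -b1                : no file given, bench a synthetic buffer
 */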
int main(int argCount, char** argv)
{
    int argNb,
        main_pause=0,
        nextEntryIsDictionary=0,
        operationResult=0,
        nextArgumentIsFile=0;
    int cLevel = ZSTDCLI_CLEVEL_DEFAULT;
    int cLevelLast = 1;
    unsigned recursive = 0;
    FileNamesTable* filenames = UTIL_allocateFileNamesTable((size_t)argCount);
    const char* programName = argv[0];
    const char* dictFileName = NULL;
    char* dynNameSpace = NULL;

    /* init */
    if (filenames==NULL) { DISPLAY("zstd: %s \n", strerror(errno)); exit(1); }
    displayOut = stderr;

    /* Pick out program name from path. Don't rely on stdlib because of conflicting behavior */
    {   size_t pos;
        for (pos = strlen(programName); pos > 0; pos--) { if (programName[pos] == '/') { pos++; break; } }
        programName += pos;
    }

    /* command switches */
    for(argNb=1; argNb<argCount; argNb++) {
        const char* argument = argv[argNb];
        if(!argument) continue;   /* Protection if argument empty */

        if (nextArgumentIsFile==0) {

            /* long commands (--long-word) */
            if (!strcmp(argument, "--")) { nextArgumentIsFile=1; continue; }
            if (!strcmp(argument, "--version")) { displayOut=stdout; DISPLAY(WELCOME_MESSAGE); CLEAN_RETURN(0); }
            if (!strcmp(argument, "--help")) { displayOut=stdout; CLEAN_RETURN(usage(programName)); }
            if (!strcmp(argument, "--verbose")) { g_displayLevel++; continue; }
            if (!strcmp(argument, "--quiet")) { g_displayLevel--; continue; }

            /* Decode commands (note : aggregated commands are allowed) */
            if (argument[0]=='-') {
                argument++;

                while (argument[0]!=0) {
                    switch(argument[0])
                    {
                        /* Display help */
                    case 'V': displayOut=stdout; DISPLAY(WELCOME_MESSAGE); CLEAN_RETURN(0);   /* Version Only */
                    case 'H':
                    case 'h': displayOut=stdout; CLEAN_RETURN(usage(programName));

                        /* Use file content as dictionary */
                    case 'D': nextEntryIsDictionary = 1; argument++; break;

                        /* Verbose mode */
                    case 'v': g_displayLevel++; argument++; break;

                        /* Quiet mode */
                    case 'q': g_displayLevel--; argument++; break;

#ifdef UTIL_HAS_CREATEFILELIST
                        /* recursive */
                    case 'r': recursive=1; argument++; break;
#endif

                        /* Benchmark */
                    case 'b':
                        /* first compression Level */
                        argument++;
                        cLevel = (int)readU32FromChar(&argument);
                        break;

                        /* range bench (benchmark only) */
                    case 'e':
                        /* last compression Level */
                        argument++;
                        cLevelLast = (int)readU32FromChar(&argument);
                        break;

                        /* Modify Nb Iterations (benchmark only) */
                    case 'i':
                        argument++;
                        {   U32 const iters = readU32FromChar(&argument);
                            BMK_setNotificationLevel(g_displayLevel);
                            BMK_SetNbIterations(iters);
                        }
                        break;

                        /* cut input into blocks (benchmark only) */
                    case 'B':
                        argument++;
                        {   size_t bSize = readU32FromChar(&argument);
                            if (toupper(*argument)=='K') bSize<<=10, argument++;   /* allows using KB notation */
                            if (toupper(*argument)=='M') bSize<<=20, argument++;
                            if (toupper(*argument)=='B') argument++;
                            BMK_setNotificationLevel(g_displayLevel);
                            BMK_SetBlockSize(bSize);
                        }
                        break;

                        /* Pause at the end (-p) or set an additional param (-p#) (hidden option) */
                    case 'p': argument++;
                        if ((*argument>='0') && (*argument<='9')) {
                            BMK_setAdditionalParam((int)readU32FromChar(&argument));
                        } else
                            main_pause=1;
                        break;
                        /* unknown command */
                    default : CLEAN_RETURN(badusage(programName));
                    }
                }
                continue;
            }   /* if (argument[0]=='-') */

        }   /* if (nextArgumentIsFile==0) */

        if (nextEntryIsDictionary) {
            nextEntryIsDictionary = 0;
            dictFileName = argument;
            continue;
        }

        /* add filename to list */
        UTIL_refFilename(filenames, argument);
    }

    /* Welcome message (if verbose) */
    DISPLAYLEVEL(3, WELCOME_MESSAGE);

#ifdef UTIL_HAS_CREATEFILELIST
    if (recursive) {
        UTIL_expandFNT(&filenames, 1);
    }
#endif

    BMK_setNotificationLevel(g_displayLevel);
    BMK_benchFiles(filenames->fileNames, (unsigned)filenames->tableSize, dictFileName, cLevel, cLevelLast);

_end:
    if (main_pause) waitEnter();
    free(dynNameSpace);
    UTIL_freeFileNamesTable(filenames);
    return operationResult;
}