/*
 * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 */


/* *************************************
*  Includes
***************************************/
#include "util.h"        /* Compiler options, UTIL_GetFileSize, UTIL_sleep */
#include <stdlib.h>      /* malloc, free */
#include <string.h>      /* memset */
#include <stdio.h>       /* fprintf, fopen, ftello64 */
#include <time.h>        /* clock_t, clock, CLOCKS_PER_SEC */
#include <ctype.h>       /* toupper */
#include <errno.h>       /* errno */

#include "timefn.h"      /* UTIL_time_t, UTIL_getTime, UTIL_clockSpanMicro, UTIL_waitForNextTick */
#include "mem.h"
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"
#include "datagen.h"     /* RDG_genBuffer */
#include "xxhash.h"

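/* zstd_zlibwrapper.h provides the zlib-style API backed by zstd, plus the
 * ZWRAP_* controls (ZWRAP_useZSTDcompression, ZWRAP_setDecompressionType, ...)
 * used below to switch between genuine zlib and the zstd-backed wrapper. */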
#include "zstd_zlibwrapper.h"



/*-************************************
*  Tuning parameters
**************************************/
#ifndef ZSTDCLI_CLEVEL_DEFAULT
#  define ZSTDCLI_CLEVEL_DEFAULT 3
#endif


/*-************************************
*  Constants
**************************************/
#define COMPRESSOR_NAME "Zstandard wrapper for zlib command line interface"
#ifndef ZSTD_VERSION
#  define ZSTD_VERSION "v" ZSTD_VERSION_STRING
#endif
#define AUTHOR "Yann Collet"
#define WELCOME_MESSAGE "*** %s %i-bits %s, by %s ***\n", COMPRESSOR_NAME, (int)(sizeof(size_t)*8), ZSTD_VERSION, AUTHOR

#ifndef ZSTD_GIT_COMMIT
#  define ZSTD_GIT_COMMIT_STRING ""
#else
#  define ZSTD_GIT_COMMIT_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_GIT_COMMIT)
#endif

#define NBLOOPS               3
#define TIMELOOP_MICROSEC     1*1000000ULL   /* 1 second */
#define ACTIVEPERIOD_MICROSEC 70*1000000ULL  /* 70 seconds */
#define COOLPERIOD_SEC        10

#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

static const size_t maxMemory = (sizeof(size_t)==4) ? (2 GB - 64 MB) : (size_t)(1ULL << ((sizeof(size_t)*8)-31));

static U32 g_compressibilityDefault = 50;


/* *************************************
*  console display
***************************************/
#define DEFAULT_DISPLAY_LEVEL 2
#define DISPLAY(...)         fprintf(displayOut, __VA_ARGS__)
#define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); }
static unsigned g_displayLevel = DEFAULT_DISPLAY_LEVEL;   /* 0 : no display;   1: errors;   2 : + result + interaction + warnings;   3 : + progression;   4 : + information */
static FILE* displayOut;

#define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \
            if ((clock() - g_time > refreshRate) || (g_displayLevel>=4)) \
            { g_time = clock(); DISPLAY(__VA_ARGS__); \
            if (g_displayLevel>=4) fflush(displayOut); } }
static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
static clock_t g_time = 0;


/* *************************************
*  Exceptions
***************************************/
#ifndef DEBUG
#  define DEBUG 0
#endif
#define DEBUGOUTPUT(...) { if (DEBUG) DISPLAY(__VA_ARGS__); }
#define EXM_THROW(error, ...)                                             \
{                                                                         \
    DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \
    DISPLAYLEVEL(1, "Error %i : ", error);                                \
    DISPLAYLEVEL(1, __VA_ARGS__);                                         \
    DISPLAYLEVEL(1, "\n");                                                \
    exit(error);                                                          \
}


/* *************************************
*  Benchmark Parameters
***************************************/
static unsigned g_nbIterations = NBLOOPS;
static size_t g_blockSize = 0;
int g_additionalParam = 0;

void BMK_setNotificationLevel(unsigned level) { g_displayLevel=level; }

void BMK_setAdditionalParam(int additionalParam) { g_additionalParam=additionalParam; }

void BMK_SetNbIterations(unsigned nbLoops)
{
    g_nbIterations = nbLoops;
    DISPLAYLEVEL(3, "- test >= %u seconds per compression / decompression -\n", g_nbIterations);
}

void BMK_SetBlockSize(size_t blockSize)
{
    g_blockSize = blockSize;
    DISPLAYLEVEL(2, "using blocks of size %u KB \n", (unsigned)(blockSize>>10));
}


/* ********************************************************
*  Bench functions
**********************************************************/
#undef MIN
#undef MAX
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))

typedef struct
{
    z_const char* srcPtr;
    size_t srcSize;
    char*  cPtr;
    size_t cRoom;
    size_t cSize;
    char*  resPtr;
    size_t resSize;
} blockParam_t;
typedef enum { BMK_ZSTD, BMK_ZSTD_STREAM, BMK_ZLIB, BMK_ZWRAP_ZLIB, BMK_ZWRAP_ZSTD, BMK_ZLIB_REUSE, BMK_ZWRAP_ZLIB_REUSE, BMK_ZWRAP_ZSTD_REUSE } BMK_compressor;


static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
                        const char* displayName, int cLevel,
                        const size_t* fileSizes, U32 nbFiles,
                        const void* dictBuffer, size_t dictBufferSize, BMK_compressor compressor)
{
    size_t const blockSize = (g_blockSize>=32 ? g_blockSize : srcSize) + (!srcSize) /* avoid div by 0 */ ;
    size_t const avgSize = MIN(g_blockSize, (srcSize / nbFiles));
    U32 const maxNbBlocks = (U32) ((srcSize + (blockSize-1)) / blockSize) + nbFiles;
    blockParam_t* const blockTable = (blockParam_t*) malloc(maxNbBlocks * sizeof(blockParam_t));
    size_t const maxCompressedSize = ZSTD_compressBound(srcSize) + (maxNbBlocks * 1024);   /* add some room for safety */
    void* const compressedBuffer = malloc(maxCompressedSize);
    void* const resultBuffer = malloc(srcSize);
    ZSTD_CCtx* const ctx = ZSTD_createCCtx();
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    U32 nbBlocks;

    /* checks */
    if (!compressedBuffer || !resultBuffer || !blockTable || !ctx || !dctx)
        EXM_THROW(31, "allocation error : not enough memory");

    /* init */
    if (strlen(displayName)>17) displayName += strlen(displayName)-17;   /* can only display 17 characters */

    /* Init blockTable data */
    {   z_const char* srcPtr = (z_const char*)srcBuffer;
        char* cPtr = (char*)compressedBuffer;
        char* resPtr = (char*)resultBuffer;
        U32 fileNb;
        for (nbBlocks=0, fileNb=0; fileNb<nbFiles; fileNb++) {
            size_t remaining = fileSizes[fileNb];
            U32 const nbBlocksforThisFile = (U32)((remaining + (blockSize-1)) / blockSize);
            U32 const blockEnd = nbBlocks + nbBlocksforThisFile;
            for ( ; nbBlocks<blockEnd; nbBlocks++) {
                size_t const thisBlockSize = MIN(remaining, blockSize);
                blockTable[nbBlocks].srcPtr = srcPtr;
                blockTable[nbBlocks].cPtr = cPtr;
                blockTable[nbBlocks].resPtr = resPtr;
                blockTable[nbBlocks].srcSize = thisBlockSize;
                blockTable[nbBlocks].cRoom = ZSTD_compressBound(thisBlockSize);
                srcPtr += thisBlockSize;
                cPtr += blockTable[nbBlocks].cRoom;
                resPtr += thisBlockSize;
                remaining -= thisBlockSize;
    }   }   }

    /* warming up memory */
    RDG_genBuffer(compressedBuffer, maxCompressedSize, 0.10, 0.50, 1);

    /* Bench */
    {   U64 fastestC = (U64)(-1LL), fastestD = (U64)(-1LL);
        U64 const crcOrig = XXH64(srcBuffer, srcSize, 0);
        UTIL_time_t coolTime;
        U64 const maxTime = (g_nbIterations * TIMELOOP_MICROSEC) + 100;
        U64 totalCTime=0, totalDTime=0;
        U32 cCompleted=0, dCompleted=0;
#       define NB_MARKS 4
        const char* const marks[NB_MARKS] = { " |", " /", " =", "\\" };
        U32 markNb = 0;
        size_t cSize = 0;
        double ratio = 0.;
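        /* fastestC / fastestD record the best observed time, in microseconds, for one
         * full pass over all blocks; reported speed is srcSize / fastest, which is
         * directly MB/s since bytes per microsecond equal megabytes per second. */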

        coolTime = UTIL_getTime();
        DISPLAYLEVEL(2, "\r%79s\r", "");
        while (!cCompleted | !dCompleted) {
            UTIL_time_t clockStart;
            U64 clockLoop = g_nbIterations ? TIMELOOP_MICROSEC : 1;

            /* overheat protection */
            if (UTIL_clockSpanMicro(coolTime) > ACTIVEPERIOD_MICROSEC) {
                DISPLAYLEVEL(2, "\rcooling down ...    \r");
                UTIL_sleep(COOLPERIOD_SEC);
                coolTime = UTIL_getTime();
            }

            /* Compression */
            DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->\r", marks[markNb], displayName, (unsigned)srcSize);
            if (!cCompleted) memset(compressedBuffer, 0xE5, maxCompressedSize);   /* warm up and erase result buffer */

            UTIL_sleepMilli(1);   /* give processor time to other processes */
            UTIL_waitForNextTick();
            clockStart = UTIL_getTime();

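            /* Each branch below runs its compressor repeatedly over all blocks for at
             * least clockLoop microseconds, counting passes in nbLoops, so the per-pass
             * time can be derived from the total elapsed span. */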
            if (!cCompleted) {   /* still some time to do compression tests */
                U32 nbLoops = 0;
                if (compressor == BMK_ZSTD) {
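                    /* Block API : one-shot compression of each block with ZSTD_compressCCtx(),
                     * or with a pre-digested CDict when a dictionary is provided. */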
                    ZSTD_parameters const zparams = ZSTD_getParams(cLevel, avgSize, dictBufferSize);
                    ZSTD_customMem const cmem = { NULL, NULL, NULL };
                    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dct_auto, zparams.cParams, cmem);
                    if (cdict==NULL) EXM_THROW(1, "ZSTD_createCDict_advanced() allocation failure");

                    do {
                        U32 blockNb;
                        size_t rSize;
                        for (blockNb=0; blockNb<nbBlocks; blockNb++) {
                            if (dictBufferSize) {
                                rSize = ZSTD_compress_usingCDict(ctx,
                                                blockTable[blockNb].cPtr,  blockTable[blockNb].cRoom,
                                                blockTable[blockNb].srcPtr, blockTable[blockNb].srcSize,
                                                cdict);
                            } else {
                                rSize = ZSTD_compressCCtx (ctx,
                                                blockTable[blockNb].cPtr,  blockTable[blockNb].cRoom,
                                                blockTable[blockNb].srcPtr, blockTable[blockNb].srcSize, cLevel);
                            }
                            if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_compress_usingCDict() failed : %s", ZSTD_getErrorName(rSize));
                            blockTable[blockNb].cSize = rSize;
                        }
                        nbLoops++;
                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                    ZSTD_freeCDict(cdict);
                } else if (compressor == BMK_ZSTD_STREAM) {
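                    /* Streaming API : the CStream is created and configured once, then reset
                     * and fed each block through ZSTD_compressStream() / ZSTD_endStream(). */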
                    ZSTD_parameters const zparams = ZSTD_getParams(cLevel, avgSize, dictBufferSize);
                    ZSTD_inBuffer inBuffer;
                    ZSTD_outBuffer outBuffer;
                    ZSTD_CStream* zbc = ZSTD_createCStream();
                    size_t rSize;
                    ZSTD_CCtx_params* cctxParams = ZSTD_createCCtxParams();

                    if (!cctxParams) EXM_THROW(1, "ZSTD_createCCtxParams() allocation failure");
                    if (zbc == NULL) EXM_THROW(1, "ZSTD_createCStream() allocation failure");

                    {   int initErr = 0;
                        initErr |= ZSTD_isError(ZSTD_CCtx_reset(zbc, ZSTD_reset_session_only));
                        initErr |= ZSTD_isError(ZSTD_CCtxParams_init_advanced(cctxParams, zparams));
                        initErr |= ZSTD_isError(ZSTD_CCtx_setParametersUsingCCtxParams(zbc, cctxParams));
                        initErr |= ZSTD_isError(ZSTD_CCtx_setPledgedSrcSize(zbc, avgSize));
                        initErr |= ZSTD_isError(ZSTD_CCtx_loadDictionary(zbc, dictBuffer, dictBufferSize));

                        ZSTD_freeCCtxParams(cctxParams);
                        if (initErr) EXM_THROW(1, "CCtx init failed!");
                    }

                    do {
                        U32 blockNb;
                        for (blockNb=0; blockNb<nbBlocks; blockNb++) {
                            rSize = ZSTD_CCtx_reset(zbc, ZSTD_reset_session_only);
                            if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_CCtx_reset() failed : %s", ZSTD_getErrorName(rSize));
                            rSize = ZSTD_CCtx_setPledgedSrcSize(zbc, blockTable[blockNb].srcSize);
                            if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_CCtx_setPledgedSrcSize() failed : %s", ZSTD_getErrorName(rSize));
                            inBuffer.src = blockTable[blockNb].srcPtr;
                            inBuffer.size = blockTable[blockNb].srcSize;
                            inBuffer.pos = 0;
                            outBuffer.dst = blockTable[blockNb].cPtr;
                            outBuffer.size = blockTable[blockNb].cRoom;
                            outBuffer.pos = 0;
                            rSize = ZSTD_compressStream(zbc, &outBuffer, &inBuffer);
                            if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_compressStream() failed : %s", ZSTD_getErrorName(rSize));
                            rSize = ZSTD_endStream(zbc, &outBuffer);
                            if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_endStream() failed : %s", ZSTD_getErrorName(rSize));
                            blockTable[blockNb].cSize = outBuffer.pos;
                        }
                        nbLoops++;
                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                    ZSTD_freeCStream(zbc);
                } else if (compressor == BMK_ZWRAP_ZLIB_REUSE || compressor == BMK_ZWRAP_ZSTD_REUSE || compressor == BMK_ZLIB_REUSE) {
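                    /* zlib / zlibWrapper with a reused z_stream : deflateInit() happens once,
                     * then each block only needs a reset (and, for genuine zlib, a fresh
                     * deflateSetDictionary() call, since a plain reset discards the dictionary). */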
                    z_stream def;
                    int ret;
                    int useSetDict = (dictBuffer != NULL);
                    if (compressor == BMK_ZLIB_REUSE || compressor == BMK_ZWRAP_ZLIB_REUSE) ZWRAP_useZSTDcompression(0);
                    else ZWRAP_useZSTDcompression(1);
                    def.zalloc = Z_NULL;
                    def.zfree = Z_NULL;
                    def.opaque = Z_NULL;
                    ret = deflateInit(&def, cLevel);
                    if (ret != Z_OK) EXM_THROW(1, "deflateInit failure");
                 /* if (ZWRAP_isUsingZSTDcompression()) {
                        ret = ZWRAP_setPledgedSrcSize(&def, avgSize);
                        if (ret != Z_OK) EXM_THROW(1, "ZWRAP_setPledgedSrcSize failure");
                    } */
                    do {
                        U32 blockNb;
                        for (blockNb=0; blockNb<nbBlocks; blockNb++) {
                            if (ZWRAP_isUsingZSTDcompression())
                                ret = ZWRAP_deflateReset_keepDict(&def);   /* reuse dictionary to make compression faster */
                            else
                                ret = deflateReset(&def);
                            if (ret != Z_OK) EXM_THROW(1, "deflateReset failure");
                            if (useSetDict) {
                                ret = deflateSetDictionary(&def, (const z_Bytef*)dictBuffer, dictBufferSize);
                                if (ret != Z_OK) EXM_THROW(1, "deflateSetDictionary failure");
                                if (ZWRAP_isUsingZSTDcompression()) useSetDict = 0;   /* zstd doesn't require deflateSetDictionary after ZWRAP_deflateReset_keepDict */
                            }
                            def.next_in = (z_const z_Bytef*) blockTable[blockNb].srcPtr;
                            def.avail_in = (uInt)blockTable[blockNb].srcSize;
                            def.total_in = 0;
                            def.next_out = (z_Bytef*) blockTable[blockNb].cPtr;
                            def.avail_out = (uInt)blockTable[blockNb].cRoom;
                            def.total_out = 0;
                            ret = deflate(&def, Z_FINISH);
                            if (ret != Z_STREAM_END) EXM_THROW(1, "deflate failure ret=%d srcSize=%d" , ret, (int)blockTable[blockNb].srcSize);
                            blockTable[blockNb].cSize = def.total_out;
                        }
                        nbLoops++;
                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                    ret = deflateEnd(&def);
                    if (ret != Z_OK) EXM_THROW(1, "deflateEnd failure");
                } else {
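                    /* zlib / zlibWrapper without context reuse : the z_stream is fully
                     * re-initialized (deflateInit .. deflateEnd) for every block, so the
                     * measured time includes per-block setup overhead. */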
                    z_stream def;
                    if (compressor == BMK_ZLIB || compressor == BMK_ZWRAP_ZLIB) ZWRAP_useZSTDcompression(0);
                    else ZWRAP_useZSTDcompression(1);
                    do {
                        U32 blockNb;
                        for (blockNb=0; blockNb<nbBlocks; blockNb++) {
                            int ret;
                            def.zalloc = Z_NULL;
                            def.zfree = Z_NULL;
                            def.opaque = Z_NULL;
                            ret = deflateInit(&def, cLevel);
                            if (ret != Z_OK) EXM_THROW(1, "deflateInit failure");
                            if (dictBuffer) {
                                ret = deflateSetDictionary(&def, (const z_Bytef*)dictBuffer, dictBufferSize);
                                if (ret != Z_OK) EXM_THROW(1, "deflateSetDictionary failure");
                            }
                            def.next_in = (z_const z_Bytef*) blockTable[blockNb].srcPtr;
                            def.avail_in = (uInt)blockTable[blockNb].srcSize;
                            def.total_in = 0;
                            def.next_out = (z_Bytef*) blockTable[blockNb].cPtr;
                            def.avail_out = (uInt)blockTable[blockNb].cRoom;
                            def.total_out = 0;
                            ret = deflate(&def, Z_FINISH);
                            if (ret != Z_STREAM_END) EXM_THROW(1, "deflate failure");
                            ret = deflateEnd(&def);
                            if (ret != Z_OK) EXM_THROW(1, "deflateEnd failure");
                            blockTable[blockNb].cSize = def.total_out;
                        }
                        nbLoops++;
                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                }
                {   U64 const clockSpan = UTIL_clockSpanMicro(clockStart);
                    if (clockSpan < fastestC*nbLoops) fastestC = clockSpan / nbLoops;
                    totalCTime += clockSpan;
                    cCompleted = totalCTime>maxTime;
            }   }

            cSize = 0;
            { U32 blockNb; for (blockNb=0; blockNb<nbBlocks; blockNb++) cSize += blockTable[blockNb].cSize; }
            ratio = (double)srcSize / (double)cSize;
            markNb = (markNb+1) % NB_MARKS;
            DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.3f),%6.1f MB/s\r",
                    marks[markNb], displayName, (unsigned)srcSize, (unsigned)cSize, ratio,
                    (double)srcSize / fastestC );

            (void)fastestD; (void)crcOrig;   /* unused when decompression disabled */
#if 1
            /* Decompression */
            if (!dCompleted) memset(resultBuffer, 0xD6, srcSize);   /* warm result buffer */

            UTIL_sleepMilli(1);   /* give processor time to other processes */
            UTIL_waitForNextTick();
            clockStart = UTIL_getTime();

            if (!dCompleted) {
                U32 nbLoops = 0;
                if (compressor == BMK_ZSTD) {
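                    /* Block API decompression : each block is regenerated with a single call
                     * to ZSTD_decompress_usingDDict(), using a pre-digested DDict. */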
                    ZSTD_DDict* ddict = ZSTD_createDDict(dictBuffer, dictBufferSize);
                    if (!ddict) EXM_THROW(2, "ZSTD_createDDict() allocation failure");
                    do {
                        unsigned blockNb;
                        for (blockNb=0; blockNb<nbBlocks; blockNb++) {
                            size_t const regenSize = ZSTD_decompress_usingDDict(dctx,
                                blockTable[blockNb].resPtr, blockTable[blockNb].srcSize,
                                blockTable[blockNb].cPtr, blockTable[blockNb].cSize,
                                ddict);
                            if (ZSTD_isError(regenSize)) {
                                DISPLAY("ZSTD_decompress_usingDDict() failed on block %u : %s  \n",
                                        blockNb, ZSTD_getErrorName(regenSize));
                                clockLoop = 0;   /* force immediate test end */
                                break;
                            }
                            blockTable[blockNb].resSize = regenSize;
                        }
                        nbLoops++;
                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                    ZSTD_freeDDict(ddict);
                } else if (compressor == BMK_ZSTD_STREAM) {
                    ZSTD_inBuffer inBuffer;
                    ZSTD_outBuffer outBuffer;
                    ZSTD_DStream* zbd = ZSTD_createDStream();
                    size_t rSize;
                    if (zbd == NULL) EXM_THROW(1, "ZSTD_createDStream() allocation failure");
                    rSize = ZSTD_initDStream_usingDict(zbd, dictBuffer, dictBufferSize);
                    if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_initDStream() failed : %s", ZSTD_getErrorName(rSize));
                    do {
                        U32 blockNb;
                        for (blockNb=0; blockNb<nbBlocks; blockNb++) {
                            rSize = ZSTD_DCtx_reset(zbd, ZSTD_reset_session_only);
                            if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_DCtx_reset() failed : %s", ZSTD_getErrorName(rSize));
                            inBuffer.src = blockTable[blockNb].cPtr;
                            inBuffer.size = blockTable[blockNb].cSize;
                            inBuffer.pos = 0;
                            outBuffer.dst = blockTable[blockNb].resPtr;
                            outBuffer.size = blockTable[blockNb].srcSize;
                            outBuffer.pos = 0;
                            rSize = ZSTD_decompressStream(zbd, &outBuffer, &inBuffer);
                            if (ZSTD_isError(rSize)) EXM_THROW(1, "ZSTD_decompressStream() failed : %s", ZSTD_getErrorName(rSize));
                            blockTable[blockNb].resSize = outBuffer.pos;
                        }
                        nbLoops++;
                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                    ZSTD_freeDStream(zbd);
                } else if (compressor == BMK_ZWRAP_ZLIB_REUSE || compressor == BMK_ZWRAP_ZSTD_REUSE || compressor == BMK_ZLIB_REUSE) {
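                    /* zlib / zlibWrapper decompression with a reused z_stream : inflateInit()
                     * once, then only a reset per block; with a dictionary, inflate() returns
                     * Z_NEED_DICT and is retried after inflateSetDictionary(). */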
                    z_stream inf;
                    int ret;
                    if (compressor == BMK_ZLIB_REUSE) ZWRAP_setDecompressionType(ZWRAP_FORCE_ZLIB);
                    else ZWRAP_setDecompressionType(ZWRAP_AUTO);
                    inf.zalloc = Z_NULL;
                    inf.zfree = Z_NULL;
                    inf.opaque = Z_NULL;
                    ret = inflateInit(&inf);
                    if (ret != Z_OK) EXM_THROW(1, "inflateInit failure");
                    do {
                        U32 blockNb;
                        for (blockNb=0; blockNb<nbBlocks; blockNb++) {
                            if (ZWRAP_isUsingZSTDdecompression(&inf))
                                ret = ZWRAP_inflateReset_keepDict(&inf);   /* reuse dictionary to make decompression faster; inflate will return Z_NEED_DICT only for the first time */
                            else
                                ret = inflateReset(&inf);
                            if (ret != Z_OK) EXM_THROW(1, "inflateReset failure");
                            inf.next_in = (z_const z_Bytef*) blockTable[blockNb].cPtr;
                            inf.avail_in = (uInt)blockTable[blockNb].cSize;
                            inf.total_in = 0;
                            inf.next_out = (z_Bytef*) blockTable[blockNb].resPtr;
                            inf.avail_out = (uInt)blockTable[blockNb].srcSize;
                            inf.total_out = 0;
                            ret = inflate(&inf, Z_FINISH);
                            if (ret == Z_NEED_DICT) {
                                ret = inflateSetDictionary(&inf, (const z_Bytef*)dictBuffer, dictBufferSize);
                                if (ret != Z_OK) EXM_THROW(1, "inflateSetDictionary failure");
                                ret = inflate(&inf, Z_FINISH);
                            }
                            if (ret != Z_STREAM_END) EXM_THROW(1, "inflate failure");
                            blockTable[blockNb].resSize = inf.total_out;
                        }
                        nbLoops++;
                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                    ret = inflateEnd(&inf);
                    if (ret != Z_OK) EXM_THROW(1, "inflateEnd failure");
                } else {
                    z_stream inf;
                    if (compressor == BMK_ZLIB) ZWRAP_setDecompressionType(ZWRAP_FORCE_ZLIB);
                    else ZWRAP_setDecompressionType(ZWRAP_AUTO);
                    do {
                        U32 blockNb;
                        for (blockNb=0; blockNb<nbBlocks; blockNb++) {
                            int ret;
                            inf.zalloc = Z_NULL;
                            inf.zfree = Z_NULL;
                            inf.opaque = Z_NULL;
                            ret = inflateInit(&inf);
                            if (ret != Z_OK) EXM_THROW(1, "inflateInit failure");
                            inf.next_in = (z_const z_Bytef*) blockTable[blockNb].cPtr;
                            inf.avail_in = (uInt)blockTable[blockNb].cSize;
                            inf.total_in = 0;
                            inf.next_out = (z_Bytef*) blockTable[blockNb].resPtr;
                            inf.avail_out = (uInt)blockTable[blockNb].srcSize;
                            inf.total_out = 0;
                            ret = inflate(&inf, Z_FINISH);
                            if (ret == Z_NEED_DICT) {
                                ret = inflateSetDictionary(&inf, (const z_Bytef*) dictBuffer, dictBufferSize);
                                if (ret != Z_OK) EXM_THROW(1, "inflateSetDictionary failure");
                                ret = inflate(&inf, Z_FINISH);
                            }
                            if (ret != Z_STREAM_END) EXM_THROW(1, "inflate failure");
                            ret = inflateEnd(&inf);
                            if (ret != Z_OK) EXM_THROW(1, "inflateEnd failure");
                            blockTable[blockNb].resSize = inf.total_out;
                        }
                        nbLoops++;
                    } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
                }
                {   U64 const clockSpan = UTIL_clockSpanMicro(clockStart);
                    if (clockSpan < fastestD*nbLoops) fastestD = clockSpan / nbLoops;
                    totalDTime += clockSpan;
                    dCompleted = totalDTime>maxTime;
            }   }

            markNb = (markNb+1) % NB_MARKS;
            DISPLAYLEVEL(2, "%2s-%-17.17s :%10u ->%10u (%5.3f),%6.1f MB/s ,%6.1f MB/s\r",
                    marks[markNb], displayName, (unsigned)srcSize, (unsigned)cSize, ratio,
                    (double)srcSize / fastestC,
                    (double)srcSize / fastestD );

            /* CRC Checking */
            {   U64 const crcCheck = XXH64(resultBuffer, srcSize, 0);
                if (crcOrig!=crcCheck) {
                    size_t u;
                    DISPLAY("!!! WARNING !!! %14s : Invalid Checksum : %x != %x   \n", displayName, (unsigned)crcOrig, (unsigned)crcCheck);
                    for (u=0; u<srcSize; u++) {
                        if (((const BYTE*)srcBuffer)[u] != ((const BYTE*)resultBuffer)[u]) {
                            unsigned segNb, bNb, pos;
                            size_t bacc = 0;
                            DISPLAY("Decoding error at pos %u ", (unsigned)u);
                            for (segNb = 0; segNb < nbBlocks; segNb++) {
                                if (bacc + blockTable[segNb].srcSize > u) break;
                                bacc += blockTable[segNb].srcSize;
                            }
                            pos = (U32)(u - bacc);
                            bNb = pos / (128 KB);
                            DISPLAY("(block %u, sub %u, pos %u) \n", segNb, bNb, pos);
                            break;
                        }
                        if (u==srcSize-1) {   /* should never happen */
                            DISPLAY("no difference detected\n");
                    }   }
                    break;
            }   }   /* CRC Checking */
#endif
        }   /* while (!cCompleted | !dCompleted) */

        if (g_displayLevel == 1) {
            double cSpeed = (double)srcSize / fastestC;
            double dSpeed = (double)srcSize / fastestD;
            if (g_additionalParam)
                DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s  %s (param=%d)\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName, g_additionalParam);
            else
                DISPLAY("-%-3i%11i (%5.3f) %6.2f MB/s %6.1f MB/s  %s\n", cLevel, (int)cSize, ratio, cSpeed, dSpeed, displayName);
        }
        DISPLAYLEVEL(2, "%2i#\n", cLevel);
    }   /* Bench */

    /* clean up */
    free(blockTable);
    free(compressedBuffer);
    free(resultBuffer);
    ZSTD_freeCCtx(ctx);
    ZSTD_freeDCtx(dctx);
    return 0;
}


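/* BMK_findMaxMem() :
 * probes for the largest allocatable buffer near `requiredMem` by attempting
 * malloc() and stepping the request down by 64 MB until an allocation succeeds. */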
static size_t BMK_findMaxMem(U64 requiredMem)
{
    size_t const step = 64 MB;
    BYTE* testmem = NULL;

    requiredMem = (((requiredMem >> 26) + 1) << 26);
    requiredMem += step;
    if (requiredMem > maxMemory) requiredMem = maxMemory;

    do {
        testmem = (BYTE*)malloc((size_t)requiredMem);
        requiredMem -= step;
    } while (!testmem && requiredMem);   /* do not allocate zero bytes */

    free(testmem);
    return (size_t)(requiredMem+1);   /* avoid zero */
}

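/* BMK_benchCLevel() :
 * runs the full scenario matrix for levels cLevel..cLevelLast : zstd streaming,
 * zstd block API, zlibWrapper in zstd mode (with and without context reuse),
 * then zlib and the zlibWrapper in zlib mode, capping zlib levels at Z_BEST_COMPRESSION. */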
static void BMK_benchCLevel(void* srcBuffer, size_t benchedSize,
                            const char* displayName, int cLevel, int cLevelLast,
                            const size_t* fileSizes, unsigned nbFiles,
                            const void* dictBuffer, size_t dictBufferSize)
{
    int l;

    const char* pch = strrchr(displayName, '\\');   /* Windows */
    if (!pch) pch = strrchr(displayName, '/');      /* Linux */
    if (pch) displayName = pch+1;

    SET_REALTIME_PRIORITY;

    if (g_displayLevel == 1 && !g_additionalParam)
        DISPLAY("bench %s %s: input %u bytes, %u seconds, %u KB blocks\n",
                ZSTD_VERSION_STRING, ZSTD_GIT_COMMIT_STRING,
                (unsigned)benchedSize, g_nbIterations, (unsigned)(g_blockSize>>10));

    if (cLevelLast < cLevel) cLevelLast = cLevel;

    DISPLAY("benchmarking zstd %s (using ZSTD_CStream)\n", ZSTD_VERSION_STRING);
    for (l=cLevel; l <= cLevelLast; l++) {
        BMK_benchMem(srcBuffer, benchedSize,
                     displayName, l,
                     fileSizes, nbFiles,
                     dictBuffer, dictBufferSize, BMK_ZSTD_STREAM);
    }

    DISPLAY("benchmarking zstd %s (using ZSTD_CCtx)\n", ZSTD_VERSION_STRING);
    for (l=cLevel; l <= cLevelLast; l++) {
        BMK_benchMem(srcBuffer, benchedSize,
                     displayName, l,
                     fileSizes, nbFiles,
                     dictBuffer, dictBufferSize, BMK_ZSTD);
    }

    DISPLAY("benchmarking zstd %s (using zlibWrapper)\n", ZSTD_VERSION_STRING);
    for (l=cLevel; l <= cLevelLast; l++) {
        BMK_benchMem(srcBuffer, benchedSize,
                     displayName, l,
                     fileSizes, nbFiles,
                     dictBuffer, dictBufferSize, BMK_ZWRAP_ZSTD_REUSE);
    }

    DISPLAY("benchmarking zstd %s (zlibWrapper not reusing a context)\n", ZSTD_VERSION_STRING);
    for (l=cLevel; l <= cLevelLast; l++) {
        BMK_benchMem(srcBuffer, benchedSize,
                     displayName, l,
                     fileSizes, nbFiles,
                     dictBuffer, dictBufferSize, BMK_ZWRAP_ZSTD);
    }


    if (cLevelLast > Z_BEST_COMPRESSION) cLevelLast = Z_BEST_COMPRESSION;

    DISPLAY("\n");
    DISPLAY("benchmarking zlib %s\n", ZLIB_VERSION);
    for (l=cLevel; l <= cLevelLast; l++) {
        BMK_benchMem(srcBuffer, benchedSize,
                     displayName, l,
                     fileSizes, nbFiles,
                     dictBuffer, dictBufferSize, BMK_ZLIB_REUSE);
    }

    DISPLAY("benchmarking zlib %s (zlib not reusing a context)\n", ZLIB_VERSION);
    for (l=cLevel; l <= cLevelLast; l++) {
        BMK_benchMem(srcBuffer, benchedSize,
                     displayName, l,
                     fileSizes, nbFiles,
                     dictBuffer, dictBufferSize, BMK_ZLIB);
    }

    DISPLAY("benchmarking zlib %s (using zlibWrapper)\n", ZLIB_VERSION);
    for (l=cLevel; l <= cLevelLast; l++) {
        BMK_benchMem(srcBuffer, benchedSize,
                     displayName, l,
                     fileSizes, nbFiles,
                     dictBuffer, dictBufferSize, BMK_ZWRAP_ZLIB_REUSE);
    }

    DISPLAY("benchmarking zlib %s (zlibWrapper not reusing a context)\n", ZLIB_VERSION);
    for (l=cLevel; l <= cLevelLast; l++) {
        BMK_benchMem(srcBuffer, benchedSize,
                     displayName, l,
                     fileSizes, nbFiles,
                     dictBuffer, dictBufferSize, BMK_ZWRAP_ZLIB);
    }
}


/*! BMK_loadFiles() :
    Loads `buffer` with content of files listed within `fileNamesTable`.
    At most, fills `buffer` entirely */
static void BMK_loadFiles(void* buffer, size_t bufferSize,
                          size_t* fileSizes,
                          const char** fileNamesTable, unsigned nbFiles)
{
    size_t pos = 0, totalSize = 0;
    unsigned n;
    for (n=0; n<nbFiles; n++) {
        FILE* f;
        U64 fileSize = UTIL_getFileSize(fileNamesTable[n]);
        if (UTIL_isDirectory(fileNamesTable[n])) {
            DISPLAYLEVEL(2, "Ignoring %s directory...       \n", fileNamesTable[n]);
            fileSizes[n] = 0;
            continue;
        }
        if (fileSize == UTIL_FILESIZE_UNKNOWN) {
            DISPLAYLEVEL(2, "Cannot determine size of %s ...    \n", fileNamesTable[n]);
            fileSizes[n] = 0;
            continue;
        }
        f = fopen(fileNamesTable[n], "rb");
        if (f==NULL) EXM_THROW(10, "impossible to open file %s", fileNamesTable[n]);
        DISPLAYUPDATE(2, "Loading %s...       \r", fileNamesTable[n]);
        if (fileSize > bufferSize-pos) fileSize = bufferSize-pos, nbFiles=n;   /* buffer too small - stop after this file */
        {   size_t const readSize = fread(((char*)buffer)+pos, 1, (size_t)fileSize, f);
            if (readSize != (size_t)fileSize) EXM_THROW(11, "could not read %s", fileNamesTable[n]);
            pos += readSize;  }
        fileSizes[n] = (size_t)fileSize;
        totalSize += (size_t)fileSize;
        fclose(f);
    }

    if (totalSize == 0) EXM_THROW(12, "no data to bench");
}

static void BMK_benchFileTable(const char** fileNamesTable, unsigned nbFiles,
                               const char* dictFileName, int cLevel, int cLevelLast)
{
    void* srcBuffer;
    size_t benchedSize;
    void* dictBuffer = NULL;
    size_t dictBufferSize = 0;
    size_t* fileSizes = (size_t*)malloc(nbFiles * sizeof(size_t));
    U64 const totalSizeToLoad = UTIL_getTotalFileSize(fileNamesTable, nbFiles);
    char mfName[20] = {0};

    if (!fileSizes) EXM_THROW(12, "not enough memory for fileSizes");

    /* Load dictionary */
    if (dictFileName != NULL) {
        U64 const dictFileSize = UTIL_getFileSize(dictFileName);
        if (dictFileSize > 64 MB)
            EXM_THROW(10, "dictionary file %s too large", dictFileName);
        dictBufferSize = (size_t)dictFileSize;
        dictBuffer = malloc(dictBufferSize);
        if (dictBuffer==NULL)
            EXM_THROW(11, "not enough memory for dictionary (%u bytes)", (unsigned)dictBufferSize);
        BMK_loadFiles(dictBuffer, dictBufferSize, fileSizes, &dictFileName, 1);
    }

    /* Memory allocation & restrictions */
    benchedSize = BMK_findMaxMem(totalSizeToLoad * 3) / 3;
    if ((U64)benchedSize > totalSizeToLoad) benchedSize = (size_t)totalSizeToLoad;
    if (benchedSize < totalSizeToLoad)
        DISPLAY("Not enough memory; testing %u MB only...\n", (unsigned)(benchedSize >> 20));
    srcBuffer = malloc(benchedSize + !benchedSize);
    if (!srcBuffer) EXM_THROW(12, "not enough memory");

    /* Load input buffer */
    BMK_loadFiles(srcBuffer, benchedSize, fileSizes, fileNamesTable, nbFiles);

    /* Bench */
    snprintf (mfName, sizeof(mfName), " %u files", nbFiles);
    {   const char* displayName = (nbFiles > 1) ? mfName : fileNamesTable[0];
        BMK_benchCLevel(srcBuffer, benchedSize,
                        displayName, cLevel, cLevelLast,
                        fileSizes, nbFiles,
                        dictBuffer, dictBufferSize);
    }

    /* clean up */
    free(srcBuffer);
    free(dictBuffer);
    free(fileSizes);
}


static void BMK_syntheticTest(int cLevel, int cLevelLast, double compressibility)
{
    char name[20] = {0};
    size_t benchedSize = 10000000;
    void* const srcBuffer = malloc(benchedSize);

    /* Memory allocation */
    if (!srcBuffer) EXM_THROW(21, "not enough memory");

    /* Fill input buffer */
    RDG_genBuffer(srcBuffer, benchedSize, compressibility, 0.0, 0);

    /* Bench */
    snprintf (name, sizeof(name), "Synthetic %2u%%", (unsigned)(compressibility*100));
    BMK_benchCLevel(srcBuffer, benchedSize, name, cLevel, cLevelLast, &benchedSize, 1, NULL, 0);

    /* clean up */
    free(srcBuffer);
}


int BMK_benchFiles(const char** fileNamesTable, unsigned nbFiles,
                   const char* dictFileName, int cLevel, int cLevelLast)
{
    double const compressibility = (double)g_compressibilityDefault / 100;

    if (nbFiles == 0)
        BMK_syntheticTest(cLevel, cLevelLast, compressibility);
    else
        BMK_benchFileTable(fileNamesTable, nbFiles, dictFileName, cLevel, cLevelLast);
    return 0;
}




/*-************************************
*  Command Line
**************************************/
static int usage(const char* programName)
{
    DISPLAY(WELCOME_MESSAGE);
    DISPLAY( "Usage :\n");
    DISPLAY( "      %s [args] [FILE(s)] [-o file]\n", programName);
    DISPLAY( "\n");
    DISPLAY( "FILE    : a filename\n");
    DISPLAY( "          with no FILE, or when FILE is - , read standard input\n");
    DISPLAY( "Arguments :\n");
    DISPLAY( " -D file: use `file` as Dictionary \n");
    DISPLAY( " -h/-H  : display help/long help and exit\n");
    DISPLAY( " -V     : display Version number and exit\n");
    DISPLAY( " -v     : verbose mode; specify multiple times to increase log level (default:%d)\n", DEFAULT_DISPLAY_LEVEL);
    DISPLAY( " -q     : suppress warnings; specify twice to suppress errors too\n");
#ifdef UTIL_HAS_CREATEFILELIST
    DISPLAY( " -r     : operate recursively on directories\n");
#endif
    DISPLAY( "\n");
    DISPLAY( "Benchmark arguments :\n");
    DISPLAY( " -b#    : benchmark file(s), using # compression level (default : %d) \n", ZSTDCLI_CLEVEL_DEFAULT);
    DISPLAY( " -e#    : test all compression levels from -bX to # (default: %d)\n", ZSTDCLI_CLEVEL_DEFAULT);
    DISPLAY( " -i#    : minimum evaluation time in seconds (default : 3s)\n");
    DISPLAY( " -B#    : cut file into independent blocks of size # (default: no block)\n");
    return 0;
}

static int badusage(const char* programName)
{
    DISPLAYLEVEL(1, "Incorrect parameters\n");
    if (g_displayLevel >= 1) usage(programName);
    return 1;
}

static void waitEnter(void)
{
    int unused;
    DISPLAY("Press enter to continue...\n");
    unused = getchar();
    (void)unused;
}

/*! readU32FromChar() :
    @return : unsigned integer value read from input in `char` format
    Will also modify `*stringPtr`, advancing it to position where it stopped reading.
    Note : this function can overflow if digit string > MAX_UINT */
static unsigned readU32FromChar(const char** stringPtr)
{
    unsigned result = 0;
    while ((**stringPtr >='0') && (**stringPtr <='9'))
        result *= 10, result += (unsigned)(**stringPtr - '0'), (*stringPtr)++ ;
    return result;
}


#define CLEAN_RETURN(i) { operationResult = (i); goto _end; }

int main(int argCount, char** argv)
{
    int argNb,
        main_pause=0,
        nextEntryIsDictionary=0,
        operationResult=0,
        nextArgumentIsFile=0;
    int cLevel = ZSTDCLI_CLEVEL_DEFAULT;
    int cLevelLast = 1;
    unsigned recursive = 0;
    FileNamesTable* filenames = UTIL_allocateFileNamesTable((size_t)argCount);
    const char* programName = argv[0];
    const char* dictFileName = NULL;
    char* dynNameSpace = NULL;

    /* init */
    if (filenames==NULL) { DISPLAY("zstd: %s \n", strerror(errno)); exit(1); }
    displayOut = stderr;

    /* Pick out program name from path. Don't rely on stdlib because of conflicting behavior */
    {   size_t pos;
        for (pos = strlen(programName); pos > 0; pos--) { if (programName[pos] == '/') { pos++; break; } }
        programName += pos;
    }

    /* command switches */
    for(argNb=1; argNb<argCount; argNb++) {
        const char* argument = argv[argNb];
        if(!argument) continue;   /* Protection if argument empty */

        if (nextArgumentIsFile==0) {

            /* long commands (--long-word) */
            if (!strcmp(argument, "--")) { nextArgumentIsFile=1; continue; }
            if (!strcmp(argument, "--version")) { displayOut=stdout; DISPLAY(WELCOME_MESSAGE); CLEAN_RETURN(0); }
            if (!strcmp(argument, "--help")) { displayOut=stdout; CLEAN_RETURN(usage(programName)); }
            if (!strcmp(argument, "--verbose")) { g_displayLevel++; continue; }
            if (!strcmp(argument, "--quiet")) { g_displayLevel--; continue; }

            /* Decode commands (note : aggregated commands are allowed) */
            if (argument[0]=='-') {
                argument++;

                while (argument[0]!=0) {
                    switch(argument[0])
                    {
                        /* Display help */
                    case 'V': displayOut=stdout; DISPLAY(WELCOME_MESSAGE); CLEAN_RETURN(0);   /* Version Only */
                    case 'H':
                    case 'h': displayOut=stdout; CLEAN_RETURN(usage(programName));

                        /* Use file content as dictionary */
                    case 'D': nextEntryIsDictionary = 1; argument++; break;

                        /* Verbose mode */
                    case 'v': g_displayLevel++; argument++; break;

                        /* Quiet mode */
                    case 'q': g_displayLevel--; argument++; break;

#ifdef UTIL_HAS_CREATEFILELIST
                        /* recursive */
                    case 'r': recursive=1; argument++; break;
#endif

                        /* Benchmark */
                    case 'b':
                        /* first compression Level */
                        argument++;
                        cLevel = (int)readU32FromChar(&argument);
                        break;

                        /* range bench (benchmark only) */
                    case 'e':
                        /* last compression Level */
                        argument++;
                        cLevelLast = (int)readU32FromChar(&argument);
                        break;

                        /* Modify Nb Iterations (benchmark only) */
                    case 'i':
                        argument++;
                        {   U32 const iters = readU32FromChar(&argument);
                            BMK_setNotificationLevel(g_displayLevel);
                            BMK_SetNbIterations(iters);
                        }
                        break;

                        /* cut input into blocks (benchmark only) */
                    case 'B':
                        argument++;
                        {   size_t bSize = readU32FromChar(&argument);
                            if (toupper(*argument)=='K') bSize<<=10, argument++;   /* allows using KB notation */
                            if (toupper(*argument)=='M') bSize<<=20, argument++;
                            if (toupper(*argument)=='B') argument++;
                            BMK_setNotificationLevel(g_displayLevel);
                            BMK_SetBlockSize(bSize);
                        }
                        break;

                        /* Pause at the end (-p) or set an additional param (-p#) (hidden option) */
                    case 'p': argument++;
                        if ((*argument>='0') && (*argument<='9')) {
                            BMK_setAdditionalParam((int)readU32FromChar(&argument));
                        } else
                            main_pause=1;
                        break;

                        /* unknown command */
                    default : CLEAN_RETURN(badusage(programName));
                    }
                }
                continue;
            }   /* if (argument[0]=='-') */

        }   /* if (nextArgumentIsFile==0) */

        if (nextEntryIsDictionary) {
            nextEntryIsDictionary = 0;
            dictFileName = argument;
            continue;
        }

        /* add filename to list */
        UTIL_refFilename(filenames, argument);
    }

    /* Welcome message (if verbose) */
    DISPLAYLEVEL(3, WELCOME_MESSAGE);

#ifdef UTIL_HAS_CREATEFILELIST
    if (recursive) {
        UTIL_expandFNT(&filenames, 1);
    }
#endif

    BMK_setNotificationLevel(g_displayLevel);
    BMK_benchFiles(filenames->fileNames, (unsigned)filenames->tableSize, dictFileName, cLevel, cLevelLast);

_end:
    if (main_pause) waitEnter();
    free(dynNameSpace);
    UTIL_freeFileNamesTable(filenames);
    return operationResult;
}