/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* *****************************************************************************
 * Constructs a dictionary using a heuristic based on the following paper:
 *
 * Liao, Petri, Moffat, Wirth
 * Effective Construction of Relative Lempel-Ziv Dictionaries
 * Published in WWW 2016.
 *
 * Adapted from code originally written by @ot (Giuseppe Ottaviano).
 ******************************************************************************/

/*-*************************************
* Dependencies
***************************************/
#include <stdio.h>  /* fprintf */
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memset */
#include <time.h>   /* clock */

#include "mem.h" /* read */
#include "pool.h"
#include "threading.h"
#include "zstd_internal.h" /* includes zstd.h */
#ifndef ZDICT_STATIC_LINKING_ONLY
#define ZDICT_STATIC_LINKING_ONLY
#endif
#include "zdict.h"

/*-*************************************
* Constants
***************************************/
#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB))

/*-*************************************
* Console display
***************************************/
static int g_displayLevel = 2;
#define DISPLAY(...)                                                           \
  {                                                                            \
    fprintf(stderr, __VA_ARGS__);                                              \
    fflush(stderr);                                                            \
  }
#define LOCALDISPLAYLEVEL(displayLevel, l, ...)                                \
  if (displayLevel >= l) {                                                     \
    DISPLAY(__VA_ARGS__);                                                      \
  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)

#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                               \
  if (displayLevel >= l) {                                                     \
    if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) {             \
      g_time = clock();                                                        \
      DISPLAY(__VA_ARGS__);                                                    \
    }                                                                          \
  }
#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
static clock_t g_time = 0;

/*-*************************************
* Hash table
***************************************
* A small specialized hash map for storing activeDmers.
* The map does not resize, so if it becomes full it will loop forever.
* Thus, the map must be large enough to store every value.
* The map implements linear probing and keeps its load less than 0.5.
*/

#define MAP_EMPTY_VALUE ((U32)-1)
typedef struct COVER_map_pair_t_s {
  U32 key;
  U32 value;
} COVER_map_pair_t;

typedef struct COVER_map_s {
  COVER_map_pair_t *data;
  U32 sizeLog;
  U32 size;
  U32 sizeMask;
} COVER_map_t;

/**
 * Clear the map.
 */
static void COVER_map_clear(COVER_map_t *map) {
  /* memset() writes the byte 0xFF everywhere, which sets every key and value
   * to MAP_EMPTY_VALUE; this only works because MAP_EMPTY_VALUE is all ones.
   */
  memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t));
}
/**
 * Initializes a map of the given size.
 * Returns 1 on success and 0 on failure.
 * The map must be destroyed with COVER_map_destroy().
 * The map is only guaranteed to be large enough to hold size elements.
 */
static int COVER_map_init(COVER_map_t *map, U32 size) {
  map->sizeLog = ZSTD_highbit32(size) + 2;
  map->size = (U32)1 << map->sizeLog;
  map->sizeMask = map->size - 1;
  map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t));
  if (!map->data) {
    map->sizeLog = 0;
    map->size = 0;
    return 0;
  }
  COVER_map_clear(map);
  return 1;
}

/**
 * Internal hash function
 */
static const U32 prime4bytes = 2654435761U;
static U32 COVER_map_hash(COVER_map_t *map, U32 key) {
  return (key * prime4bytes) >> (32 - map->sizeLog);
}

/**
 * Helper function that returns the index that a key should be placed into.
 */
static U32 COVER_map_index(COVER_map_t *map, U32 key) {
  const U32 hash = COVER_map_hash(map, key);
  U32 i;
  for (i = hash;; i = (i + 1) & map->sizeMask) {
    COVER_map_pair_t *pos = &map->data[i];
    if (pos->value == MAP_EMPTY_VALUE) {
      return i;
    }
    if (pos->key == key) {
      return i;
    }
  }
}

/**
 * Returns the pointer to the value for key.
 * If key is not in the map, it is inserted and the value is set to 0.
 * The map must not be full.
 */
static U32 *COVER_map_at(COVER_map_t *map, U32 key) {
  COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)];
  if (pos->value == MAP_EMPTY_VALUE) {
    pos->key = key;
    pos->value = 0;
  }
  return &pos->value;
}

/**
 * Deletes key from the map if present.
 */
static void COVER_map_remove(COVER_map_t *map, U32 key) {
  U32 i = COVER_map_index(map, key);
  COVER_map_pair_t *del = &map->data[i];
  U32 shift = 1;
  if (del->value == MAP_EMPTY_VALUE) {
    return;
  }
  for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) {
    COVER_map_pair_t *const pos = &map->data[i];
    /* If the position is empty we are done */
    if (pos->value == MAP_EMPTY_VALUE) {
      del->value = MAP_EMPTY_VALUE;
      return;
    }
    /* If pos can be moved to del do so */
    if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) {
      del->key = pos->key;
      del->value = pos->value;
      del = pos;
      shift = 1;
    } else {
      ++shift;
    }
  }
}

/**
 * Destroys a map initialized with COVER_map_init().
 */
static void COVER_map_destroy(COVER_map_t *map) {
  if (map->data) {
    free(map->data);
  }
  map->data = NULL;
  map->size = 0;
}
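/*
 * Illustrative usage sketch for the map (not called by the library itself).
 * COVER_map_init() rounds the table up to 2^(ZSTD_highbit32(size)+2) slots,
 * so a map inited for n elements keeps its load factor below 0.5:
 *
 *   COVER_map_t map;
 *   if (COVER_map_init(&map, 48)) {   // 48 -> sizeLog 7 -> 128 slots
 *     *COVER_map_at(&map, 42) += 1;   // key 42 inserted with value 0, then 1
 *     *COVER_map_at(&map, 42) += 1;   // value is now 2
 *     COVER_map_remove(&map, 42);     // backward-shift deletion, no tombstones
 *     COVER_map_destroy(&map);
 *   }
 */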
/*-*************************************
* Context
***************************************/

typedef struct {
  const BYTE *samples;
  size_t *offsets;
  const size_t *samplesSizes;
  size_t nbSamples;
  U32 *suffix;
  size_t suffixSize;
  U32 *freqs;
  U32 *dmerAt;
  unsigned d;
} COVER_ctx_t;

/* We need a global context for qsort... */
static COVER_ctx_t *g_ctx = NULL;

/*-*************************************
* Helper functions
***************************************/

/**
 * Returns the sum of the sample sizes.
 */
static size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
  size_t sum = 0;
  size_t i;
  for (i = 0; i < nbSamples; ++i) {
    sum += samplesSizes[i];
  }
  return sum;
}

/**
 * Returns -1 if the dmer at lp is less than the dmer at rp.
 * Returns 0 if the dmers at lp and rp are equal.
 * Returns 1 if the dmer at lp is greater than the dmer at rp.
 */
static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) {
  U32 const lhs = *(U32 const *)lp;
  U32 const rhs = *(U32 const *)rp;
  return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d);
}
/**
 * Faster version for d <= 8.
 */
static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) {
  U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1);
  U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask;
  U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask;
  if (lhs < rhs) {
    return -1;
  }
  return (lhs > rhs);
}

/**
 * Same as COVER_cmp() except ties are broken by pointer value.
 * NOTE: g_ctx must be set to call this function. A global is required because
 * qsort doesn't take an opaque pointer.
 */
static int COVER_strict_cmp(const void *lp, const void *rp) {
  int result = COVER_cmp(g_ctx, lp, rp);
  if (result == 0) {
    result = lp < rp ? -1 : 1;
  }
  return result;
}
/**
 * Faster version for d <= 8.
 */
static int COVER_strict_cmp8(const void *lp, const void *rp) {
  int result = COVER_cmp8(g_ctx, lp, rp);
  if (result == 0) {
    result = lp < rp ? -1 : 1;
  }
  return result;
}

/**
 * Returns the first pointer in [first, last) whose element does not compare
 * less than value. If no such element exists it returns last.
 */
static const size_t *COVER_lower_bound(const size_t *first, const size_t *last,
                                       size_t value) {
  size_t count = last - first;
  while (count != 0) {
    size_t step = count / 2;
    const size_t *ptr = first;
    ptr += step;
    if (*ptr < value) {
      first = ++ptr;
      count -= step + 1;
    } else {
      count = step;
    }
  }
  return first;
}

/**
 * Generic groupBy function.
 * Groups an array sorted by cmp into groups with equivalent values.
 * Calls grp for each group.
 */
static void
COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx,
              int (*cmp)(COVER_ctx_t *, const void *, const void *),
              void (*grp)(COVER_ctx_t *, const void *, const void *)) {
  const BYTE *ptr = (const BYTE *)data;
  size_t num = 0;
  while (num < count) {
    const BYTE *grpEnd = ptr + size;
    ++num;
    while (num < count && cmp(ctx, ptr, grpEnd) == 0) {
      grpEnd += size;
      ++num;
    }
    grp(ctx, ptr, grpEnd);
    ptr = grpEnd;
  }
}
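/*
 * For intuition, a small illustrative trace (not runnable library code): with
 * samples = "abcabc..." and d = 3, positions 0 and 3 both start the dmer
 * "abc". After qsort() with COVER_strict_cmp8() equal dmers sit next to each
 * other in the suffix array, so COVER_groupBy() sees the run {0, 3} as one
 * group and invokes grp() exactly once for it, with [group, groupEnd)
 * bracketing the run.
 */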
/*-*************************************
* Cover functions
***************************************/

/**
 * Called on each group of positions with the same dmer.
 * Counts the frequency of each dmer and saves it in the suffix array.
 * Fills `ctx->dmerAt`.
 */
static void COVER_group(COVER_ctx_t *ctx, const void *group,
                        const void *groupEnd) {
  /* The group consists of all the positions with the same first d bytes. */
  const U32 *grpPtr = (const U32 *)group;
  const U32 *grpEnd = (const U32 *)groupEnd;
  /* The dmerId is how we will reference this dmer.
   * This allows us to map the whole dmer space to a much smaller space, the
   * size of the suffix array.
   */
  const U32 dmerId = (U32)(grpPtr - ctx->suffix);
  /* Count the number of samples this dmer shows up in */
  U32 freq = 0;
  /* Details */
  const size_t *curOffsetPtr = ctx->offsets;
  const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples;
  /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a
   * different sample than the last.
   */
  size_t curSampleEnd = ctx->offsets[0];
  for (; grpPtr != grpEnd; ++grpPtr) {
    /* Save the dmerId for this position so we can get back to it. */
    ctx->dmerAt[*grpPtr] = dmerId;
    /* Dictionaries only help for the first reference to the dmer.
     * After that zstd can reference the match from the previous reference.
     * So only count each dmer once for each sample it is in.
     */
    if (*grpPtr < curSampleEnd) {
      continue;
    }
    freq += 1;
    /* Binary search to find the end of the sample *grpPtr is in.
     * In the common case that grpPtr + 1 == grpEnd we can skip the binary
     * search because the loop is over.
     */
    if (grpPtr + 1 != grpEnd) {
      const size_t *sampleEndPtr =
          COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr);
      curSampleEnd = *sampleEndPtr;
      curOffsetPtr = sampleEndPtr + 1;
    }
  }
  /* At this point we are never going to look at this segment of the suffix
   * array again. We take advantage of this fact to save memory.
   * We store the frequency of the dmer in the first position of the group,
   * which is dmerId.
   */
  ctx->suffix[dmerId] = freq;
}
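/*
 * Worked example of the counting rule above (numbers are illustrative): if a
 * dmer appears three times in sample 0 and once in sample 2, COVER_group()
 * stores freq = 2, not 4, because repeats inside one sample are expected to
 * be matched by back-references rather than by the dictionary.
 */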
/**
 * A segment is a range in the source as well as the score of the segment.
 */
typedef struct {
  U32 begin;
  U32 end;
  U32 score;
} COVER_segment_t;

/**
 * Selects the best segment in an epoch.
 * Segments are scored according to the function:
 *
 * Let F(d) be the frequency of dmer d.
 * Let S_i be the dmer at position i of segment S which has length k.
 *
 *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
 *
 * Once the dmer d is in the dictionary we set F(d) = 0.
 */
static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
                                           COVER_map_t *activeDmers, U32 begin,
                                           U32 end,
                                           ZDICT_cover_params_t parameters) {
  /* Constants */
  const U32 k = parameters.k;
  const U32 d = parameters.d;
  const U32 dmersInK = k - d + 1;
  /* Try each segment (activeSegment) and save the best (bestSegment) */
  COVER_segment_t bestSegment = {0, 0, 0};
  COVER_segment_t activeSegment;
  /* Reset the activeDmers in the segment */
  COVER_map_clear(activeDmers);
  /* The activeSegment starts at the beginning of the epoch. */
  activeSegment.begin = begin;
  activeSegment.end = begin;
  activeSegment.score = 0;
  /* Slide the activeSegment through the whole epoch.
   * Save the best segment in bestSegment.
   */
  while (activeSegment.end < end) {
    /* The dmerId for the dmer at the next position */
    U32 newDmer = ctx->dmerAt[activeSegment.end];
    /* The entry in activeDmers for this dmerId */
    U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer);
    /* If the dmer isn't already present in the segment add its score. */
    if (*newDmerOcc == 0) {
      /* The paper suggests using the L-0.5 norm, but experiments show that it
       * doesn't help.
       */
      activeSegment.score += freqs[newDmer];
    }
    /* Add the dmer to the segment */
    activeSegment.end += 1;
    *newDmerOcc += 1;

    /* If the window is now too large, drop the first position */
    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
      U32 delDmer = ctx->dmerAt[activeSegment.begin];
      U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
      activeSegment.begin += 1;
      *delDmerOcc -= 1;
      /* If this is the last occurrence of the dmer, subtract its score */
      if (*delDmerOcc == 0) {
        COVER_map_remove(activeDmers, delDmer);
        activeSegment.score -= freqs[delDmer];
      }
    }

    /* If this segment is the best so far save it */
    if (activeSegment.score > bestSegment.score) {
      bestSegment = activeSegment;
    }
  }
  {
    /* Trim off the zero frequency head and tail from the segment. */
    U32 newBegin = bestSegment.end;
    U32 newEnd = bestSegment.begin;
    U32 pos;
    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
      U32 freq = freqs[ctx->dmerAt[pos]];
      if (freq != 0) {
        newBegin = MIN(newBegin, pos);
        newEnd = pos + 1;
      }
    }
    bestSegment.begin = newBegin;
    bestSegment.end = newEnd;
  }
  {
    /* Zero out the frequency of each dmer covered by the chosen segment. */
    U32 pos;
    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
      freqs[ctx->dmerAt[pos]] = 0;
    }
  }
  return bestSegment;
}
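/*
 * Worked scoring example (illustrative numbers): with d = 2 and k = 4 a
 * segment spans dmersInK = 3 dmer positions. If those positions hold the
 * dmers X, Y, X with freqs[X] = 5 and freqs[Y] = 2, then Score = 5 + 2 = 7:
 * the activeDmers map sees the second X already present (*newDmerOcc != 0)
 * and does not add freqs[X] again.
 */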
/**
 * Check the validity of the parameters.
 * Returns non-zero if the parameters are valid and 0 otherwise.
 */
static int COVER_checkParameters(ZDICT_cover_params_t parameters,
                                 size_t maxDictSize) {
  /* k and d are required parameters */
  if (parameters.d == 0 || parameters.k == 0) {
    return 0;
  }
  /* k <= maxDictSize */
  if (parameters.k > maxDictSize) {
    return 0;
  }
  /* d <= k */
  if (parameters.d > parameters.k) {
    return 0;
  }
  return 1;
}

/**
 * Clean up a context initialized with `COVER_ctx_init()`.
 */
static void COVER_ctx_destroy(COVER_ctx_t *ctx) {
  if (!ctx) {
    return;
  }
  if (ctx->suffix) {
    free(ctx->suffix);
    ctx->suffix = NULL;
  }
  if (ctx->freqs) {
    free(ctx->freqs);
    ctx->freqs = NULL;
  }
  if (ctx->dmerAt) {
    free(ctx->dmerAt);
    ctx->dmerAt = NULL;
  }
  if (ctx->offsets) {
    free(ctx->offsets);
    ctx->offsets = NULL;
  }
}

/**
 * Prepare a context for dictionary building.
 * The context is only dependent on the parameter `d` and can be used multiple
 * times.
 * Returns 1 on success or zero on error.
 * The context must be destroyed with `COVER_ctx_destroy()`.
 */
static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
                          const size_t *samplesSizes, unsigned nbSamples,
                          unsigned d) {
  const BYTE *const samples = (const BYTE *)samplesBuffer;
  const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
  /* Checks */
  if (totalSamplesSize < MAX(d, sizeof(U64)) ||
      totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
    DISPLAYLEVEL(1, "Total samples size is too large, maximum size is %u MB\n",
                 (COVER_MAX_SAMPLES_SIZE >> 20));
    return 0;
  }
  /* Zero the context */
  memset(ctx, 0, sizeof(*ctx));
  DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbSamples,
               (U32)totalSamplesSize);
  ctx->samples = samples;
  ctx->samplesSizes = samplesSizes;
  ctx->nbSamples = nbSamples;
  /* Partial suffix array */
  ctx->suffixSize = totalSamplesSize - MAX(d, sizeof(U64)) + 1;
  ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  /* Maps index to the dmerID */
  ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  /* The offsets of each file */
  ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t));
  if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {
    DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n");
    COVER_ctx_destroy(ctx);
    return 0;
  }
  ctx->freqs = NULL;
  ctx->d = d;

  /* Fill offsets from the samplesSizes */
  {
    U32 i;
    ctx->offsets[0] = 0;
    for (i = 1; i <= nbSamples; ++i) {
      ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
    }
  }
  DISPLAYLEVEL(2, "Constructing partial suffix array\n");
  {
    /* suffix is a partial suffix array.
     * It only sorts suffixes by their first parameters.d bytes.
     * The sort is stable, so each dmer group is sorted by position in input.
     */
    U32 i;
    for (i = 0; i < ctx->suffixSize; ++i) {
      ctx->suffix[i] = i;
    }
    /* qsort doesn't take an opaque pointer, so pass as a global */
    g_ctx = ctx;
    qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
  }
  DISPLAYLEVEL(2, "Computing frequencies\n");
  /* For each dmer group (group of positions with the same first d bytes):
   * 1. For each position we set dmerAt[position] = dmerID. The dmerID is
   *    (groupBeginPtr - suffix). This allows us to go from position to
   *    dmerID so we can look up values in freq.
   * 2. We calculate how many samples the dmer occurs in and save it in
   *    freqs[dmerId].
   */
  COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx,
                (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group);
  ctx->freqs = ctx->suffix;
  ctx->suffix = NULL;
  return 1;
}
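/*
 * Worked example of the layout COVER_ctx_init() produces (illustrative
 * numbers): for samplesSizes = {10, 20, 15} and d = 6 it fills
 * offsets = {0, 10, 30, 45}, so sample i occupies [offsets[i], offsets[i+1]),
 * and suffixSize = 45 - MAX(6, 8) + 1 = 38 dmer positions are indexed.
 */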
/**
 * Given the prepared context build the dictionary.
 */
static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
                                    COVER_map_t *activeDmers, void *dictBuffer,
                                    size_t dictBufferCapacity,
                                    ZDICT_cover_params_t parameters) {
  BYTE *const dict = (BYTE *)dictBuffer;
  size_t tail = dictBufferCapacity;
  /* Divide the data up into epochs of equal size.
   * We will select at least one segment from each epoch.
   */
  const U32 epochs = (U32)(dictBufferCapacity / parameters.k);
  const U32 epochSize = (U32)(ctx->suffixSize / epochs);
  size_t epoch;
  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs,
               epochSize);
  /* Loop through the epochs until there are no more segments or the dictionary
   * is full.
   */
  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs) {
    const U32 epochBegin = (U32)(epoch * epochSize);
    const U32 epochEnd = epochBegin + epochSize;
    size_t segmentSize;
    /* Select a segment */
    COVER_segment_t segment = COVER_selectSegment(
        ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
    /* If the segment covers no dmers, then we are out of content */
    if (segment.score == 0) {
      break;
    }
    /* Trim the segment if necessary and if it is too small then we are done */
    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
    if (segmentSize < parameters.d) {
      break;
    }
    /* We fill the dictionary from the back to allow the best segments to be
     * referenced with the smallest offsets.
     */
    tail -= segmentSize;
    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
    DISPLAYUPDATE(
        2, "\r%u%% ",
        (U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
  }
  DISPLAYLEVEL(2, "\r%79s\r", "");
  return tail;
}

ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
    void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
    const size_t *samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t parameters) {
  BYTE *const dict = (BYTE *)dictBuffer;
  COVER_ctx_t ctx;
  COVER_map_t activeDmers;
  /* Checks */
  if (!COVER_checkParameters(parameters, dictBufferCapacity)) {
    DISPLAYLEVEL(1, "Cover parameters incorrect\n");
    return ERROR(GENERIC);
  }
  if (nbSamples == 0) {
    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
    return ERROR(GENERIC);
  }
  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
                 ZDICT_DICTSIZE_MIN);
    return ERROR(dstSize_tooSmall);
  }
  /* Initialize global data */
  g_displayLevel = parameters.zParams.notificationLevel;
  /* Initialize context and activeDmers */
  if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
                      parameters.d)) {
    return ERROR(GENERIC);
  }
  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
    COVER_ctx_destroy(&ctx);
    return ERROR(GENERIC);
  }

  DISPLAYLEVEL(2, "Building dictionary\n");
  {
    const size_t tail =
        COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,
                              dictBufferCapacity, parameters);
    const size_t dictionarySize = ZDICT_finalizeDictionary(
        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
        samplesBuffer, samplesSizes, nbSamples, parameters.zParams);
    if (!ZSTD_isError(dictionarySize)) {
      DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
                   (U32)dictionarySize);
    }
    COVER_ctx_destroy(&ctx);
    COVER_map_destroy(&activeDmers);
    return dictionarySize;
  }
}
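/*
 * Minimal calling sketch (illustrative; error handling trimmed). Assumes the
 * samples are concatenated back to back in `samples` and that `sizes[i]` is
 * the length of sample i:
 *
 *   ZDICT_cover_params_t params;
 *   memset(&params, 0, sizeof(params));
 *   params.k = 200;  // segment size; d <= k <= dictCapacity must hold
 *   params.d = 8;    // dmer size
 *   {   size_t const dictSize = ZDICT_trainFromBuffer_cover(
 *           dictBuffer, dictCapacity, samples, sizes, nbSamples, params);
 *       if (ZDICT_isError(dictSize)) { ... }
 *   }
 */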
/**
 * COVER_best_t is used for two purposes:
 * 1. Synchronizing threads.
 * 2. Saving the best parameters and dictionary.
 *
 * All of the methods except COVER_best_init() are thread safe if zstd is
 * compiled with multithreaded support.
 */
typedef struct COVER_best_s {
  ZSTD_pthread_mutex_t mutex;
  ZSTD_pthread_cond_t cond;
  size_t liveJobs;
  void *dict;
  size_t dictSize;
  ZDICT_cover_params_t parameters;
  size_t compressedSize;
} COVER_best_t;

/**
 * Initialize the `COVER_best_t`.
 */
static void COVER_best_init(COVER_best_t *best) {
  if (best==NULL) return; /* compatible with init on NULL */
  (void)ZSTD_pthread_mutex_init(&best->mutex, NULL);
  (void)ZSTD_pthread_cond_init(&best->cond, NULL);
  best->liveJobs = 0;
  best->dict = NULL;
  best->dictSize = 0;
  best->compressedSize = (size_t)-1;
  memset(&best->parameters, 0, sizeof(best->parameters));
}

/**
 * Wait until liveJobs == 0.
 */
static void COVER_best_wait(COVER_best_t *best) {
  if (!best) {
    return;
  }
  ZSTD_pthread_mutex_lock(&best->mutex);
  while (best->liveJobs != 0) {
    ZSTD_pthread_cond_wait(&best->cond, &best->mutex);
  }
  ZSTD_pthread_mutex_unlock(&best->mutex);
}

/**
 * Call COVER_best_wait() and then destroy the COVER_best_t.
 */
static void COVER_best_destroy(COVER_best_t *best) {
  if (!best) {
    return;
  }
  COVER_best_wait(best);
  if (best->dict) {
    free(best->dict);
  }
  ZSTD_pthread_mutex_destroy(&best->mutex);
  ZSTD_pthread_cond_destroy(&best->cond);
}

/**
 * Called when a thread is about to be launched.
 * Increments liveJobs.
 */
static void COVER_best_start(COVER_best_t *best) {
  if (!best) {
    return;
  }
  ZSTD_pthread_mutex_lock(&best->mutex);
  ++best->liveJobs;
  ZSTD_pthread_mutex_unlock(&best->mutex);
}

/**
 * Called when a thread finishes executing, on both success and error.
 * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
 * If this dictionary is the best so far save it and its parameters.
 */
static void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
                              ZDICT_cover_params_t parameters, void *dict,
                              size_t dictSize) {
  if (!best) {
    return;
  }
  {
    size_t liveJobs;
    ZSTD_pthread_mutex_lock(&best->mutex);
    --best->liveJobs;
    liveJobs = best->liveJobs;
    /* If the new dictionary is better */
    if (compressedSize < best->compressedSize) {
      /* Allocate space if necessary */
      if (!best->dict || best->dictSize < dictSize) {
        if (best->dict) {
          free(best->dict);
        }
        best->dict = malloc(dictSize);
        if (!best->dict) {
          /* Don't return while still holding the mutex: record the error,
           * release the lock, and wake any waiters before bailing out.
           */
          best->compressedSize = ERROR(GENERIC);
          best->dictSize = 0;
          ZSTD_pthread_mutex_unlock(&best->mutex);
          if (liveJobs == 0) {
            ZSTD_pthread_cond_broadcast(&best->cond);
          }
          return;
        }
      }
      /* Save the dictionary, parameters, and size */
      memcpy(best->dict, dict, dictSize);
      best->dictSize = dictSize;
      best->parameters = parameters;
      best->compressedSize = compressedSize;
    }
    ZSTD_pthread_mutex_unlock(&best->mutex);
    if (liveJobs == 0) {
      ZSTD_pthread_cond_broadcast(&best->cond);
    }
  }
}

/**
 * Parameters for COVER_tryParameters().
 */
typedef struct COVER_tryParameters_data_s {
  const COVER_ctx_t *ctx;
  COVER_best_t *best;
  size_t dictBufferCapacity;
  ZDICT_cover_params_t parameters;
} COVER_tryParameters_data_t;
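/*
 * Illustrative lifecycle of COVER_best_t (this mirrors how
 * ZDICT_optimizeTrainFromBuffer_cover() below drives it):
 *
 *   COVER_best_t best;
 *   COVER_best_init(&best);
 *   COVER_best_start(&best);   // once per launched job
 *   // ... each job calls COVER_best_finish(&best, ...) exactly once ...
 *   COVER_best_wait(&best);    // blocks until liveJobs == 0
 *   // best.dict, best.dictSize, best.parameters, and best.compressedSize
 *   // now describe the winning candidate
 *   COVER_best_destroy(&best);
 */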
/**
 * Tries a set of parameters and updates the COVER_best_t with the results.
 * This function is thread safe if zstd is compiled with multithreaded support.
 * It takes its parameters as an *OWNING* opaque pointer to support threading.
 */
static void COVER_tryParameters(void *opaque) {
  /* Save parameters as local variables */
  COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t *)opaque;
  const COVER_ctx_t *const ctx = data->ctx;
  const ZDICT_cover_params_t parameters = data->parameters;
  size_t dictBufferCapacity = data->dictBufferCapacity;
  size_t totalCompressedSize = ERROR(GENERIC);
  /* Allocate space for hash table, dict, and freqs */
  COVER_map_t activeDmers;
  BYTE *const dict = (BYTE *)malloc(dictBufferCapacity);
  U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
    goto _cleanup;
  }
  if (!dict || !freqs) {
    DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
    goto _cleanup;
  }
  /* Copy the frequencies because we need to modify them */
  memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32));
  /* Build the dictionary */
  {
    const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
                                              dictBufferCapacity, parameters);
    dictBufferCapacity = ZDICT_finalizeDictionary(
        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbSamples,
        parameters.zParams);
    if (ZDICT_isError(dictBufferCapacity)) {
      DISPLAYLEVEL(1, "Failed to finalize dictionary\n");
      goto _cleanup;
    }
  }
  /* Check total compressed size */
  {
    /* Pointers */
    ZSTD_CCtx *cctx;
    ZSTD_CDict *cdict;
    void *dst;
    /* Local variables */
    size_t dstCapacity;
    size_t i;
    /* Allocate dst with enough space to compress the maximum sized sample */
    {
      size_t maxSampleSize = 0;
      for (i = 0; i < ctx->nbSamples; ++i) {
        maxSampleSize = MAX(ctx->samplesSizes[i], maxSampleSize);
      }
      dstCapacity = ZSTD_compressBound(maxSampleSize);
      dst = malloc(dstCapacity);
    }
    /* Create the cctx and cdict */
    cctx = ZSTD_createCCtx();
    cdict = ZSTD_createCDict(dict, dictBufferCapacity,
                             parameters.zParams.compressionLevel);
    if (!dst || !cctx || !cdict) {
      goto _compressCleanup;
    }
    /* Compress each sample and sum their sizes (or error) */
    totalCompressedSize = dictBufferCapacity;
    for (i = 0; i < ctx->nbSamples; ++i) {
      const size_t size = ZSTD_compress_usingCDict(
          cctx, dst, dstCapacity, ctx->samples + ctx->offsets[i],
          ctx->samplesSizes[i], cdict);
      if (ZSTD_isError(size)) {
        totalCompressedSize = ERROR(GENERIC);
        goto _compressCleanup;
      }
      totalCompressedSize += size;
    }
  _compressCleanup:
    ZSTD_freeCCtx(cctx);
    ZSTD_freeCDict(cdict);
    if (dst) {
      free(dst);
    }
  }

_cleanup:
  COVER_best_finish(data->best, totalCompressedSize, parameters, dict,
                    dictBufferCapacity);
  free(data);
  COVER_map_destroy(&activeDmers);
  if (dict) {
    free(dict);
  }
  if (freqs) {
    free(freqs);
  }
}
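/*
 * Minimal calling sketch (illustrative; error handling trimmed). Leaving k,
 * d, and steps at 0 asks the optimizer below to sweep its default grid (d in
 * {6, 8}, k in [50, 2000]); on success the winning k and d are written back
 * into `params`:
 *
 *   ZDICT_cover_params_t params;
 *   memset(&params, 0, sizeof(params));
 *   params.nbThreads = 4;  // optional; needs zstd built with threading
 *   {   size_t const dictSize = ZDICT_optimizeTrainFromBuffer_cover(
 *           dictBuffer, dictCapacity, samples, sizes, nbSamples, &params);
 *       if (ZDICT_isError(dictSize)) { ... }
 *   }
 */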
ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
    void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
    const size_t *samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t *parameters) {
  /* constants */
  const unsigned nbThreads = parameters->nbThreads;
  const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
  const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
  const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
  const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
  const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
  const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
  const unsigned kIterations =
      (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
  /* Local variables */
  const int displayLevel = parameters->zParams.notificationLevel;
  unsigned iteration = 1;
  unsigned d;
  unsigned k;
  COVER_best_t best;
  POOL_ctx *pool = NULL;
  /* Checks */
  if (kMinK < kMaxD || kMaxK < kMinK) {
    LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
    return ERROR(GENERIC);
  }
  if (nbSamples == 0) {
    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
    return ERROR(GENERIC);
  }
  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
                 ZDICT_DICTSIZE_MIN);
    return ERROR(dstSize_tooSmall);
  }
  if (nbThreads > 1) {
    pool = POOL_create(nbThreads, 1);
    if (!pool) {
      return ERROR(memory_allocation);
    }
  }
  /* Initialization */
  COVER_best_init(&best);
  /* Turn down global display level to clean up display at level 2 and below */
  g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
  /* Loop through d first because each new value needs a new context */
  LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
                    kIterations);
  for (d = kMinD; d <= kMaxD; d += 2) {
    /* Initialize the context for this value of d */
    COVER_ctx_t ctx;
    LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
    if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d)) {
      LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
      COVER_best_destroy(&best);
      POOL_free(pool);
      return ERROR(GENERIC);
    }
    /* Loop through k reusing the same context */
    for (k = kMinK; k <= kMaxK; k += kStepSize) {
      /* Prepare the arguments */
      COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc(
          sizeof(COVER_tryParameters_data_t));
      LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
      if (!data) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
        COVER_best_destroy(&best);
        COVER_ctx_destroy(&ctx);
        POOL_free(pool);
        return ERROR(GENERIC);
      }
      data->ctx = &ctx;
      data->best = &best;
      data->dictBufferCapacity = dictBufferCapacity;
      data->parameters = *parameters;
      data->parameters.k = k;
      data->parameters.d = d;
      data->parameters.steps = kSteps;
      data->parameters.zParams.notificationLevel = g_displayLevel;
      /* Check the parameters */
      if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) {
        DISPLAYLEVEL(1, "Cover parameters incorrect\n");
        free(data);
        continue;
      }
      /* Call the function and pass ownership of data to it */
      COVER_best_start(&best);
      if (pool) {
        POOL_add(pool, &COVER_tryParameters, data);
      } else {
        COVER_tryParameters(data);
      }
      /* Print status */
      LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ",
                         (U32)((iteration * 100) / kIterations));
      ++iteration;
    }
    COVER_best_wait(&best);
    COVER_ctx_destroy(&ctx);
  }
  LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
  /* Fill the output buffer and parameters with output of the best parameters */
  {
    const size_t dictSize = best.dictSize;
    if (ZSTD_isError(best.compressedSize)) {
      const size_t compressedSize = best.compressedSize;
      COVER_best_destroy(&best);
      POOL_free(pool);
      return compressedSize;
    }
    *parameters = best.parameters;
    memcpy(dictBuffer, best.dict, dictSize);
    COVER_best_destroy(&best);
    POOL_free(pool);
    return dictSize;
  }
}