/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* *****************************************************************************
 * Constructs a dictionary using a heuristic based on the following paper:
 *
 * Liao, Petri, Moffat, Wirth
 * Effective Construction of Relative Lempel-Ziv Dictionaries
 * Published in WWW 2016.
 *
 * Adapted from code originally written by @ot (Giuseppe Ottaviano).
 ******************************************************************************/

/*-*************************************
*  Dependencies
***************************************/
#include <stdio.h>  /* fprintf */
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memset */
#include <time.h>   /* clock */

#include "mem.h" /* read */
#include "pool.h"
#include "threading.h"
#include "zstd_internal.h" /* includes zstd.h */
#ifndef ZDICT_STATIC_LINKING_ONLY
#define ZDICT_STATIC_LINKING_ONLY
#endif
#include "zdict.h"

/*-*************************************
*  Constants
***************************************/
#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB))

/*-*************************************
*  Console display
***************************************/
static int g_displayLevel = 2;
#define DISPLAY(...)                                                           \
  {                                                                            \
    fprintf(stderr, __VA_ARGS__);                                              \
    fflush(stderr);                                                            \
  }
#define LOCALDISPLAYLEVEL(displayLevel, l, ...)                                \
  if (displayLevel >= l) {                                                     \
    DISPLAY(__VA_ARGS__);                                                      \
  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)

#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                               \
  if (displayLevel >= l) {                                                     \
    if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) {             \
      g_time = clock();                                                        \
      DISPLAY(__VA_ARGS__);                                                    \
    }                                                                          \
  }
#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
static clock_t g_time = 0;

/*-*************************************
* Hash table
***************************************
* A small specialized hash map for storing activeDmers.
* The map does not resize, so if it becomes full it will loop forever.
* Thus, the map must be large enough to store every value.
* The map implements linear probing and keeps its load less than 0.5.
*/

#define MAP_EMPTY_VALUE ((U32)-1)
typedef struct COVER_map_pair_t_s {
  U32 key;
  U32 value;
} COVER_map_pair_t;

typedef struct COVER_map_s {
  COVER_map_pair_t *data;
  U32 sizeLog;
  U32 size;
  U32 sizeMask;
} COVER_map_t;

/**
 * Clear the map.
 */
static void COVER_map_clear(COVER_map_t *map) {
  memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t));
}

/**
 * Initializes a map of the given size.
 * Returns 1 on success and 0 on failure.
 * The map must be destroyed with COVER_map_destroy().
 * The map is only guaranteed to be large enough to hold size elements.
 */
static int COVER_map_init(COVER_map_t *map, U32 size) {
  map->sizeLog = ZSTD_highbit32(size) + 2;
  map->size = (U32)1 << map->sizeLog;
  map->sizeMask = map->size - 1;
  map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t));
  if (!map->data) {
    map->sizeLog = 0;
    map->size = 0;
    return 0;
  }
  COVER_map_clear(map);
  return 1;
}

/**
 * Internal hash function
 */
static const U32 prime4bytes = 2654435761U;
static U32 COVER_map_hash(COVER_map_t *map, U32 key) {
  return (key * prime4bytes) >> (32 - map->sizeLog);
}

/**
 * Helper function that returns the index that a key should be placed into.
 */
static U32 COVER_map_index(COVER_map_t *map, U32 key) {
  const U32 hash = COVER_map_hash(map, key);
  U32 i;
  for (i = hash;; i = (i + 1) & map->sizeMask) {
    COVER_map_pair_t *pos = &map->data[i];
    if (pos->value == MAP_EMPTY_VALUE) {
      return i;
    }
    if (pos->key == key) {
      return i;
    }
  }
}

/**
 * Returns the pointer to the value for key.
 * If key is not in the map, it is inserted and the value is set to 0.
 * The map must not be full.
 */
static U32 *COVER_map_at(COVER_map_t *map, U32 key) {
  COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)];
  if (pos->value == MAP_EMPTY_VALUE) {
    pos->key = key;
    pos->value = 0;
  }
  return &pos->value;
}

/**
 * Deletes key from the map if present.
 */
static void COVER_map_remove(COVER_map_t *map, U32 key) {
  U32 i = COVER_map_index(map, key);
  COVER_map_pair_t *del = &map->data[i];
  U32 shift = 1;
  if (del->value == MAP_EMPTY_VALUE) {
    return;
  }
  for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) {
    COVER_map_pair_t *const pos = &map->data[i];
    /* If the position is empty we are done */
    if (pos->value == MAP_EMPTY_VALUE) {
      del->value = MAP_EMPTY_VALUE;
      return;
    }
    /* If pos can be moved to del do so */
    if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) {
      del->key = pos->key;
      del->value = pos->value;
      del = pos;
      shift = 1;
    } else {
      ++shift;
    }
  }
}

/**
 * Destroys a map that is inited with COVER_map_init().
 */
static void COVER_map_destroy(COVER_map_t *map) {
  if (map->data) {
    free(map->data);
  }
  map->data = NULL;
  map->size = 0;
}

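/* Usage sketch (illustrative addition, not part of the original source):
 * the map behaves as a U32 -> U32 counter table. COVER_map_example() is a
 * hypothetical caller; COVER_map_init() must be sized for the maximum number
 * of live keys, because the table never resizes.
 */
#if 0
static void COVER_map_example(void) {
  COVER_map_t map;
  if (!COVER_map_init(&map, 16)) { /* room for 16 keys; allocates 64 slots */
    return;
  }
  *COVER_map_at(&map, 42) += 1; /* inserts key 42 with value 0, then bumps it */
  *COVER_map_at(&map, 42) += 1; /* value is now 2 */
  COVER_map_remove(&map, 42);   /* backward-shift deletion keeps probes intact */
  COVER_map_destroy(&map);
}
#endif
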
/*-*************************************
* Context
***************************************/

typedef struct {
  const BYTE *samples;
  size_t *offsets;
  const size_t *samplesSizes;
  size_t nbSamples;
  U32 *suffix;
  size_t suffixSize;
  U32 *freqs;
  U32 *dmerAt;
  unsigned d;
} COVER_ctx_t;

/* We need a global context for qsort... */
static COVER_ctx_t *g_ctx = NULL;

/*-*************************************
*  Helper functions
***************************************/

/**
 * Returns the sum of the sample sizes.
 */
static size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
  size_t sum = 0;
  size_t i;
  for (i = 0; i < nbSamples; ++i) {
    sum += samplesSizes[i];
  }
  return sum;
}

/**
 * Returns -1 if the dmer at lp is less than the dmer at rp.
 * Returns 0 if the dmers at lp and rp are equal.
 * Returns 1 if the dmer at lp is greater than the dmer at rp.
 */
static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) {
  U32 const lhs = *(U32 const *)lp;
  U32 const rhs = *(U32 const *)rp;
  return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d);
}
/**
 * Faster version for d <= 8.
 */
static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) {
  U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1);
  U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask;
  U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask;
  if (lhs < rhs) {
    return -1;
  }
  return (lhs > rhs);
}

/**
 * Same as COVER_cmp() except ties are broken by pointer value
 * NOTE: g_ctx must be set to call this function. A global is required because
 * qsort doesn't take an opaque pointer.
 */
static int COVER_strict_cmp(const void *lp, const void *rp) {
  int result = COVER_cmp(g_ctx, lp, rp);
  if (result == 0) {
    result = lp < rp ? -1 : 1;
  }
  return result;
}
/**
 * Faster version for d <= 8.
 */
static int COVER_strict_cmp8(const void *lp, const void *rp) {
  int result = COVER_cmp8(g_ctx, lp, rp);
  if (result == 0) {
    result = lp < rp ? -1 : 1;
  }
  return result;
}

/**
 * Returns the first pointer in [first, last) whose element does not compare
 * less than value. If no such element exists it returns last.
 */
static const size_t *COVER_lower_bound(const size_t *first, const size_t *last,
                                       size_t value) {
  size_t count = last - first;
  while (count != 0) {
    size_t step = count / 2;
    const size_t *ptr = first;
    ptr += step;
    if (*ptr < value) {
      first = ++ptr;
      count -= step + 1;
    } else {
      count = step;
    }
  }
  return first;
}

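/* Worked example (illustrative): with offsets = {0, 100, 300, 350} and
 * value = 120, COVER_lower_bound(offsets, offsets + 4, 120) returns a pointer
 * to 300, the first element that does not compare less than 120; with
 * value = 400 it returns offsets + 4, i.e. `last`.
 */
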
/**
 * Generic groupBy function.
 * Groups an array sorted by cmp into groups with equivalent values.
 * Calls grp for each group.
 */
static void
COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx,
              int (*cmp)(COVER_ctx_t *, const void *, const void *),
              void (*grp)(COVER_ctx_t *, const void *, const void *)) {
  const BYTE *ptr = (const BYTE *)data;
  size_t num = 0;
  while (num < count) {
    const BYTE *grpEnd = ptr + size;
    ++num;
    while (num < count && cmp(ctx, ptr, grpEnd) == 0) {
      grpEnd += size;
      ++num;
    }
    grp(ctx, ptr, grpEnd);
    ptr = grpEnd;
  }
}

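/* Worked example (illustrative): on a sorted array {1, 1, 2, 3, 3, 3} with a
 * cmp that compares the values, COVER_groupBy() invokes grp() three times,
 * with [group, groupEnd) spanning the two 1s, the single 2, and the three 3s.
 */
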
/*-*************************************
*  Cover functions
***************************************/

/**
 * Called on each group of positions with the same dmer.
 * Counts the frequency of each dmer and saves it in the suffix array.
 * Fills `ctx->dmerAt`.
 */
static void COVER_group(COVER_ctx_t *ctx, const void *group,
                        const void *groupEnd) {
  /* The group consists of all the positions with the same first d bytes. */
  const U32 *grpPtr = (const U32 *)group;
  const U32 *grpEnd = (const U32 *)groupEnd;
  /* The dmerId is how we will reference this dmer.
   * This allows us to map the whole dmer space to a much smaller space, the
   * size of the suffix array.
   */
  const U32 dmerId = (U32)(grpPtr - ctx->suffix);
  /* Count the number of samples this dmer shows up in */
  U32 freq = 0;
  /* Details */
  const size_t *curOffsetPtr = ctx->offsets;
  const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples;
  /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a
   * different sample than the last.
   */
  size_t curSampleEnd = ctx->offsets[0];
  for (; grpPtr != grpEnd; ++grpPtr) {
    /* Save the dmerId for this position so we can get back to it. */
    ctx->dmerAt[*grpPtr] = dmerId;
    /* Dictionaries only help for the first reference to the dmer.
     * After that zstd can reference the match from the previous reference.
     * So only count each dmer once for each sample it is in.
     */
    if (*grpPtr < curSampleEnd) {
      continue;
    }
    freq += 1;
    /* Binary search to find the end of the sample *grpPtr is in.
     * In the common case that grpPtr + 1 == grpEnd we can skip the binary
     * search because the loop is over.
     */
    if (grpPtr + 1 != grpEnd) {
      const size_t *sampleEndPtr =
          COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr);
      curSampleEnd = *sampleEndPtr;
      curOffsetPtr = sampleEndPtr + 1;
    }
  }
  /* At this point we are never going to look at this segment of the suffix
   * array again. We take advantage of this fact to save memory.
   * We store the frequency of the dmer in the first position of the group,
   * which is dmerId.
   */
  ctx->suffix[dmerId] = freq;
}

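/* Worked example (illustrative): with d = 2 and the samples "xabab" and
 * "abcd" concatenated (offsets = {0, 5, 9}), the dmer "ab" occurs at
 * positions 1, 3, and 5. All three positions receive the same dmerId, but the
 * stored frequency is 2: positions 1 and 3 lie in the same sample, so only
 * the first one is counted.
 */
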
/**
 * A segment is a range in the source as well as the score of the segment.
 */
typedef struct {
  U32 begin;
  U32 end;
  U32 score;
} COVER_segment_t;

/**
 * Selects the best segment in an epoch.
 * Segments are scored according to the function:
 *
 * Let F(d) be the frequency of dmer d.
 * Let S_i be the dmer at position i of segment S which has length k.
 *
 *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
 *
 * Once the dmer d is in the dictionary we set F(d) = 0.
 */
static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
                                           COVER_map_t *activeDmers, U32 begin,
                                           U32 end,
                                           ZDICT_cover_params_t parameters) {
  /* Constants */
  const U32 k = parameters.k;
  const U32 d = parameters.d;
  const U32 dmersInK = k - d + 1;
  /* Try each segment (activeSegment) and save the best (bestSegment) */
  COVER_segment_t bestSegment = {0, 0, 0};
  COVER_segment_t activeSegment;
  /* Reset the activeDmers in the segment */
  COVER_map_clear(activeDmers);
  /* The activeSegment starts at the beginning of the epoch. */
  activeSegment.begin = begin;
  activeSegment.end = begin;
  activeSegment.score = 0;
  /* Slide the activeSegment through the whole epoch.
   * Save the best segment in bestSegment.
   */
  while (activeSegment.end < end) {
    /* The dmerId for the dmer at the next position */
    U32 newDmer = ctx->dmerAt[activeSegment.end];
    /* The entry in activeDmers for this dmerId */
    U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer);
    /* If the dmer isn't already present in the segment add its score. */
    if (*newDmerOcc == 0) {
      /* The paper suggests using the L-0.5 norm, but experiments show that it
       * doesn't help.
       */
      activeSegment.score += freqs[newDmer];
    }
    /* Add the dmer to the segment */
    activeSegment.end += 1;
    *newDmerOcc += 1;

    /* If the window is now too large, drop the first position */
    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
      U32 delDmer = ctx->dmerAt[activeSegment.begin];
      U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
      activeSegment.begin += 1;
      *delDmerOcc -= 1;
      /* If this is the last occurrence of the dmer, subtract its score */
      if (*delDmerOcc == 0) {
        COVER_map_remove(activeDmers, delDmer);
        activeSegment.score -= freqs[delDmer];
      }
    }

    /* If this segment is the best so far save it */
    if (activeSegment.score > bestSegment.score) {
      bestSegment = activeSegment;
    }
  }
  {
    /* Trim off the zero frequency head and tail from the segment. */
    U32 newBegin = bestSegment.end;
    U32 newEnd = bestSegment.begin;
    U32 pos;
    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
      U32 freq = freqs[ctx->dmerAt[pos]];
      if (freq != 0) {
        newBegin = MIN(newBegin, pos);
        newEnd = pos + 1;
      }
    }
    bestSegment.begin = newBegin;
    bestSegment.end = newEnd;
  }
  {
    /* Zero out the frequency of each dmer covered by the chosen segment. */
    U32 pos;
    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
      freqs[ctx->dmerAt[pos]] = 0;
    }
  }
  return bestSegment;
}

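/* Worked example (illustrative): with k = 4 and d = 2, each segment holds
 * dmersInK = 3 dmers. Suppose positions 0 and 1 hold the same dmer with
 * frequency 5, position 2 a dmer with frequency 2, and position 3 a dmer with
 * frequency 9. The window [0, 3) scores 5 + 2 = 7 (the repeated dmer counts
 * once); sliding to [1, 4) adds 9 without losing the 5 (the repeated dmer
 * still occurs at position 1), scoring 16, which becomes bestSegment.
 */
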
/**
 * Check the validity of the parameters.
 * Returns non-zero if the parameters are valid and 0 otherwise.
 */
static int COVER_checkParameters(ZDICT_cover_params_t parameters,
                                 size_t maxDictSize) {
  /* k and d are required parameters */
  if (parameters.d == 0 || parameters.k == 0) {
    return 0;
  }
  /* k <= maxDictSize */
  if (parameters.k > maxDictSize) {
    return 0;
  }
  /* d <= k */
  if (parameters.d > parameters.k) {
    return 0;
  }
  return 1;
}

/**
 * Clean up a context initialized with `COVER_ctx_init()`.
 */
static void COVER_ctx_destroy(COVER_ctx_t *ctx) {
  if (!ctx) {
    return;
  }
  if (ctx->suffix) {
    free(ctx->suffix);
    ctx->suffix = NULL;
  }
  if (ctx->freqs) {
    free(ctx->freqs);
    ctx->freqs = NULL;
  }
  if (ctx->dmerAt) {
    free(ctx->dmerAt);
    ctx->dmerAt = NULL;
  }
  if (ctx->offsets) {
    free(ctx->offsets);
    ctx->offsets = NULL;
  }
}

/**
 * Prepare a context for dictionary building.
 * The context is only dependent on the parameter `d` and can be used multiple
 * times.
 * Returns 1 on success or zero on error.
 * The context must be destroyed with `COVER_ctx_destroy()`.
 */
static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
                          const size_t *samplesSizes, unsigned nbSamples,
                          unsigned d) {
  const BYTE *const samples = (const BYTE *)samplesBuffer;
  const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
  /* Checks */
  if (totalSamplesSize < MAX(d, sizeof(U64)) ||
      totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
    DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
                 (U32)(totalSamplesSize >> 20), (COVER_MAX_SAMPLES_SIZE >> 20));
    return 0;
  }
  /* Zero the context */
  memset(ctx, 0, sizeof(*ctx));
  DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbSamples,
               (U32)totalSamplesSize);
  ctx->samples = samples;
  ctx->samplesSizes = samplesSizes;
  ctx->nbSamples = nbSamples;
  /* Partial suffix array */
  ctx->suffixSize = totalSamplesSize - MAX(d, sizeof(U64)) + 1;
  ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  /* Maps index to the dmerID */
  ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  /* The offsets of each file */
  ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t));
  if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {
    DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n");
    COVER_ctx_destroy(ctx);
    return 0;
  }
  ctx->freqs = NULL;
  ctx->d = d;

  /* Fill offsets from the samplesSizes */
  {
    U32 i;
    ctx->offsets[0] = 0;
    for (i = 1; i <= nbSamples; ++i) {
      ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
    }
  }
  DISPLAYLEVEL(2, "Constructing partial suffix array\n");
  {
    /* suffix is a partial suffix array.
     * It only sorts suffixes by their first parameters.d bytes.
     * The sort is stable, so each dmer group is sorted by position in input.
     */
    U32 i;
    for (i = 0; i < ctx->suffixSize; ++i) {
      ctx->suffix[i] = i;
    }
    /* qsort doesn't take an opaque pointer, so pass as a global */
    g_ctx = ctx;
    qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
  }
  DISPLAYLEVEL(2, "Computing frequencies\n");
  /* For each dmer group (group of positions with the same first d bytes):
   * 1. For each position we set dmerAt[position] = dmerID. The dmerID is
   *    (groupBeginPtr - suffix). This allows us to go from position to
   *    dmerID so we can look up values in freq.
   * 2. We calculate how many samples the dmer occurs in and save it in
   *    freqs[dmerId].
   */
  COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx,
                (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group);
  ctx->freqs = ctx->suffix;
  ctx->suffix = NULL;
  return 1;
}

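/* Worked example (illustrative, ignoring the MAX(d, 8) suffix-length floor
 * for brevity): for the input "abab" with d = 2, positions {0, 1, 2} hold the
 * dmers "ab", "ba", "ab". Sorting by the first d bytes with the stable
 * position tie-break gives suffix = {0, 2, 1}, so each dmer group is a
 * contiguous, position-ordered run, which is exactly what COVER_group()
 * expects.
 */
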
/**
 * Given the prepared context build the dictionary.
 */
static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
                                    COVER_map_t *activeDmers, void *dictBuffer,
                                    size_t dictBufferCapacity,
                                    ZDICT_cover_params_t parameters) {
  BYTE *const dict = (BYTE *)dictBuffer;
  size_t tail = dictBufferCapacity;
  /* Divide the data up into epochs of equal size.
   * We will select at least one segment from each epoch.
   */
  const U32 epochs = (U32)(dictBufferCapacity / parameters.k);
  const U32 epochSize = (U32)(ctx->suffixSize / epochs);
  size_t epoch;
  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs,
               epochSize);
  /* Loop through the epochs until there are no more segments or the dictionary
   * is full.
   */
  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs) {
    const U32 epochBegin = (U32)(epoch * epochSize);
    const U32 epochEnd = epochBegin + epochSize;
    size_t segmentSize;
    /* Select a segment */
    COVER_segment_t segment = COVER_selectSegment(
        ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
    /* If the segment covers no dmers, then we are out of content */
    if (segment.score == 0) {
      break;
    }
    /* Trim the segment if necessary and if it is too small then we are done */
    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
    if (segmentSize < parameters.d) {
      break;
    }
    /* We fill the dictionary from the back to allow the best segments to be
     * referenced with the smallest offsets.
     */
    tail -= segmentSize;
    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
    DISPLAYUPDATE(
        2, "\r%u%% ",
        (U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
  }
  DISPLAYLEVEL(2, "\r%79s\r", "");
  return tail;
}

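/* Worked example (illustrative): with dictBufferCapacity = 1000 and k = 200
 * there are 1000 / 200 = 5 epochs; if suffixSize is 1000000, each epoch spans
 * 200000 dmer positions. Segments are then taken round-robin from epochs
 * 0, 1, 2, 3, 4, 0, 1, ... until the tail reaches 0 or a segment scores 0.
 */
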
ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
    void *dictBuffer, size_t dictBufferCapacity,
    const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t parameters)
{
  BYTE* const dict = (BYTE*)dictBuffer;
  COVER_ctx_t ctx;
  COVER_map_t activeDmers;

  /* Initialize global data */
  g_displayLevel = parameters.zParams.notificationLevel;
  /* Checks */
  if (!COVER_checkParameters(parameters, dictBufferCapacity)) {
    DISPLAYLEVEL(1, "Cover parameters incorrect\n");
    return ERROR(GENERIC);
  }
  if (nbSamples == 0) {
    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
    return ERROR(GENERIC);
  }
  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
                 ZDICT_DICTSIZE_MIN);
    return ERROR(dstSize_tooSmall);
  }
  /* Initialize context and activeDmers */
  if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
                      parameters.d)) {
    return ERROR(GENERIC);
  }
  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
    COVER_ctx_destroy(&ctx);
    return ERROR(GENERIC);
  }

  DISPLAYLEVEL(2, "Building dictionary\n");
  {
    const size_t tail =
        COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,
                              dictBufferCapacity, parameters);
    const size_t dictionarySize = ZDICT_finalizeDictionary(
        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
        samplesBuffer, samplesSizes, nbSamples, parameters.zParams);
    if (!ZSTD_isError(dictionarySize)) {
      DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
                   (U32)dictionarySize);
    }
    COVER_ctx_destroy(&ctx);
    COVER_map_destroy(&activeDmers);
    return dictionarySize;
  }
}

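/* Usage sketch (illustrative, hypothetical caller): training a 16 KB
 * dictionary from samples already concatenated in memory, with fixed k and d.
 */
#if 0
static size_t trainExample(void *dictBuffer, const void *samples,
                           const size_t *sampleSizes, unsigned nbSamples) {
  ZDICT_cover_params_t params;
  memset(&params, 0, sizeof(params));
  params.k = 1024; /* segment size; must satisfy k <= dictBufferCapacity */
  params.d = 8;    /* dmer size; must satisfy d <= k */
  return ZDICT_trainFromBuffer_cover(dictBuffer, 16 * 1024, samples,
                                     sampleSizes, nbSamples, params);
}
#endif
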
/**
 * COVER_best_t is used for two purposes:
 * 1. Synchronizing threads.
 * 2. Saving the best parameters and dictionary.
 *
 * All of the methods except COVER_best_init() are thread safe if zstd is
 * compiled with multithreaded support.
 */
typedef struct COVER_best_s {
  ZSTD_pthread_mutex_t mutex;
  ZSTD_pthread_cond_t cond;
  size_t liveJobs;
  void *dict;
  size_t dictSize;
  ZDICT_cover_params_t parameters;
  size_t compressedSize;
} COVER_best_t;

/**
 * Initialize the `COVER_best_t`.
 */
static void COVER_best_init(COVER_best_t *best) {
  if (best==NULL) return; /* compatible with init on NULL */
  (void)ZSTD_pthread_mutex_init(&best->mutex, NULL);
  (void)ZSTD_pthread_cond_init(&best->cond, NULL);
  best->liveJobs = 0;
  best->dict = NULL;
  best->dictSize = 0;
  best->compressedSize = (size_t)-1;
  memset(&best->parameters, 0, sizeof(best->parameters));
}

/**
 * Wait until liveJobs == 0.
 */
static void COVER_best_wait(COVER_best_t *best) {
  if (!best) {
    return;
  }
  ZSTD_pthread_mutex_lock(&best->mutex);
  while (best->liveJobs != 0) {
    ZSTD_pthread_cond_wait(&best->cond, &best->mutex);
  }
  ZSTD_pthread_mutex_unlock(&best->mutex);
}

/**
 * Call COVER_best_wait() and then destroy the COVER_best_t.
 */
static void COVER_best_destroy(COVER_best_t *best) {
  if (!best) {
    return;
  }
  COVER_best_wait(best);
  if (best->dict) {
    free(best->dict);
  }
  ZSTD_pthread_mutex_destroy(&best->mutex);
  ZSTD_pthread_cond_destroy(&best->cond);
}

/**
 * Called when a thread is about to be launched.
 * Increments liveJobs.
 */
static void COVER_best_start(COVER_best_t *best) {
  if (!best) {
    return;
  }
  ZSTD_pthread_mutex_lock(&best->mutex);
  ++best->liveJobs;
  ZSTD_pthread_mutex_unlock(&best->mutex);
}

/**
 * Called when a thread finishes executing, both on error or success.
 * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
 * If this dictionary is the best so far save it and its parameters.
 */
static void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
                              ZDICT_cover_params_t parameters, void *dict,
                              size_t dictSize) {
  if (!best) {
    return;
  }
  {
    size_t liveJobs;
    ZSTD_pthread_mutex_lock(&best->mutex);
    --best->liveJobs;
    liveJobs = best->liveJobs;
    /* If the new dictionary is better */
    if (compressedSize < best->compressedSize) {
      /* Allocate space if necessary */
      if (!best->dict || best->dictSize < dictSize) {
        if (best->dict) {
          free(best->dict);
        }
        best->dict = malloc(dictSize);
        if (!best->dict) {
          best->compressedSize = ERROR(GENERIC);
          best->dictSize = 0;
          /* Wake waiters and unlock before returning on this error path;
           * returning with the mutex held would deadlock COVER_best_wait().
           */
          ZSTD_pthread_cond_broadcast(&best->cond);
          ZSTD_pthread_mutex_unlock(&best->mutex);
          return;
        }
      }
      /* Save the dictionary, parameters, and size */
      memcpy(best->dict, dict, dictSize);
      best->dictSize = dictSize;
      best->parameters = parameters;
      best->compressedSize = compressedSize;
    }
    ZSTD_pthread_mutex_unlock(&best->mutex);
    if (liveJobs == 0) {
      ZSTD_pthread_cond_broadcast(&best->cond);
    }
  }
}

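/* Lifecycle sketch (illustrative): a dispatcher pairs every
 * COVER_best_start() with exactly one COVER_best_finish() in the worker:
 *
 *   COVER_best_init(&best);
 *   for each parameter set:
 *     COVER_best_start(&best);          // ++liveJobs
 *     worker calls COVER_best_finish(); // --liveJobs, maybe saves the dict
 *   COVER_best_wait(&best);             // blocks until liveJobs == 0
 *   COVER_best_destroy(&best);
 */
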
/**
 * Parameters for COVER_tryParameters().
 */
typedef struct COVER_tryParameters_data_s {
  const COVER_ctx_t *ctx;
  COVER_best_t *best;
  size_t dictBufferCapacity;
  ZDICT_cover_params_t parameters;
} COVER_tryParameters_data_t;

/**
 * Tries a set of parameters and updates the COVER_best_t with the results.
 * This function is thread safe if zstd is compiled with multithreaded support.
 * It takes its parameters as an *OWNING* opaque pointer to support threading.
 */
static void COVER_tryParameters(void *opaque) {
  /* Save parameters as local variables */
  COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t *)opaque;
  const COVER_ctx_t *const ctx = data->ctx;
  const ZDICT_cover_params_t parameters = data->parameters;
  size_t dictBufferCapacity = data->dictBufferCapacity;
  size_t totalCompressedSize = ERROR(GENERIC);
  /* Allocate space for hash table, dict, and freqs */
  COVER_map_t activeDmers;
  BYTE *const dict = (BYTE *)malloc(dictBufferCapacity);
  U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
    goto _cleanup;
  }
  if (!dict || !freqs) {
    DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
    goto _cleanup;
  }
  /* Copy the frequencies because we need to modify them */
  memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32));
  /* Build the dictionary */
  {
    const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
                                              dictBufferCapacity, parameters);
    dictBufferCapacity = ZDICT_finalizeDictionary(
        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbSamples,
        parameters.zParams);
    if (ZDICT_isError(dictBufferCapacity)) {
      DISPLAYLEVEL(1, "Failed to finalize dictionary\n");
      goto _cleanup;
    }
  }
  /* Check total compressed size */
  {
    /* Pointers */
    ZSTD_CCtx *cctx;
    ZSTD_CDict *cdict;
    void *dst;
    /* Local variables */
    size_t dstCapacity;
    size_t i;
    /* Allocate dst with enough space to compress the maximum sized sample */
    {
      size_t maxSampleSize = 0;
      for (i = 0; i < ctx->nbSamples; ++i) {
        maxSampleSize = MAX(ctx->samplesSizes[i], maxSampleSize);
      }
      dstCapacity = ZSTD_compressBound(maxSampleSize);
      dst = malloc(dstCapacity);
    }
    /* Create the cctx and cdict */
    cctx = ZSTD_createCCtx();
    cdict = ZSTD_createCDict(dict, dictBufferCapacity,
                             parameters.zParams.compressionLevel);
    if (!dst || !cctx || !cdict) {
      goto _compressCleanup;
    }
    /* Compress each sample and sum their sizes (or error) */
    totalCompressedSize = dictBufferCapacity;
    for (i = 0; i < ctx->nbSamples; ++i) {
      const size_t size = ZSTD_compress_usingCDict(
          cctx, dst, dstCapacity, ctx->samples + ctx->offsets[i],
          ctx->samplesSizes[i], cdict);
      if (ZSTD_isError(size)) {
        totalCompressedSize = ERROR(GENERIC);
        goto _compressCleanup;
      }
      totalCompressedSize += size;
    }
  _compressCleanup:
    ZSTD_freeCCtx(cctx);
    ZSTD_freeCDict(cdict);
    if (dst) {
      free(dst);
    }
  }

_cleanup:
  COVER_best_finish(data->best, totalCompressedSize, parameters, dict,
                    dictBufferCapacity);
  free(data);
  COVER_map_destroy(&activeDmers);
  if (dict) {
    free(dict);
  }
  if (freqs) {
    free(freqs);
  }
}

ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
    void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
    const size_t *samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t *parameters) {
  /* constants */
  const unsigned nbThreads = parameters->nbThreads;
  const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
  const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
  const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
  const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
  const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
  const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
  const unsigned kIterations =
      (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
  /* Local variables */
  const int displayLevel = parameters->zParams.notificationLevel;
  unsigned iteration = 1;
  unsigned d;
  unsigned k;
  COVER_best_t best;
  POOL_ctx *pool = NULL;

  /* Checks */
  if (kMinK < kMaxD || kMaxK < kMinK) {
    LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
    return ERROR(GENERIC);
  }
  if (nbSamples == 0) {
    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
    return ERROR(GENERIC);
  }
  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
                 ZDICT_DICTSIZE_MIN);
    return ERROR(dstSize_tooSmall);
  }
  if (nbThreads > 1) {
    pool = POOL_create(nbThreads, 1);
    if (!pool) {
      return ERROR(memory_allocation);
    }
  }
  /* Initialization */
  COVER_best_init(&best);
  /* Turn down global display level to clean up display at level 2 and below */
  g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
  /* Loop through d first because each new value needs a new context */
  LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
                    kIterations);
  for (d = kMinD; d <= kMaxD; d += 2) {
    /* Initialize the context for this value of d */
    COVER_ctx_t ctx;
    LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
    if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d)) {
      LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
      COVER_best_destroy(&best);
      POOL_free(pool);
      return ERROR(GENERIC);
    }
    /* Loop through k reusing the same context */
    for (k = kMinK; k <= kMaxK; k += kStepSize) {
      /* Prepare the arguments */
      COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc(
          sizeof(COVER_tryParameters_data_t));
      LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
      if (!data) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
        COVER_best_destroy(&best);
        COVER_ctx_destroy(&ctx);
        POOL_free(pool);
        return ERROR(GENERIC);
      }
      data->ctx = &ctx;
      data->best = &best;
      data->dictBufferCapacity = dictBufferCapacity;
      data->parameters = *parameters;
      data->parameters.k = k;
      data->parameters.d = d;
      data->parameters.steps = kSteps;
      data->parameters.zParams.notificationLevel = g_displayLevel;
      /* Check the parameters */
      if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) {
        DISPLAYLEVEL(1, "Cover parameters incorrect\n");
        free(data);
        continue;
      }
      /* Call the function and pass ownership of data to it */
      COVER_best_start(&best);
      if (pool) {
        POOL_add(pool, &COVER_tryParameters, data);
      } else {
        COVER_tryParameters(data);
      }
      /* Print status */
      LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ",
                         (U32)((iteration * 100) / kIterations));
      ++iteration;
    }
    COVER_best_wait(&best);
    COVER_ctx_destroy(&ctx);
  }
  LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
  /* Fill the output buffer and parameters with output of the best parameters */
  {
    const size_t dictSize = best.dictSize;
    if (ZSTD_isError(best.compressedSize)) {
      const size_t compressedSize = best.compressedSize;
      COVER_best_destroy(&best);
      POOL_free(pool);
      return compressedSize;
    }
    *parameters = best.parameters;
    memcpy(dictBuffer, best.dict, dictSize);
    COVER_best_destroy(&best);
    POOL_free(pool);
    return dictSize;
  }
}
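
/* Usage sketch (illustrative, hypothetical caller): letting the optimizer
 * search d over {6, 8} and k over [50, 2000], then reading back the winning
 * parameters. nbThreads > 1 requires zstd built with multithreading support.
 */
#if 0
static size_t optimizeExample(void *dictBuffer, size_t dictCapacity,
                              const void *samples, const size_t *sampleSizes,
                              unsigned nbSamples) {
  ZDICT_cover_params_t params;
  memset(&params, 0, sizeof(params)); /* k == 0 and d == 0 => search both */
  params.steps = 40;                  /* number of k values to try */
  params.nbThreads = 4;
  {
    const size_t dictSize = ZDICT_optimizeTrainFromBuffer_cover(
        dictBuffer, dictCapacity, samples, sampleSizes, nbSamples, &params);
    if (!ZDICT_isError(dictSize)) {
      /* params.k and params.d now hold the best parameters found */
    }
    return dictSize;
  }
}
#endif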