/*
 * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "hist.h"
#include "zstd_opt.h"


#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
#define ZSTD_FREQ_DIV       4   /* log factor when using previous stats to init next stats */
#define ZSTD_MAX_PRICE      (1<<30)

#define ZSTD_PREDEF_THRESHOLD 1024   /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */


/*-*************************************
*  Price functions for optimal parser
***************************************/

#if 0    /* approximation at bit level */
#  define BITCOST_ACCURACY 0
#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
#  define WEIGHT(stat,opt) ((void)opt, ZSTD_bitWeight(stat))
#elif 0  /* fractional bit accuracy */
#  define BITCOST_ACCURACY 8
#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
#  define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
#else    /* opt==approx, ultra==accurate */
#  define BITCOST_ACCURACY 8
#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
#  define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
#endif

MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
{
    return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
}

MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
{
    U32 const stat = rawStat + 1;
    U32 const hb = ZSTD_highbit32(stat);
    U32 const BWeight = hb * BITCOST_MULTIPLIER;
    U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
    U32 const weight = BWeight + FWeight;
    assert(hb + BITCOST_ACCURACY < 31);
    return weight;
}
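/* Illustration (not upstream code) : ZSTD_fracWeight() is a piecewise-linear
 * approximation of BITCOST_MULTIPLIER * (log2(stat+1) + 1), exact whenever
 * stat+1 is a power of 2. The constant +1 offset is harmless : prices are
 * only ever used relatively (a base price minus a symbol weight), so it
 * cancels out. The hypothetical self-check below, kept in an #if 0 block
 * like the other debug helpers in this file, illustrates the arithmetic. */
#if 0
MEM_STATIC void ZSTD_fracWeight_selfCheck(void)
{
    /* rawStat=5 : stat=6, hb=2, BWeight=2*256=512, FWeight=(6<<8)>>2=384 */
    assert(ZSTD_fracWeight(5) == 896);   /* 3.5 * BITCOST_MULTIPLIER ~ log2(6)+1 bits */
    /* rawStat=15 : stat=16=2^4, so weight is exactly (4+1)*BITCOST_MULTIPLIER */
    assert(ZSTD_fracWeight(15) == 5 * BITCOST_MULTIPLIER);
}
#endif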
#if (DEBUGLEVEL>=2)
/* debugging function,
 * @return price in bytes as fractional value
 * for debug messages only */
MEM_STATIC double ZSTD_fCost(U32 price)
{
    return (double)price / (BITCOST_MULTIPLIER*8);
}
#endif

static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
{
    optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
    optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
    optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
    optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
}


/* ZSTD_downscaleStat() :
 * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
 * return the resulting sum of elements */
static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
{
    U32 s, sum=0;
    DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
    assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
    for (s=0; s<lastEltIndex+1; s++) {
        table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
        sum += table[s];
    }
    return sum;
}

/* ZSTD_rescaleFreqs() :
 * if first block (detected by optPtr->litLengthSum == 0) : init statistics
 *    take hints from dictionary if there is one
 *    or init from zero, using src for literals stats, or flat 1 for match symbols
 * otherwise downscale existing stats, to be used as seed for next block.
 */
static void
ZSTD_rescaleFreqs(optState_t* const optPtr,
            const BYTE* const src, size_t const srcSize,
                  int const optLevel)
{
    DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
    optPtr->priceType = zop_dynamic;

    if (optPtr->litLengthSum == 0) {  /* first block : init */
        if (srcSize <= ZSTD_PREDEF_THRESHOLD) {  /* heuristic */
            DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
            optPtr->priceType = zop_predef;
        }

        assert(optPtr->symbolCosts != NULL);
        if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
            /* huffman table presumed generated by dictionary */
            optPtr->priceType = zop_dynamic;

            assert(optPtr->litFreq != NULL);
            optPtr->litSum = 0;
            {   unsigned lit;
                for (lit=0; lit<=MaxLit; lit++) {
                    U32 const scaleLog = 11;   /* scale to 2K */
                    U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
                    assert(bitCost <= scaleLog);
                    optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->litSum += optPtr->litFreq[lit];
            }   }

            {   unsigned ll;
                FSE_CState_t llstate;
                FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
                optPtr->litLengthSum = 0;
                for (ll=0; ll<=MaxLL; ll++) {
                    U32 const scaleLog = 10;   /* scale to 1K */
                    U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
                    assert(bitCost < scaleLog);
                    optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->litLengthSum += optPtr->litLengthFreq[ll];
            }   }

            {   unsigned ml;
                FSE_CState_t mlstate;
                FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
                optPtr->matchLengthSum = 0;
                for (ml=0; ml<=MaxML; ml++) {
                    U32 const scaleLog = 10;
                    U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
                    assert(bitCost < scaleLog);
                    optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
            }   }
            {   unsigned of;
                FSE_CState_t ofstate;
                FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
                optPtr->offCodeSum = 0;
                for (of=0; of<=MaxOff; of++) {
                    U32 const scaleLog = 10;
                    U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
                    assert(bitCost < scaleLog);
                    optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->offCodeSum += optPtr->offCodeFreq[of];
            }   }

        } else {  /* not a dictionary */

            assert(optPtr->litFreq != NULL);
            {   unsigned lit = MaxLit;
                HIST_count_simple(optPtr->litFreq, &lit, src, srcSize);   /* use raw first block to init statistics */
            }
            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);

            {   unsigned ll;
                for (ll=0; ll<=MaxLL; ll++)
                    optPtr->litLengthFreq[ll] = 1;
            }
            optPtr->litLengthSum = MaxLL+1;

            {   unsigned ml;
                for (ml=0; ml<=MaxML; ml++)
                    optPtr->matchLengthFreq[ml] = 1;
            }
            optPtr->matchLengthSum = MaxML+1;

            {   unsigned of;
                for (of=0; of<=MaxOff; of++)
                    optPtr->offCodeFreq[of] = 1;
            }
            optPtr->offCodeSum = MaxOff+1;

        }

    } else {   /* new block : re-use previous statistics, scaled down */

        optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
        optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
        optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
        optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
    }

    ZSTD_setBasePrices(optPtr, optLevel);
}

/* ZSTD_rawLiteralsCost() :
 * price of literals (only) in specified segment (whose length can be 0).
 * does not include price of literalLength symbol */
static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
                                const optState_t* const optPtr,
                                int optLevel)
{
    if (litLength == 0) return 0;
    if (optPtr->priceType == zop_predef)
        return (litLength*6) * BITCOST_MULTIPLIER;  /* 6 bits per literal - no statistic used */

    /* dynamic statistics */
    {   U32 price = litLength * optPtr->litSumBasePrice;
        U32 u;
        for (u=0; u < litLength; u++) {
            assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice);   /* literal cost should never be negative */
            price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
        }
        return price;
    }
}
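/* Note (illustrative) : the dynamic price of one literal `c` is
 *     WEIGHT(litSum) - WEIGHT(litFreq[c])  ~=  log2(litSum / litFreq[c])  =  -log2(P(c))
 * i.e. the Shannon cost of the symbol under current statistics,
 * scaled by BITCOST_MULTIPLIER. The loop above computes it as
 * litLength * litSumBasePrice minus the per-symbol weights,
 * which is the same sum, merely factored differently. */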
/* ZSTD_litLengthPrice() :
 * cost of literalLength symbol */
static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
{
    if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);

    /* dynamic statistics */
    {   U32 const llCode = ZSTD_LLcode(litLength);
        return (LL_bits[llCode] * BITCOST_MULTIPLIER)
             + optPtr->litLengthSumBasePrice
             - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
    }
}

/* ZSTD_litLengthContribution() :
 * @return ( cost(litlength) - cost(0) )
 * this value can then be added to rawLiteralsCost()
 * to provide a cost which is directly comparable to a match ending at same position */
static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr, int optLevel)
{
    if (optPtr->priceType >= zop_predef) return WEIGHT(litLength, optLevel);

    /* dynamic statistics */
    {   U32 const llCode = ZSTD_LLcode(litLength);
        int const contribution = (LL_bits[llCode] * BITCOST_MULTIPLIER)
                               + WEIGHT(optPtr->litLengthFreq[0], optLevel)   /* note: log2litLengthSum cancel out */
                               - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
#if 1
        return contribution;
#else
        return MAX(0, contribution);   /* sometimes better, sometimes not ... */
#endif
    }
}

/* ZSTD_literalsContribution() :
 * creates a fake cost for the literals part of a sequence
 * which can be compared to the ending cost of a match
 * should a new match start at this position */
static int ZSTD_literalsContribution(const BYTE* const literals, U32 const litLength,
                                     const optState_t* const optPtr,
                                     int optLevel)
{
    int const contribution = ZSTD_rawLiteralsCost(literals, litLength, optPtr, optLevel)
                           + ZSTD_litLengthContribution(litLength, optPtr, optLevel);
    return contribution;
}

/* ZSTD_getMatchPrice() :
 * Provides the cost of the match part (offset + matchLength) of a sequence.
 * Must be combined with the literals cost (ZSTD_rawLiteralsCost() + ZSTD_litLengthPrice())
 * to get the full cost of a sequence.
 * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
FORCE_INLINE_TEMPLATE U32
ZSTD_getMatchPrice(U32 const offset,
                   U32 const matchLength,
             const optState_t* const optPtr,
                   int const optLevel)
{
    U32 price;
    U32 const offCode = ZSTD_highbit32(offset+1);
    U32 const mlBase = matchLength - MINMATCH;
    assert(matchLength >= MINMATCH);

    if (optPtr->priceType == zop_predef)   /* fixed scheme, do not use statistics */
        return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);

    /* dynamic statistics */
    price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
    if ((optLevel<2) /*static*/ && offCode >= 20)
        price += (offCode-19)*2 * BITCOST_MULTIPLIER;   /* handicap for long distance offsets, favor decompression speed */

    /* match Length */
    {   U32 const mlCode = ZSTD_MLcode(mlBase);
        price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
    }

    price += BITCOST_MULTIPLIER / 5;   /* heuristic : make matches a bit more costly to favor fewer sequences => faster decompression speed */

    DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
    return price;
}
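/* Note (illustrative) : `offset` here lives in the shifted offset space used by
 * the parser : values 0-2 denote repcodes, real matches are stored as
 * distance + ZSTD_REP_MOVE. Its offset code is offCode = ZSTD_highbit32(offset+1)
 * = floor(log2(offset+1)), and offCode is also the number of raw extra bits
 * the frame format spends on that offset.
 * Example : offset == 1023 => offCode == ZSTD_highbit32(1024) == 10,
 * so the dynamic price is 10 raw bits plus the entropy cost of code 10
 * (offCodeSumBasePrice - WEIGHT(offCodeFreq[10])). */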
/* ZSTD_updateStats() :
 * assumption : literals + litLength <= iend */
static void ZSTD_updateStats(optState_t* const optPtr,
                             U32 litLength, const BYTE* literals,
                             U32 offsetCode, U32 matchLength)
{
    /* literals */
    {   U32 u;
        for (u=0; u < litLength; u++)
            optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
        optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
    }

    /* literal Length */
    {   U32 const llCode = ZSTD_LLcode(litLength);
        optPtr->litLengthFreq[llCode]++;
        optPtr->litLengthSum++;
    }

    /* match offset code (0-2=>repCode; 3+=>offset+2) */
    {   U32 const offCode = ZSTD_highbit32(offsetCode+1);
        assert(offCode <= MaxOff);
        optPtr->offCodeFreq[offCode]++;
        optPtr->offCodeSum++;
    }

    /* match Length */
    {   U32 const mlBase = matchLength - MINMATCH;
        U32 const mlCode = ZSTD_MLcode(mlBase);
        optPtr->matchLengthFreq[mlCode]++;
        optPtr->matchLengthSum++;
    }
}


/* ZSTD_readMINMATCH() :
 * function safe only for comparisons
 * assumption : memPtr must be at least 4 bytes before end of buffer */
MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
{
    switch (length)
    {
    default :
    case 4 : return MEM_read32(memPtr);
    case 3 : if (MEM_isLittleEndian())
                return MEM_read32(memPtr)<<8;
             else
                return MEM_read32(memPtr)>>8;
    }
}


/* Update hashTable3 up to ip (excluded)
   Assumption : always within prefix (i.e. not within extDict) */
static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, const BYTE* const ip)
{
    U32* const hashTable3 = ms->hashTable3;
    U32 const hashLog3 = ms->hashLog3;
    const BYTE* const base = ms->window.base;
    U32 idx = ms->nextToUpdate3;
    U32 const target = ms->nextToUpdate3 = (U32)(ip - base);
    size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
    assert(hashLog3 > 0);

    while(idx < target) {
        hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
        idx++;
    }

    return hashTable3[hash3];
}


/*-*************************************
*  Binary Tree search
***************************************/
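/* Layout note (illustrative) : ms->chainTable is used here as a rolling binary
 * tree of (1 << (chainLog-1)) nodes, kept sorted by suffix order. Each
 * position `idx` owns two slots :
 *     bt[2*(idx & btMask)]     : index of its "smaller" child
 *     bt[2*(idx & btMask) + 1] : index of its "larger" child
 * Insertions descend from the hash-table root while updating
 * smallerPtr/largerPtr, and positions older than `btLow` fall out of the
 * rolling window. */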
/** ZSTD_insertBt1() : add one or multiple positions to tree.
 *  ip : assumed <= iend-8 .
 * @return : nb of positions added */
static U32 ZSTD_insertBt1(
                ZSTD_matchState_t* ms,
                const BYTE* const ip, const BYTE* const iend,
                U32 const mls, const int extDict)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32*   const hashTable = ms->hashTable;
    U32    const hashLog = cParams->hashLog;
    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32*   const bt = ms->chainTable;
    U32    const btLog  = cParams->chainLog - 1;
    U32    const btMask = (1 << btLog) - 1;
    U32 matchIndex = hashTable[h];
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* match;
    const U32 current = (U32)(ip-base);
    const U32 btLow = btMask >= current ? 0 : current - btMask;
    U32* smallerPtr = bt + 2*(current&btMask);
    U32* largerPtr  = smallerPtr + 1;
    U32 dummy32;   /* to be nullified at the end */
    U32 const windowLow = ms->window.lowLimit;
    U32 matchEndIdx = current+8+1;
    size_t bestLength = 8;
    U32 nbCompares = 1U << cParams->searchLog;
#ifdef ZSTD_C_PREDICT
    U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
    U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
    predictedSmall += (predictedSmall>0);
    predictedLarge += (predictedLarge>0);
#endif /* ZSTD_C_PREDICT */

    DEBUGLOG(8, "ZSTD_insertBt1 (%u)", current);

    assert(ip <= iend-8);   /* required for h calculation */
    hashTable[h] = current;   /* Update Hash Table */

    assert(windowLow > 0);
    while (nbCompares-- && (matchIndex >= windowLow)) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        assert(matchIndex < current);

#ifdef ZSTD_C_PREDICT   /* note : can create issues when hlog is small (<= 11) */
        const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
        if (matchIndex == predictedSmall) {
            /* no need to check length, result known */
            *smallerPtr = matchIndex;
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
            predictedSmall = predictPtr[1] + (predictPtr[1]>0);
            continue;
        }
        if (matchIndex == predictedLarge) {
            *largerPtr = matchIndex;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
            predictedLarge = predictPtr[0] + (predictPtr[0]>0);
            continue;
        }
#endif

        if (!extDict || (matchIndex+matchLength >= dictLimit)) {
            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if actually extDict */
            match = base + matchIndex;
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
        } else {
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
        }

        if (matchLength > bestLength) {
            bestLength = matchLength;
            if (matchLength > matchEndIdx - matchIndex)
                matchEndIdx = matchIndex + (U32)matchLength;
        }

        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
            break;   /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
        }

        if (match[matchLength] < ip[matchLength]) {   /* necessarily within buffer */
            /* match is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
        } else {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;
    if (bestLength > 384) return MIN(192, (U32)(bestLength - 384));   /* speed optimization */
    assert(matchEndIdx > current + 8);
    return matchEndIdx - (current + 8);
}

FORCE_INLINE_TEMPLATE
void ZSTD_updateTree_internal(
                ZSTD_matchState_t* ms,
                const BYTE* const ip, const BYTE* const iend,
                const U32 mls, const ZSTD_dictMode_e dictMode)
{
    const BYTE* const base = ms->window.base;
    U32 const target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;
    DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u  (dictMode:%u)",
                idx, target, dictMode);

    while(idx < target)
        idx += ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
    ms->nextToUpdate = target;
}

void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
    ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
}

FORCE_INLINE_TEMPLATE
U32 ZSTD_insertBtAndGetAllMatches (
                    ZSTD_matchState_t* ms,
                    const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
                    U32 rep[ZSTD_REP_NUM],
                    U32 const ll0,   /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
                    ZSTD_match_t* matches,
                    const U32 lengthToBeat,
                    U32 const mls /* template */)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
    const BYTE* const base = ms->window.base;
    U32 const current = (U32)(ip-base);
    U32 const hashLog = cParams->hashLog;
    U32 const minMatch = (mls==3) ? 3 : 4;
    U32* const hashTable = ms->hashTable;
    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32 matchIndex  = hashTable[h];
    U32* const bt   = ms->chainTable;
    U32 const btLog = cParams->chainLog - 1;
    U32 const btMask= (1U << btLog) - 1;
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const dictBase = ms->window.dictBase;
    U32 const dictLimit = ms->window.dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    U32 const btLow = btMask >= current ? 0 : current - btMask;
    U32 const windowLow = ms->window.lowLimit;
    U32 const matchLow = windowLow ? windowLow : 1;
    U32* smallerPtr = bt + 2*(current&btMask);
    U32* largerPtr  = bt + 2*(current&btMask) + 1;
    U32 matchEndIdx = current+8+1;   /* farthest referenced position of any match => detects repetitive patterns */
    U32 dummy32;   /* to be nullified at the end */
    U32 mnum = 0;
    U32 nbCompares = 1U << cParams->searchLog;
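    /* Note (illustrative) : when searching a dictMatchState, indices from the
     * dictionary's own index space are remapped into the current one by adding
     * dmsIndexDelta == windowLow - dmsHighLimit (computed below). A dictionary
     * match at dictMatchIndex then behaves as if it sat at
     * dictMatchIndex + dmsIndexDelta, i.e. just below windowLow, so offsets
     * (current - matchIndex) stay consistent with in-window matches. */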
    const ZSTD_matchState_t* dms    = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
    const ZSTD_compressionParameters* const dmsCParams =
                                      dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
    const BYTE* const dmsBase       = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
    const BYTE* const dmsEnd        = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
    U32         const dmsHighLimit  = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
    U32         const dmsLowLimit   = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
    U32         const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
    U32         const dmsHashLog    = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
    U32         const dmsBtLog      = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
    U32         const dmsBtMask     = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
    U32         const dmsBtLow      = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;

    size_t bestLength = lengthToBeat-1;
    DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", current);

    /* check repCode */
    assert(ll0 <= 1);   /* necessarily 1 or 0 */
    {   U32 const lastR = ZSTD_REP_NUM + ll0;
        U32 repCode;
        for (repCode = ll0; repCode < lastR; repCode++) {
            U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
            U32 const repIndex = current - repOffset;
            U32 repLen = 0;
            assert(current >= dictLimit);
            if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < current-dictLimit) {   /* equivalent to `current > repIndex >= dictLimit` */
                if (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch)) {
                    repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
                }
            } else {   /* repIndex < dictLimit || repIndex >= current */
                const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
                                             dmsBase + repIndex - dmsIndexDelta :
                                             dictBase + repIndex;
                assert(current >= windowLow);
                if ( dictMode == ZSTD_extDict
                  && ( ((repOffset-1) /*intentional overflow*/ < current - windowLow)   /* equivalent to `current > repIndex >= windowLow` */
                     & (((U32)((dictLimit-1) - repIndex) >= 3) )   /* intentional overflow : do not test positions overlapping 2 memory segments */)
                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
                }
                if (dictMode == ZSTD_dictMatchState
                  && ( ((repOffset-1) /*intentional overflow*/ < current - (dmsLowLimit + dmsIndexDelta))   /* equivalent to `current > repIndex >= dmsLowLimit` */
                     & ((U32)((dictLimit-1) - repIndex) >= 3) )   /* intentional overflow : do not test positions overlapping 2 memory segments */
                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
            }   }
            /* save longer solution */
            if (repLen > bestLength) {
                DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
                            repCode, ll0, repOffset, repLen);
                bestLength = repLen;
                matches[mnum].off = repCode - ll0;
                matches[mnum].len = (U32)repLen;
                mnum++;
                if ( (repLen > sufficient_len)
                   | (ip+repLen == iLimit) ) {   /* best possible */
                    return mnum;
    }   }   }   }
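    /* Note (illustrative) : matches[] is filled in strictly increasing length
     * order : repcodes first (cheapest to encode), then a possible 3-byte
     * match, then binary-tree matches. A candidate is only appended when it
     * beats bestLength, so matches[nbMatches-1] is always the longest match
     * found, a property the parser relies on. Unsigned wrap-around is used on
     * purpose in the checks above : `repOffset-1 < current-dictLimit` (all U32)
     * rejects both repOffset==0 (which wraps to 0xFFFFFFFF) and out-of-window
     * offsets in a single test. */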
    /* HC3 match finder */
    if ((mls == 3) /*static*/ && (bestLength < mls)) {
        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, ip);
        if ((matchIndex3 >= matchLow)
          & (current - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
            size_t mlen;
            if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
                const BYTE* const match = base + matchIndex3;
                mlen = ZSTD_count(ip, match, iLimit);
            } else {
                const BYTE* const match = dictBase + matchIndex3;
                mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
            }

            /* save best solution */
            if (mlen >= mls /* == 3, and necessarily > bestLength (checked at block entry) */) {
                DEBUGLOG(8, "found small match with hlog3, of length %u",
                            (U32)mlen);
                bestLength = mlen;
                assert(current > matchIndex3);
                assert(mnum==0);   /* no prior solution */
                matches[0].off = (current - matchIndex3) + ZSTD_REP_MOVE;
                matches[0].len = (U32)mlen;
                mnum = 1;
                if ( (mlen > sufficient_len) |
                     (ip+mlen == iLimit) ) {   /* best possible length */
                    ms->nextToUpdate = current+1;   /* skip insertion */
                    return 1;
        }   }   }
        /* no dictMatchState lookup: dicts don't have a populated HC3 table */
    }

    hashTable[h] = current;   /* Update Hash Table */

    while (nbCompares-- && (matchIndex >= matchLow)) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        const BYTE* match;
        assert(current > matchIndex);

        if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
            assert(matchIndex+matchLength >= dictLimit);   /* ensure the condition is correct when !extDict */
            match = base + matchIndex;
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
        } else {
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* prepare for match[matchLength] */
        }

        if (matchLength > bestLength) {
            DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
                    (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);
            assert(matchEndIdx > matchIndex);
            if (matchLength > matchEndIdx - matchIndex)
                matchEndIdx = matchIndex + (U32)matchLength;
            bestLength = matchLength;
            matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
            matches[mnum].len = (U32)matchLength;
            mnum++;
            if ( (matchLength > ZSTD_OPT_NUM)
               | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
                if (dictMode == ZSTD_dictMatchState) nbCompares = 0;   /* break should also skip searching dms */
                break;   /* drop, to preserve bt consistency (miss a little bit of compression) */
            }
        }

        if (match[matchLength] < ip[matchLength]) {
            /* match smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            smallerPtr = nextPtr+1;               /* new candidate => larger than match, which was smaller than current */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous, closer to current */
        } else {
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;

    if (dictMode == ZSTD_dictMatchState && nbCompares) {
        size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
        U32 dictMatchIndex = dms->hashTable[dmsH];
        const U32* const dmsBt = dms->chainTable;
        commonLengthSmaller = commonLengthLarger = 0;
        while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) {
            const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
            const BYTE* match = dmsBase + dictMatchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
            if (dictMatchIndex+matchLength >= dmsHighLimit)
                match = base + dictMatchIndex + dmsIndexDelta;   /* to prepare for next usage of match[matchLength] */

            if (matchLength > bestLength) {
                matchIndex = dictMatchIndex + dmsIndexDelta;
                DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
                        (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE);
                if (matchLength > matchEndIdx - matchIndex)
                    matchEndIdx = matchIndex + (U32)matchLength;
                bestLength = matchLength;
                matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
                matches[mnum].len = (U32)matchLength;
                mnum++;
                if ( (matchLength > ZSTD_OPT_NUM)
                   | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
                }
            }

            if (dictMatchIndex <= dmsBtLow) { break; }   /* beyond tree size, stop the search */
            if (match[matchLength] < ip[matchLength]) {
                commonLengthSmaller = matchLength;   /* all smaller will now have at least this guaranteed common length */
                dictMatchIndex = nextPtr[1];         /* new matchIndex larger than previous (closer to current) */
            } else {
                /* match is larger than current */
                commonLengthLarger = matchLength;
                dictMatchIndex = nextPtr[0];
            }
        }
    }

    assert(matchEndIdx > current+8);
    ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
    return mnum;
}


FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
                        ZSTD_matchState_t* ms,
                        const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
                        U32 rep[ZSTD_REP_NUM], U32 const ll0,
                        ZSTD_match_t* matches, U32 const lengthToBeat)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32 const matchLengthSearch = cParams->minMatch;
    DEBUGLOG(8, "ZSTD_BtGetAllMatches");
    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
    ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
    switch(matchLengthSearch)
    {
    case 3 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 3);
    default :
    case 4 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 4);
    case 5 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 5);
    case 7 :
    case 6 : return ZSTD_insertBtAndGetAllMatches(ms, ip, iHighLimit, dictMode, rep, ll0, matches, lengthToBeat, 6);
    }
}


/*-*******************************
*  Optimal parser
*********************************/
typedef struct repcodes_s {
    U32 rep[3];
} repcodes_t;

static repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
{
    repcodes_t newReps;
    if (offset >= ZSTD_REP_NUM) {   /* full offset */
        newReps.rep[2] = rep[1];
        newReps.rep[1] = rep[0];
        newReps.rep[0] = offset - ZSTD_REP_MOVE;
    } else {   /* repcode */
        U32 const repCode = offset + ll0;
        if (repCode > 0) {   /* note : if repCode==0, no change */
            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
            newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
            newReps.rep[1] = rep[0];
            newReps.rep[0] = currentOffset;
        } else {   /* repCode == 0 */
            memcpy(&newReps, rep, sizeof(newReps));
        }
    }
    return newReps;
}
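/* Illustration (not part of the build) : the repcode history behaves like a
 * tiny move-to-front list. With rep = {100, 200, 300} :
 *   - a new full offset 50 (passed as 50+ZSTD_REP_MOVE)      => {50, 100, 200}
 *   - offset==1 with ll0==0 (repCode 1, i.e. reuse rep[1])   => {200, 100, 300}
 *   - offset==2 with ll0==1 (repCode 3 == ZSTD_REP_NUM, the special
 *     "rep[0]-1" slot, only reachable when litLength==0)     => {99, 100, 200} */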
static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
{
    return sol.litlen + sol.mlen;
}

#if 0 /* debug */

static void
listStats(const U32* table, int lastEltID)
{
    int const nbElts = lastEltID + 1;
    int enb;
    for (enb=0; enb < nbElts; enb++) {
        (void)table;
        /* RAWLOG(2, "%3i:%3i,  ", enb, table[enb]); */
        RAWLOG(2, "%4i,", table[enb]);
    }
    RAWLOG(2, " \n");
}

#endif

FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
                               seqStore_t* seqStore,
                               U32 rep[ZSTD_REP_NUM],
                               const void* src, size_t srcSize,
                               const int optLevel,
                               const ZSTD_dictMode_e dictMode)
{
    optState_t* const optStatePtr = &ms->opt;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ms->window.base;
    const BYTE* const prefixStart = base + ms->window.dictLimit;
    const ZSTD_compressionParameters* const cParams = &ms->cParams;

    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
    U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;

    ZSTD_optimal_t* const opt = optStatePtr->priceTable;
    ZSTD_match_t* const matches = optStatePtr->matchTable;
    ZSTD_optimal_t lastSequence;

    /* init */
    DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
                (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
    assert(optLevel <= 2);
    ms->nextToUpdate3 = ms->nextToUpdate;
    ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
    ip += (ip==prefixStart);
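    /* Note (illustrative) : the loop below is a shortest-path search over the
     * next ZSTD_OPT_NUM positions. opt[pos] caches the cheapest known way to
     * encode the pos bytes following ip : its price, the last step taken
     * (mlen==0 for a literal, otherwise a match of length mlen at offset off),
     * the pending litlen, and the repcode history that choice would produce.
     * Prices are first seeded from the matches found at ip, then refined
     * position by position; once a stretch ends, the selected path is rebuilt
     * backwards at _shortestPath. */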
    /* Match Loop */
    while (ip < ilimit) {
        U32 cur, last_pos = 0;

        /* find first match */
        {   U32 const litlen = (U32)(ip - anchor);
            U32 const ll0 = !litlen;
            U32 const nbMatches = ZSTD_BtGetAllMatches(ms, ip, iend, dictMode, rep, ll0, matches, minMatch);
            if (!nbMatches) { ip++; continue; }

            /* initialize opt[0] */
            { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
            opt[0].mlen = 0;   /* means is_a_literal */
            opt[0].litlen = litlen;
            opt[0].price = ZSTD_literalsContribution(anchor, litlen, optStatePtr, optLevel);

            /* large match -> immediate encoding */
            {   U32 const maxML = matches[nbMatches-1].len;
                U32 const maxOffset = matches[nbMatches-1].off;
                DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start a new series",
                            nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));

                if (maxML > sufficient_len) {
                    lastSequence.litlen = litlen;
                    lastSequence.mlen = maxML;
                    lastSequence.off = maxOffset;
                    DEBUGLOG(6, "large match (%u>%u), immediate encoding",
                                maxML, sufficient_len);
                    cur = 0;
                    last_pos = ZSTD_totalLen(lastSequence);
                    goto _shortestPath;
            }   }

            /* set prices for first matches starting position == 0 */
            {   U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
                U32 pos;
                U32 matchNb;
                for (pos = 1; pos < minMatch; pos++) {
                    opt[pos].price = ZSTD_MAX_PRICE;   /* mlen, litlen and price will be fixed during forward scanning */
                }
                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
                    U32 const offset = matches[matchNb].off;
                    U32 const end = matches[matchNb].len;
                    repcodes_t const repHistory = ZSTD_updateRep(rep, offset, ll0);
                    for ( ; pos <= end ; pos++ ) {
                        U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
                        U32 const sequencePrice = literalsPrice + matchPrice;
                        DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
                                    pos, ZSTD_fCost(sequencePrice));
                        opt[pos].mlen = pos;
                        opt[pos].off = offset;
                        opt[pos].litlen = litlen;
                        opt[pos].price = sequencePrice;
                        ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));
                        memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
                }   }
                last_pos = pos-1;
            }
        }

        /* check further positions */
        for (cur = 1; cur <= last_pos; cur++) {
            const BYTE* const inr = ip + cur;
            assert(cur < ZSTD_OPT_NUM);
            DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur);

            /* Fix current position with one literal if cheaper */
            {   U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
                int const price = opt[cur-1].price
                                + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
                                + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
                                - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
                assert(price < 1000000000);   /* overflow check */
                if (price <= opt[cur].price) {
                    DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
                                opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
                    opt[cur].mlen = 0;
                    opt[cur].off = 0;
                    opt[cur].litlen = litlen;
                    opt[cur].price = price;
                    memcpy(opt[cur].rep, opt[cur-1].rep, sizeof(opt[cur].rep));
                } else {
                    DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
                                opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
                }
            }

            /* last match must start at a minimum distance of 8 from oend */
            if (inr > ilimit) continue;

            if (cur == last_pos) break;

            if ( (optLevel==0) /*static_test*/
              && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
                DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
                continue;   /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
            }

            {   U32 const ll0 = (opt[cur].mlen != 0);
                U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
                U32 const previousPrice = opt[cur].price;
                U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
                U32 const nbMatches = ZSTD_BtGetAllMatches(ms, inr, iend, dictMode, opt[cur].rep, ll0, matches, minMatch);
                U32 matchNb;
                if (!nbMatches) {
                    DEBUGLOG(7, "rPos:%u : no match found", cur);
                    continue;
                }

                {   U32 const maxML = matches[nbMatches-1].len;
                    DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
                                inr-istart, cur, nbMatches, maxML);

                    if ( (maxML > sufficient_len)
                      || (cur + maxML >= ZSTD_OPT_NUM) ) {
                        lastSequence.mlen = maxML;
                        lastSequence.off = matches[nbMatches-1].off;
                        lastSequence.litlen = litlen;
                        cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0;   /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */
                        last_pos = cur + ZSTD_totalLen(lastSequence);
                        if (cur > ZSTD_OPT_NUM) cur = 0;   /* underflow => first match */
                        goto _shortestPath;
                }   }

                /* set prices using matches found at position == cur */
                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
                    U32 const offset = matches[matchNb].off;
                    repcodes_t const repHistory = ZSTD_updateRep(opt[cur].rep, offset, ll0);
                    U32 const lastML = matches[matchNb].len;
                    U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
                    U32 mlen;

                    DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
                                matchNb, matches[matchNb].off, lastML, litlen);

                    for (mlen = lastML; mlen >= startML; mlen--) {   /* scan downward */
                        U32 const pos = cur + mlen;
                        int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);

                        if ((pos > last_pos) || (price < opt[pos].price)) {
                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
                            while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }   /* fill empty positions */
                            opt[pos].mlen = mlen;
                            opt[pos].off = offset;
                            opt[pos].litlen = litlen;
                            opt[pos].price = price;
                            ZSTD_STATIC_ASSERT(sizeof(opt[pos].rep) == sizeof(repHistory));
                            memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
                        } else {
                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
                            if (optLevel==0) break;   /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
                        }
            }   }   }
        }   /* for (cur = 1; cur <= last_pos; cur++) */

        lastSequence = opt[last_pos];
        cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0;   /* single sequence, and it starts before `ip` */
        assert(cur < ZSTD_OPT_NUM);   /* control overflow */
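        /* Note (illustrative) : each opt[seqPos] records only its last step,
         * so the chosen path is recovered backwards : starting from the final
         * position, hop back litlen+mlen positions at a time, copying each
         * visited sequence into the tail of opt[] (opt[storeStart..storeEnd]).
         * That tail is then replayed forward to emit sequences in order. */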
_shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
        assert(opt[0].mlen == 0);

        {   U32 const storeEnd = cur + 1;
            U32 storeStart = storeEnd;
            U32 seqPos = cur;

            DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
                        last_pos, cur); (void)last_pos;
            assert(storeEnd < ZSTD_OPT_NUM);
            DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
                        storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
            opt[storeEnd] = lastSequence;
            while (seqPos > 0) {
                U32 const backDist = ZSTD_totalLen(opt[seqPos]);
                storeStart--;
                DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
                            seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
                opt[storeStart] = opt[seqPos];
                seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
            }

            /* save sequences */
            DEBUGLOG(6, "sending selected sequences into seqStore");
            {   U32 storePos;
                for (storePos=storeStart; storePos <= storeEnd; storePos++) {
                    U32 const llen = opt[storePos].litlen;
                    U32 const mlen = opt[storePos].mlen;
                    U32 const offCode = opt[storePos].off;
                    U32 const advance = llen + mlen;
                    DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
                                anchor - istart, (unsigned)llen, (unsigned)mlen);

                    if (mlen==0) {   /* only literals => must be last "sequence", actually starting a new stream of sequences */
                        assert(storePos == storeEnd);   /* must be last sequence */
                        ip = anchor + llen;   /* last "sequence" is a bunch of literals => don't progress anchor */
                        continue;   /* will finish */
                    }

                    /* repcodes update : like ZSTD_updateRep(), but update in place */
                    if (offCode >= ZSTD_REP_NUM) {   /* full offset */
                        rep[2] = rep[1];
                        rep[1] = rep[0];
                        rep[0] = offCode - ZSTD_REP_MOVE;
                    } else {   /* repcode */
                        U32 const repCode = offCode + (llen==0);
                        if (repCode) {   /* note : if repCode==0, no change */
                            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
                            if (repCode >= 2) rep[2] = rep[1];
                            rep[1] = rep[0];
                            rep[0] = currentOffset;
                    }   }

                    assert(anchor + llen <= iend);
                    ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
                    ZSTD_storeSeq(seqStore, llen, anchor, offCode, mlen-MINMATCH);
                    anchor += advance;
                    ip = anchor;
            }   }
            ZSTD_setBasePrices(optStatePtr, optLevel);
        }

    }   /* while (ip < ilimit) */

    /* Return the last literals size */
    return iend - anchor;
}


size_t ZSTD_compressBlock_btopt(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    DEBUGLOG(5, "ZSTD_compressBlock_btopt");
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict);
}


/* used in 2-pass strategy */
static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
{
    U32 s, sum=0;
    assert(ZSTD_FREQ_DIV+bonus >= 0);
    for (s=0; s<lastEltIndex+1; s++) {
        table[s] <<= ZSTD_FREQ_DIV+bonus;
        table[s]--;
        sum += table[s];
    }
    return sum;
}

/* used in 2-pass strategy */
MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
{
    optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
    optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
    optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
    optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
}
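/* Note (illustrative) : ZSTD_upscaleStat() is roughly the inverse of
 * ZSTD_downscaleStat() : it multiplies every count by 2^ZSTD_FREQ_DIV (= 16
 * with bonus==0), and the -1 compensates the +1 floor that the next downscale
 * will re-add. The apparent purpose : the second pass of the 2-pass strategy
 * re-enters ZSTD_compressBlock_opt_generic() on the same first block, whose
 * ZSTD_rescaleFreqs() would otherwise immediately divide the freshly
 * collected statistics by 16, diluting the benefit of the first pass. */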
/* ZSTD_initStats_ultra():
 * make a first compression pass, just to seed stats with more accurate starting values.
 * only works on first block, with no dictionary and no ldm.
 * this function cannot error, hence its contract must be respected.
 */
static void
ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
                     seqStore_t* seqStore,
                     U32 rep[ZSTD_REP_NUM],
                     const void* src, size_t srcSize)
{
    U32 tmpRep[ZSTD_REP_NUM];   /* updated rep codes will sink here */
    memcpy(tmpRep, rep, sizeof(tmpRep));

    DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
    assert(ms->opt.litLengthSum == 0);    /* first block */
    assert(seqStore->sequences == seqStore->sequencesStart);   /* no ldm */
    assert(ms->window.dictLimit == ms->window.lowLimit);   /* no dictionary */
    assert(ms->window.dictLimit - ms->nextToUpdate <= 1);   /* no prefix (note: intentional overflow, defined as two's complement) */

    ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);   /* generate stats into ms->opt */

    /* invalidate first scan from history */
    ZSTD_resetSeqStore(seqStore);
    ms->window.base -= srcSize;
    ms->window.dictLimit += (U32)srcSize;
    ms->window.lowLimit = ms->window.dictLimit;
    ms->nextToUpdate = ms->window.dictLimit;
    ms->nextToUpdate3 = ms->window.dictLimit;

    /* reinforce weight of collected statistics */
    ZSTD_upscaleStats(&ms->opt);
}
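/* Note (illustrative) : the "invalidate first scan" trick above shifts
 * window.base down by srcSize while raising dictLimit/lowLimit by the same
 * amount. Indices recorded in the tables during the first pass now sit below
 * lowLimit, so the second pass treats them as out of window and re-discovers
 * all matches from scratch, while the collected symbol statistics
 * (reinforced by ZSTD_upscaleStats()) are kept. */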
size_t ZSTD_compressBlock_btultra(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
}

size_t ZSTD_compressBlock_btultra2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    U32 const current = (U32)((const BYTE*)src - ms->window.base);
    DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);

    /* 2-pass strategy:
     * this strategy makes a first pass over the first block to collect statistics
     * and seed the next round's statistics with them.
     * After the 1st pass, the function forgets everything, and starts a new block.
     * Consequently, this can only work if no data has been previously loaded in tables,
     * aka, no dictionary, no prefix, no ldm preprocessing.
     * The compression ratio gain is generally small (~0.5% on first block),
     * the cost is 2x cpu time on first block. */
    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
    if ( (ms->opt.litLengthSum==0)   /* first block */
      && (seqStore->sequences == seqStore->sequencesStart)   /* no ldm */
      && (ms->window.dictLimit == ms->window.lowLimit)       /* no dictionary */
      && (current == ms->window.dictLimit)                   /* start of frame, nothing already loaded nor skipped */
      && (srcSize > ZSTD_PREDEF_THRESHOLD)
      ) {
        ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
    }

    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
}

size_t ZSTD_compressBlock_btopt_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_btultra_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_btopt_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict);
}

size_t ZSTD_compressBlock_btultra_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict);
}

/* note : no btultra2 variant for extDict nor dictMatchState,
 * because btultra2 is not meant to work with dictionaries,
 * and is specific to the first block (no prefix) */