/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "zstd_double_fast.h"


void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
                              void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashLarge = ms->hashTable;
    U32  const hBitsL = cParams->hashLog;
    U32  const mls = cParams->minMatch;
    U32* const hashSmall = ms->chainTable;
    U32  const hBitsS = cParams->chainLog;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Always insert every fastHashFillStep position into the hash tables.
     * Insert the other positions into the large hash table if their entry
     * is empty.
     */
    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
        U32 const curr = (U32)(ip - base);
        U32 i;
        for (i = 0; i < fastHashFillStep; ++i) {
            size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
            size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
            if (i == 0)
                hashSmall[smHash] = curr + i;
            if (i == 0 || hashLarge[lgHash] == 0)
                hashLarge[lgHash] = curr + i;
            /* Only load extra positions for ZSTD_dtlm_full */
            if (dtlm == ZSTD_dtlm_fast)
                break;
    }   }
}


FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_doubleFast_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls /* template */, ZSTD_dictMode_e const dictMode)
{
    ZSTD_compressionParameters const* cParams = &ms->cParams;
    U32* const hashLong = ms->hashTable;
    const U32 hBitsL = cParams->hashLog;
    U32* const hashSmall = ms->chainTable;
    const U32 hBitsS = cParams->chainLog;
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
    /* presumes that, if there is a dictionary, it must be using Attach mode */
    const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
    const BYTE* const prefixLowest = base + prefixLowestIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved = 0;

    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dictCParams =
                                      dictMode == ZSTD_dictMatchState ?
                                      &dms->cParams : NULL;
    const U32* const dictHashLong  = dictMode == ZSTD_dictMatchState ?
                                     dms->hashTable : NULL;
    const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ?
                                     dms->chainTable : NULL;
    const U32 dictStartIndex       = dictMode == ZSTD_dictMatchState ?
                                     dms->window.dictLimit : 0;
    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?
                                     dms->window.base : NULL;
    const BYTE* const dictStart    = dictMode == ZSTD_dictMatchState ?
                                     dictBase + dictStartIndex : NULL;
    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?
                                     dms->window.nextSrc : NULL;
    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?
                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
                                     0;
    const U32 dictHBitsL           = dictMode == ZSTD_dictMatchState ?
                                     dictCParams->hashLog : hBitsL;
    const U32 dictHBitsS           = dictMode == ZSTD_dictMatchState ?
                                     dictCParams->chainLog : hBitsS;
    const U32 dictAndPrefixLength  = (U32)((ip - prefixLowest) + (dictEnd - dictStart));

    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic");

    assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);

    /* if a dictionary is attached, it must be within window range */
    if (dictMode == ZSTD_dictMatchState) {
        assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
    }

    /* init */
    ip += (dictAndPrefixLength == 0);
    if (dictMode == ZSTD_noDict) {
        U32 const curr = (U32)(ip - base);
        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
        U32 const maxRep = curr - windowLow;
        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
    }
    if (dictMode == ZSTD_dictMatchState) {
        /* dictMatchState repCode checks don't currently handle repCode == 0
         * disabling. */
        assert(offset_1 <= dictAndPrefixLength);
        assert(offset_2 <= dictAndPrefixLength);
    }

    /* Main Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        size_t mLength;
        U32 offset;
        size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
        size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
        size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
        size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
        U32 const curr = (U32)(ip-base);
        U32 const matchIndexL = hashLong[h2];
        U32 matchIndexS = hashSmall[h];
        const BYTE* matchLong = base + matchIndexL;
        const BYTE* match = base + matchIndexS;
        const U32 repIndex = curr + 1 - offset_1;
        const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
                            && repIndex < prefixLowestIndex) ?
                               dictBase + (repIndex - dictIndexDelta) :
                               base + repIndex;
        hashLong[h2] = hashSmall[h] = curr;   /* update hash tables */

        /* check dictMatchState repcode */
        if (dictMode == ZSTD_dictMatchState
            && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
            goto _match_stored;
        }

        /* check noDict repcode */
        if ( dictMode == ZSTD_noDict
          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
            goto _match_stored;
        }

        if (matchIndexL > prefixLowestIndex) {
            /* check prefix long match */
            if (MEM_read64(matchLong) == MEM_read64(ip)) {
                mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
                offset = (U32)(ip-matchLong);
                while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
                goto _match_found;
            }
        } else if (dictMode == ZSTD_dictMatchState) {
            /* check dictMatchState long match */
            U32 const dictMatchIndexL = dictHashLong[dictHL];
            const BYTE* dictMatchL = dictBase + dictMatchIndexL;
            assert(dictMatchL < dictEnd);

            if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
                mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
                offset = (U32)(curr - dictMatchIndexL - dictIndexDelta);
                while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
                goto _match_found;
        }   }

        if (matchIndexS > prefixLowestIndex) {
            /* check prefix short match */
            if (MEM_read32(match) == MEM_read32(ip)) {
                goto _search_next_long;
            }
        } else if (dictMode == ZSTD_dictMatchState) {
            /* check dictMatchState short match */
            U32 const dictMatchIndexS = dictHashSmall[dictHS];
            match = dictBase + dictMatchIndexS;
            matchIndexS = dictMatchIndexS + dictIndexDelta;

            if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
                goto _search_next_long;
        }   }

        ip += ((ip-anchor) >> kSearchStrength) + 1;
#if defined(__aarch64__)
        PREFETCH_L1(ip+256);
#endif
        continue;

_search_next_long:

        {   size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
            size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
            U32 const matchIndexL3 = hashLong[hl3];
            const BYTE* matchL3 = base + matchIndexL3;
            hashLong[hl3] = curr + 1;

            /* check prefix long +1 match */
            if (matchIndexL3 > prefixLowestIndex) {
                if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
                    mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
                    ip++;
                    offset = (U32)(ip-matchL3);
                    while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
                    goto _match_found;
                }
            } else if (dictMode == ZSTD_dictMatchState) {
                /* check dict long +1 match */
                U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
                const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
                assert(dictMatchL3 < dictEnd);
                if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
                    mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
                    ip++;
                    offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta);
                    while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
                    goto _match_found;
        }   }   }

        /* if no long +1 match, explore the short match we found */
        if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) {
            mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
            offset = (U32)(curr - matchIndexS);
            while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
        } else {
            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
            offset = (U32)(ip - match);
            while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
        }

        /* fall-through */

_match_found:
        offset_2 = offset_1;
        offset_1 = offset;

        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);

_match_stored:
        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Complementary insertion */
            /* done after iLimit test, as candidates could be > iend-8 */
            {   U32 const indexToInsert = curr+2;
                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
            }

            /* check immediate repcode */
            if (dictMode == ZSTD_dictMatchState) {
                while (ip <= ilimit) {
                    U32 const current2 = (U32)(ip-base);
                    U32 const repIndex2 = current2 - offset_2;
                    const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState
                        && repIndex2 < prefixLowestIndex ?
                            dictBase + repIndex2 - dictIndexDelta :
                            base + repIndex2;
                    if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
                       && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                        const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
                        size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
                        U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                        ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
                        hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                        hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
                        ip += repLength2;
                        anchor = ip;
                        continue;
                    }
                    break;
            }   }

            if (dictMode == ZSTD_noDict) {
                while ( (ip <= ilimit)
                     && ( (offset_2>0)
                        & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
                    /* store sequence */
                    size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
                    U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;   /* swap offset_2 <=> offset_1 */
                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH);
                    ip += rLength;
                    anchor = ip;
                    continue;   /* faster when present ... (?) */
        }   }   }
    }   /* while (ip < ilimit) */

    /* save reps for next block */
    rep[0] = offset_1 ? offset_1 : offsetSaved;
    rep[1] = offset_2 ? offset_2 : offsetSaved;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}


size_t ZSTD_compressBlock_doubleFast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    const U32 mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
    case 5 :
        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
    case 6 :
        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
    case 7 :
        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
    }
}


size_t ZSTD_compressBlock_doubleFast_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    const U32 mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
    case 5 :
        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
    case 6 :
        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
    case 7 :
        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
    }
}


static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls /* template */)
{
    ZSTD_compressionParameters const* cParams = &ms->cParams;
    U32* const hashLong = ms->hashTable;
    U32  const hBitsL = cParams->hashLog;
    U32* const hashSmall = ms->chainTable;
    U32  const hBitsS = cParams->chainLog;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ms->window.base;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
    const U32   dictStartIndex = lowLimit;
    const U32   dictLimit = ms->window.dictLimit;
    const U32   prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const BYTE* const dictEnd = dictBase + prefixStartIndex;
    U32 offset_1=rep[0], offset_2=rep[1];

    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);

    /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
    if (prefixStartIndex == dictStartIndex)
        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict);

    /* Search Loop */
    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
        const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
        const U32 matchIndex = hashSmall[hSmall];
        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
        const BYTE* match = matchBase + matchIndex;

        const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
        const U32 matchLongIndex = hashLong[hLong];
        const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
        const BYTE* matchLong = matchLongBase + matchLongIndex;

        const U32 curr = (U32)(ip-base);
        const U32 repIndex = curr + 1 - offset_1;   /* offset_1 expected <= curr +1 */
        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
        const BYTE* const repMatch = repBase + repIndex;
        size_t mLength;
        hashSmall[hSmall] = hashLong[hLong] = curr;   /* update hash table */

        if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
            & (repIndex > dictStartIndex))
          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
        } else {
            if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
                const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
                const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
                U32 offset;
                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
                offset = curr - matchLongIndex;
                while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);

            } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
                U32 const matchIndex3 = hashLong[h3];
                const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
                const BYTE* match3 = match3Base + matchIndex3;
                U32 offset;
                hashLong[h3] = curr + 1;
                if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
                    const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
                    const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
                    ip++;
                    offset = curr+1 - matchIndex3;
                    while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
                } else {
                    const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
                    const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
                    offset = curr - matchIndex;
                    while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                }
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);

            } else {
                ip += ((ip-anchor) >> kSearchStrength) + 1;
                continue;
        }   }

        /* move to next sequence start */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Complementary insertion */
            /* done after iLimit test, as candidates could be > iend-8 */
            {   U32 const indexToInsert = curr+2;
                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
            }

            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3)   /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
                    & (repIndex2 > dictStartIndex))
                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
    }   }   }

    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}


size_t ZSTD_compressBlock_doubleFast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
    case 5 :
        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
    case 6 :
        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
    case 7 :
        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
    }
}