/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"  /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
#include "zstd_fast.h"


void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
                        const void* const end,
                        ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32  const hBits = cParams->hashLog;
    U32  const mls = cParams->minMatch;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Always insert every fastHashFillStep position into the hash table.
     * Insert the other positions if their hash entry is empty.
     */
    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
        U32 const current = (U32)(ip - base);
        size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
        hashTable[hash0] = current;
        if (dtlm == ZSTD_dtlm_fast) continue;
        /* Only load extra positions for ZSTD_dtlm_full */
        {   U32 p;
            for (p = 1; p < fastHashFillStep; ++p) {
                size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
                if (hashTable[hash] == 0) {  /* not yet filled */
                    hashTable[hash] = current + p;
    }   }   }   }
}


FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_fast_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
    const BYTE* ip0 = istart;
    const BYTE* ip1;
    const BYTE* anchor = istart;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   maxDistance = 1U << cParams->windowLog;
    const U32   validStartIndex = ms->window.dictLimit;
    const U32   prefixStartIndex = (endIndex - validStartIndex > maxDistance) ?
                                   endIndex - maxDistance : validStartIndex;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved = 0;

    /* init */
    DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
    ip0 += (ip0 == prefixStart);
    ip1 = ip0 + 1;
    {   U32 const maxRep = (U32)(ip0 - prefixStart);
        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
    }

    /* Main Search Loop */
    while (ip1 < ilimit) {   /* < instead of <=, because check at ip0+2 */
        size_t mLength;
        BYTE const* ip2 = ip0 + 2;
        size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
        U32 const val0 = MEM_read32(ip0);
        size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
        U32 const val1 = MEM_read32(ip1);
        U32 const current0 = (U32)(ip0-base);
        U32 const current1 = (U32)(ip1-base);
        U32 const matchIndex0 = hashTable[h0];
        U32 const matchIndex1 = hashTable[h1];
        BYTE const* repMatch = ip2 - offset_1;
        const BYTE* match0 = base + matchIndex0;
        const BYTE* match1 = base + matchIndex1;
        U32 offcode;
        hashTable[h0] = current0;   /* update hash table */
        hashTable[h1] = current1;   /* update hash table */

        assert(ip0 + 1 == ip1);

        /* check repcode at ip2; when the preceding byte also matches,
         * start the match one position earlier */
        if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
            mLength = ip2[-1] == repMatch[-1] ? 1 : 0;
            ip0 = ip2 - mLength;
            match0 = repMatch - mLength;
            offcode = 0;
            goto _match;
        }
        if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
            /* found a regular match */
            goto _offset;
        }
        if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
            /* found a regular match after one literal */
            ip0 = ip1;
            match0 = match1;
            goto _offset;
        }
        /* no match: skip ahead faster as the distance from the last match grows */
        {   size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
            assert(step >= 2);
            ip0 += step;
            ip1 += step;
            continue;
        }
_offset: /* Requires: ip0, match0 */
        /* Compute the offset code */
        offset_2 = offset_1;
        offset_1 = (U32)(ip0-match0);
        offcode = offset_1 + ZSTD_REP_MOVE;
        mLength = 0;
        /* Count the backwards match length */
        while (((ip0>anchor) & (match0>prefixStart))
            && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */

_match: /* Requires: ip0, match0, offcode */
        /* Count the forward length */
        mLength += ZSTD_count(ip0+mLength+4, match0+mLength+4, iend) + 4;
        ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);
        /* match found */
        ip0 += mLength;
        anchor = ip0;
        ip1 = ip0 + 1;

        if (ip0 <= ilimit) {
            /* Fill Table */
            assert(base+current0+2 > istart);  /* check base overflow */
            hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

            while ( ((ip0 <= ilimit) & (offset_2>0))  /* offset_2==0 means offset_2 is invalidated */
                 && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
                /* store sequence */
                size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
                { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
                ip0 += rLength;
                ip1 = ip0 + 1;
                ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0
                                      /*offCode*/, rLength-MINMATCH);
                anchor = ip0;
                continue;   /* faster when present (confirmed on gcc-8) ... (?) */
            }
        }
    }

    /* save reps for next block */
    rep[0] = offset_1 ? offset_1 : offsetSaved;
    rep[1] = offset_2 ? offset_2 : offsetSaved;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}


size_t ZSTD_compressBlock_fast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState == NULL);
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
    case 5 :
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
    case 6 :
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
    case 7 :
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
    }
}

FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_fast_dictMatchState_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32   prefixStartIndex = ms->window.dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved = 0;

    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
    const U32* const dictHashTable = dms->hashTable;
    const U32 dictStartIndex       = dms->window.dictLimit;
    const BYTE* const dictBase     = dms->window.base;
    const BYTE* const dictStart    = dictBase + dictStartIndex;
    const BYTE* const dictEnd      = dms->window.nextSrc;
    const U32 dictIndexDelta       = prefixStartIndex - (U32)(dictEnd - dictBase);
    const U32 dictAndPrefixLength  = (U32)(ip - prefixStart + dictEnd - dictStart);
    const U32 dictHLog             = dictCParams->hashLog;

    /* if a dictionary is still attached, it necessarily means that
     * it is within window size. So we just check it. */
    const U32 maxDistance = 1U << cParams->windowLog;
    const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
    assert(endIndex - prefixStartIndex <= maxDistance);
    (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */

    /* ensure there will be no underflow
     * when translating a dict index into a local index */
    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));

    /* init */
    DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
    ip += (dictAndPrefixLength == 0);
    /* dictMatchState repCode checks don't currently handle repCode == 0
     * disabling.
     */
    assert(offset_1 <= dictAndPrefixLength);
    assert(offset_2 <= dictAndPrefixLength);

    /* Main Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        size_t mLength;
        size_t const h = ZSTD_hashPtr(ip, hlog, mls);
        U32 const current = (U32)(ip-base);
        U32 const matchIndex = hashTable[h];
        const BYTE* match = base + matchIndex;
        const U32 repIndex = current + 1 - offset_1;
        const BYTE* repMatch = (repIndex < prefixStartIndex) ?
                               dictBase + (repIndex - dictIndexDelta) :
                               base + repIndex;
        hashTable[h] = current;   /* update hash table */

        if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
        } else if ( (matchIndex <= prefixStartIndex) ) {
            size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
            U32 const dictMatchIndex = dictHashTable[dictHash];
            const BYTE* dictMatch = dictBase + dictMatchIndex;
            if (dictMatchIndex <= dictStartIndex ||
                MEM_read32(dictMatch) != MEM_read32(ip)) {
                assert(stepSize >= 1);
                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                continue;
            } else {
                /* found a dict match */
                U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
                mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
                while (((ip>anchor) & (dictMatch>dictStart))
                     && (ip[-1] == dictMatch[-1])) {
                    ip--; dictMatch--; mLength++;
                } /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
            }
        } else if (MEM_read32(match) != MEM_read32(ip)) {
            /* it's not a match, and we're not going to check the dictionary */
            assert(stepSize >= 1);
            ip += ((ip-anchor) >> kSearchStrength) + stepSize;
            continue;
        } else {
            /* found a regular match */
            U32 const offset = (U32)(ip-match);
            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
            while (((ip>anchor) & (match>prefixStart))
                 && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
            offset_2 = offset_1;
            offset_1 = offset;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
        }

        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Fill Table */
            assert(base+current+2 > istart);  /* check base overflow */
            hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;  /* here because current+2 could be > iend-8 */
            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);

            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
                        dictBase - dictIndexDelta + repIndex2 :
                        base + repIndex2;
                if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ?
                            dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
            }
        }
    }

    /* save reps for next block */
    rep[0] = offset_1 ? offset_1 : offsetSaved;
    rep[1] = offset_2 ? offset_2 : offsetSaved;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}

size_t ZSTD_compressBlock_fast_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState != NULL);
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
    case 5 :
        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
    case 6 :
        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
    case 7 :
        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
    }
}


static size_t ZSTD_compressBlock_fast_extDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
    const U32   dictStartIndex = lowLimit;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const U32   dictLimit = ms->window.dictLimit;
    const U32   prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const dictEnd = dictBase + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    U32 offset_1=rep[0], offset_2=rep[1];

    DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic");

    /* switch to "regular" variant if extDict is invalidated due to maxDistance */
    if (prefixStartIndex == dictStartIndex)
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);

    /* Search Loop */
    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
        const size_t h = ZSTD_hashPtr(ip, hlog, mls);
        const U32    matchIndex = hashTable[h];
        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
        const BYTE*  match = matchBase + matchIndex;
        const U32    current = (U32)(ip-base);
        const U32    repIndex = current + 1 - offset_1;
        const BYTE* const repBase = repIndex < prefixStartIndex ?
                                    dictBase : base;
        const BYTE* const repMatch = repBase + repIndex;
        hashTable[h] = current;   /* update hash table */
        assert(offset_1 <= current +1);   /* check repIndex */

        if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
            size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
            ip += rLength;
            anchor = ip;
        } else {
            if ( (matchIndex < dictStartIndex) ||
                 (MEM_read32(match) != MEM_read32(ip)) ) {
                assert(stepSize >= 1);
                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                continue;
            }
            {   const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
                const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
                U32 const offset = current - matchIndex;
                size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
                while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                offset_2 = offset_1; offset_1 = offset;   /* update offset history */
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
                ip += mLength;
                anchor = ip;
        }   }

        if (ip <= ilimit) {
            /* Fill Table */
            hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;
            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex))  /* intentional overflow */
                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }  /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
    }   }   }

    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}


size_t ZSTD_compressBlock_fast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
    case 5 :
        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
    case 6 :
        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
    case 7 :
        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
    }
}