/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "zstd_fast.h"


void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
                        ZSTD_compressionParameters const* cParams,
                        void const* end)
{
    U32* const hashTable = ms->hashTable;
    U32  const hBits = cParams->hashLog;
    U32  const mls = cParams->searchLength;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Always insert every fastHashFillStep position into the hash table.
     * Insert the other positions if their hash entry is empty.
     */
    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
        U32 const current = (U32)(ip - base);
        U32 i;
        for (i = 0; i < fastHashFillStep; ++i) {
            size_t const hash = ZSTD_hashPtr(ip + i, hBits, mls);
            if (i == 0 || hashTable[hash] == 0)
                hashTable[hash] = current + i;
        }
    }
}
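/* Note on the fill strategy above: the anchor position (i == 0) always
 * overwrites its slot, while the two intermediate positions only claim
 * slots that are still empty. The table thus gets dense coverage at about
 * one third of the per-position overwrite cost. */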
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_fast_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const hlog, U32 const stepSize, U32 const mls)
{
    U32* const hashTable = ms->hashTable;
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32   lowestIndex = ms->window.dictLimit;
    const BYTE* const lowest = base + lowestIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved = 0;

    /* init */
    ip += (ip==lowest);
    {   U32 const maxRep = (U32)(ip-lowest);
        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
    }

    /* Main Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        size_t mLength;
        size_t const h = ZSTD_hashPtr(ip, hlog, mls);
        U32 const current = (U32)(ip-base);
        U32 const matchIndex = hashTable[h];
        const BYTE* match = base + matchIndex;
        hashTable[h] = current;   /* update hash table */

        if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
        } else {
            if ( (matchIndex <= lowestIndex)
              || (MEM_read32(match) != MEM_read32(ip)) ) {
                assert(stepSize >= 1);
                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                continue;
            }
            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
            {   U32 const offset = (U32)(ip-match);
                while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }  /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
        }   }

        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Fill Table */
            hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;  /* here because current+2 could be > iend-8 */
            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
            /* check immediate repcode */
            while ( (ip <= ilimit)
                 && ( (offset_2>0)
                    & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
                /* store sequence */
                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
                {   U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  }  /* swap offset_2 <=> offset_1 */
                hashTable[ZSTD_hashPtr(ip, hlog, mls)] = (U32)(ip-base);
                ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
                ip += rLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
    }   }   }

    /* save reps for next block */
    rep[0] = offset_1 ? offset_1 : offsetSaved;
    rep[1] = offset_2 ? offset_2 : offsetSaved;

    /* Return the last literals size */
    return iend - anchor;
}


size_t ZSTD_compressBlock_fast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    U32 const hlog = cParams->hashLog;
    U32 const mls = cParams->searchLength;
    U32 const stepSize = cParams->targetLength;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4);
    case 5 :
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5);
    case 6 :
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6);
    case 7 :
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7);
    }
}
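/* Note on the skip heuristic used by both search loops: the advance
 *     ip += ((ip-anchor) >> kSearchStrength) + stepSize;
 * grows with the length of the current literal run (ip-anchor), so the
 * search speeds up through data that is not producing matches, and falls
 * back to stepSize as soon as a match resets the anchor. */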
static size_t ZSTD_compressBlock_fast_extDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const hlog, U32 const stepSize, U32 const mls)
{
    U32* const hashTable = ms->hashTable;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32   lowestIndex = ms->window.lowLimit;
    const BYTE* const dictStart = dictBase + lowestIndex;
    const U32   dictLimit = ms->window.dictLimit;
    const BYTE* const lowPrefixPtr = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];

    /* Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        const size_t h = ZSTD_hashPtr(ip, hlog, mls);
        const U32 matchIndex = hashTable[h];
        const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
        const BYTE* match = matchBase + matchIndex;
        const U32 current = (U32)(ip-base);
        const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current+1 */
        const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
        const BYTE* repMatch = repBase + repIndex;
        size_t mLength;
        hashTable[h] = current;   /* update hash table */

        /* (dictLimit-1) - repIndex underflows to a large value when
         * repIndex >= dictLimit (match lies in the prefix segment);
         * otherwise it requires repIndex <= dictLimit-4, so the 4-byte
         * read of repMatch stays within the dictionary segment. */
        if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
        } else {
            if ( (matchIndex < lowestIndex) ||
                 (MEM_read32(match) != MEM_read32(ip)) ) {
                assert(stepSize >= 1);
                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                continue;
            }
            {   const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
                const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
                U32 offset;
                mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
                while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }  /* catch up */
                offset = current - matchIndex;
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
        }   }

        /* found a match : store it */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Fill Table */
            hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;
            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
                if ( (((U32)((dictLimit-1) - repIndex2) >= 3) /* intentional underflow */ & (repIndex2 > lowestIndex))
                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
    }   }   }

    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return iend - anchor;
}


size_t ZSTD_compressBlock_fast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
{
    U32 const hlog = cParams->hashLog;
    U32 const mls = cParams->searchLength;
    U32 const stepSize = cParams->targetLength;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4);
    case 5 :
        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5);
    case 6 :
        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6);
    case 7 :
        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7);
    }
}
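/* Note on the dispatch pattern: both ZSTD_compressBlock_fast() and
 * ZSTD_compressBlock_fast_extDict() route each supported `mls` value to a
 * call with a compile-time-constant argument. Together with
 * FORCE_INLINE_TEMPLATE on the generic body, this lets the compiler fold
 * the switch inside ZSTD_hashPtr() and emit a fixed-width hash per
 * instantiation instead of branching on the match length in the hot loop. */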