/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "zstd_lazy.h"


/*-*************************************
*  Binary Tree search
***************************************/

static void
ZSTD_updateDUBT(ZSTD_matchState_t* ms,
                const BYTE* ip, const BYTE* iend,
                U32 mls)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hashLog = cParams->hashLog;

    U32* const bt = ms->chainTable;
    U32 const btLog  = cParams->chainLog - 1;
    U32 const btMask = (1 << btLog) - 1;

    const BYTE* const base = ms->window.base;
    U32 const target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;

    if (idx != target)
        DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
                    idx, target, ms->window.dictLimit);
    assert(ip + 8 <= iend);   /* condition for ZSTD_hashPtr */
    (void)iend;

    assert(idx >= ms->window.dictLimit);   /* condition for valid base+idx */
    for ( ; idx < target ; idx++) {
        size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls);   /* assumption : ip + 8 <= iend */
        U32 const matchIndex = hashTable[h];

        U32* const nextCandidatePtr = bt + 2*(idx&btMask);
        U32* const sortMarkPtr = nextCandidatePtr + 1;

        DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
        hashTable[h] = idx;   /* Update Hash Table */
        *nextCandidatePtr = matchIndex;   /* update BT like a chain */
        *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
    }
    ms->nextToUpdate = target;
}
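
/* Note on the DUBT layout : the binary tree lives in ms->chainTable, using
 * 2 consecutive U32 per position : bt[2*(idx&btMask)] holds the smaller-side
 * link (or next unsorted candidate) and bt[2*(idx&btMask)+1] the larger-side
 * link (or sort mark). Because each position consumes two slots,
 * btLog = chainLog - 1 : the tree tracks half as many positions as a hash
 * chain table of the same total size. */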


/** ZSTD_insertDUBT1() :
 *  sort one already inserted but unsorted position
 *  assumption : curr >= btLow == (curr - btMask)
 *  doesn't fail */
static void
ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
                 U32 curr, const BYTE* inputEnd,
                 U32 nbCompares, U32 btLow,
                 const ZSTD_dictMode_e dictMode)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const bt = ms->chainTable;
    U32 const btLog  = cParams->chainLog - 1;
    U32 const btMask = (1 << btLog) - 1;
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const ip = (curr>=dictLimit) ? base + curr : dictBase + curr;
    const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* match;
    U32* smallerPtr = bt + 2*(curr&btMask);
    U32* largerPtr  = smallerPtr + 1;
    U32 matchIndex = *smallerPtr;   /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
    U32 dummy32;   /* to be nullified at the end */
    U32 const windowValid = ms->window.lowLimit;
    U32 const maxDistance = 1U << cParams->windowLog;
    U32 const windowLow = (curr - windowValid > maxDistance) ? curr - maxDistance : windowValid;


    DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
                curr, dictLimit, windowLow);
    assert(curr >= btLow);
    assert(ip < iend);   /* condition for ZSTD_count */

    for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        assert(matchIndex < curr);
        /* note : all candidates are now supposed sorted,
         * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
         * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */

        if ( (dictMode != ZSTD_extDict)
          || (matchIndex+matchLength >= dictLimit)  /* both in current segment */
          || (curr < dictLimit) /* both in extDict */) {
            const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
                                     || (matchIndex+matchLength >= dictLimit)) ?
                                        base : dictBase;
            assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */
                 || (curr < dictLimit) );
            match = mBase + matchIndex;
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
        } else {
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* preparation for next read of match[matchLength] */
        }

        DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
                    curr, matchIndex, (U32)matchLength);

        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
            break;   /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
        }

        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
            /* match is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
                        matchIndex, btLow, nextPtr[1]);
            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
        } else {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
                        matchIndex, btLow, nextPtr[0]);
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;
}


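/** ZSTD_DUBT_findBetterDictMatch() :
 *  searches the dictMatchState's binary tree for a match longer than bestLength.
 *  A longer match only replaces the current best if its extra length is worth
 *  its (usually larger) offset : the
 *  `4*(int)(matchLength-bestLength) > (int)(ZSTD_highbit32(...) - ZSTD_highbit32(...))`
 *  test below accepts it only when each extra matched byte costs at most
 *  roughly 4 additional bits of offset. */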
static size_t
ZSTD_DUBT_findBetterDictMatch (
        const ZSTD_matchState_t* ms,
        const BYTE* const ip, const BYTE* const iend,
        size_t* offsetPtr,
        size_t bestLength,
        U32 nbCompares,
        U32 const mls,
        const ZSTD_dictMode_e dictMode)
{
    const ZSTD_matchState_t * const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
    const U32 * const dictHashTable = dms->hashTable;
    U32 const hashLog = dmsCParams->hashLog;
    size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
    U32 dictMatchIndex = dictHashTable[h];

    const BYTE* const base = ms->window.base;
    const BYTE* const prefixStart = base + ms->window.dictLimit;
    U32 const curr = (U32)(ip-base);
    const BYTE* const dictBase = dms->window.base;
    const BYTE* const dictEnd = dms->window.nextSrc;
    U32 const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
    U32 const dictLowLimit = dms->window.lowLimit;
    U32 const dictIndexDelta = ms->window.lowLimit - dictHighLimit;

    U32* const dictBt = dms->chainTable;
    U32 const btLog  = dmsCParams->chainLog - 1;
    U32 const btMask = (1 << btLog) - 1;
    U32 const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;

    size_t commonLengthSmaller=0, commonLengthLarger=0;

    (void)dictMode;
    assert(dictMode == ZSTD_dictMatchState);

    for (; nbCompares && (dictMatchIndex > dictLowLimit); --nbCompares) {
        U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        const BYTE* match = dictBase + dictMatchIndex;
        matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
        if (dictMatchIndex+matchLength >= dictHighLimit)
            match = base + dictMatchIndex + dictIndexDelta;   /* to prepare for next usage of match[matchLength] */

        if (matchLength > bestLength) {
            U32 matchIndex = dictMatchIndex + dictIndexDelta;
            if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
                DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
                    curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, STORE_OFFSET(curr - matchIndex), dictMatchIndex, matchIndex);
                bestLength = matchLength, *offsetPtr = STORE_OFFSET(curr - matchIndex);
            }
            if (ip+matchLength == iend) {   /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
                break;   /* drop, to guarantee consistency (miss a little bit of compression) */
            }
        }

        if (match[matchLength] < ip[matchLength]) {
            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
            commonLengthSmaller = matchLength;   /* all smaller will now have at least this guaranteed common length */
            dictMatchIndex = nextPtr[1];         /* new matchIndex larger than previous (closer to current) */
        } else {
            /* match is larger than current */
            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
            commonLengthLarger = matchLength;
            dictMatchIndex = nextPtr[0];
        }
    }

    if (bestLength >= MINMATCH) {
        U32 const mIndex = curr - (U32)STORED_OFFSET(*offsetPtr); (void)mIndex;
        DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
                    curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
    }
    return bestLength;

}


static size_t
ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
                        const BYTE* const ip, const BYTE* const iend,
                        size_t* offsetPtr,
                        U32 const mls,
                        const ZSTD_dictMode_e dictMode)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hashLog = cParams->hashLog;
    size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
    U32 matchIndex = hashTable[h];

    const BYTE* const base = ms->window.base;
    U32 const curr = (U32)(ip-base);
    U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);

    U32* const bt = ms->chainTable;
    U32 const btLog  = cParams->chainLog - 1;
    U32 const btMask = (1 << btLog) - 1;
    U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
    U32 const unsortLimit = MAX(btLow, windowLow);

    U32* nextCandidate = bt + 2*(matchIndex&btMask);
    U32* unsortedMark = bt + 2*(matchIndex&btMask) + 1;
    U32 nbCompares = 1U << cParams->searchLog;
    U32 nbCandidates = nbCompares;
    U32 previousCandidate = 0;

    DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", curr);
    assert(ip <= iend-8);   /* required for h calculation */
    assert(dictMode != ZSTD_dedicatedDictSearch);

    /* reach end of unsorted candidates list */
    while ( (matchIndex > unsortLimit)
         && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
         && (nbCandidates > 1) ) {
        DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
                    matchIndex);
        *unsortedMark = previousCandidate;   /* the unsortedMark becomes a reversed chain, to move up back to original position */
        previousCandidate = matchIndex;
        matchIndex = *nextCandidate;
        nextCandidate = bt + 2*(matchIndex&btMask);
        unsortedMark = bt + 2*(matchIndex&btMask) + 1;
        nbCandidates --;
    }

    /* nullify last candidate if it's still unsorted
     * simplification, detrimental to compression ratio, beneficial for speed */
    if ( (matchIndex > unsortLimit)
      && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
        DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
                    matchIndex);
        *nextCandidate = *unsortedMark = 0;
    }

    /* batch sort stacked candidates */
    matchIndex = previousCandidate;
    while (matchIndex) {  /* will end on matchIndex == 0 */
        U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
        U32 const nextCandidateIdx = *nextCandidateIdxPtr;
        ZSTD_insertDUBT1(ms, matchIndex, iend,
                         nbCandidates, unsortLimit, dictMode);
        matchIndex = nextCandidateIdx;
        nbCandidates++;
    }

    /* find longest match */
    {   size_t commonLengthSmaller = 0, commonLengthLarger = 0;
        const BYTE* const dictBase = ms->window.dictBase;
        const U32 dictLimit = ms->window.dictLimit;
        const BYTE* const dictEnd = dictBase + dictLimit;
        const BYTE* const prefixStart = base + dictLimit;
        U32* smallerPtr = bt + 2*(curr&btMask);
        U32* largerPtr  = bt + 2*(curr&btMask) + 1;
        U32 matchEndIdx = curr + 8 + 1;
        U32 dummy32;   /* to be nullified at the end */
        size_t bestLength = 0;

        matchIndex = hashTable[h];
        hashTable[h] = curr;   /* Update Hash Table */

        for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
            U32* const nextPtr = bt + 2*(matchIndex & btMask);
            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
            const BYTE* match;

            if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
                match = base + matchIndex;
                matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
            } else {
                match = dictBase + matchIndex;
                matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
                if (matchIndex+matchLength >= dictLimit)
                    match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
            }

            if (matchLength > bestLength) {
                if (matchLength > matchEndIdx - matchIndex)
                    matchEndIdx = matchIndex + (U32)matchLength;
                if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
                    bestLength = matchLength, *offsetPtr = STORE_OFFSET(curr - matchIndex);
                if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
                    if (dictMode == ZSTD_dictMatchState) {
                        nbCompares = 0; /* in addition to avoiding checking any
                                         * further in this loop, make sure we
                                         * skip checking in the dictionary. */
                    }
                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
                }
            }

            if (match[matchLength] < ip[matchLength]) {
                /* match is smaller than current */
                *smallerPtr = matchIndex;             /* update smaller idx */
                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
                if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
                smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
                matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
            } else {
                /* match is larger than current */
                *largerPtr = matchIndex;
                commonLengthLarger = matchLength;
                if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
                largerPtr = nextPtr;
                matchIndex = nextPtr[0];
        }   }

        *smallerPtr = *largerPtr = 0;

        assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
        if (dictMode == ZSTD_dictMatchState && nbCompares) {
            bestLength = ZSTD_DUBT_findBetterDictMatch(
                    ms, ip, iend,
                    offsetPtr, bestLength, nbCompares,
                    mls, dictMode);
        }

        assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */
        ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
        if (bestLength >= MINMATCH) {
            U32 const mIndex = curr - (U32)STORED_OFFSET(*offsetPtr); (void)mIndex;
            DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
                        curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
        }
        return bestLength;
    }
}


/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
FORCE_INLINE_TEMPLATE size_t
ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
                const BYTE* const ip, const BYTE* const iLimit,
                      size_t* offsetPtr,
                const U32 mls /* template */,
                const ZSTD_dictMode_e dictMode)
{
    DEBUGLOG(7, "ZSTD_BtFindBestMatch");
    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
    ZSTD_updateDUBT(ms, ip, iLimit, mls);
    return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
}

/***********************************
* Dedicated dict search
***********************************/

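/* ZSTD_dedicatedDictSearch_lazy_loadDictionary() :
 * builds the dedicated-dict-search (DDS) layout. The hash table is oversized
 * by a factor of (1 << ZSTD_LAZY_DDSS_BUCKET_LOG), so each hash selects a
 * bucket of `bucketSize` U32 entries : the first `bucketSize - 1` entries
 * cache the most recent positions for that hash, and the last entry packs a
 * pointer into the chain table as ((chainIndex << 8) | chainLength). */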
void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
{
    const BYTE* const base = ms->window.base;
    U32 const target = (U32)(ip - base);
    U32* const hashTable = ms->hashTable;
    U32* const chainTable = ms->chainTable;
    U32 const chainSize = 1 << ms->cParams.chainLog;
    U32 idx = ms->nextToUpdate;
    U32 const minChain = chainSize < target - idx ? target - chainSize : idx;
    U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG;
    U32 const cacheSize = bucketSize - 1;
    U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize;
    U32 const chainLimit = chainAttempts > 255 ? 255 : chainAttempts;

    /* We know the hashtable is oversized by a factor of `bucketSize`.
     * We are going to temporarily pretend `bucketSize == 1`, keeping only a
     * single entry. We will use the rest of the space to construct a temporary
     * chaintable.
     */
    U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
    U32* const tmpHashTable = hashTable;
    U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog);
    U32 const tmpChainSize = (U32)((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog;
    U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx;
    U32 hashIdx;

    assert(ms->cParams.chainLog <= 24);
    assert(ms->cParams.hashLog > ms->cParams.chainLog);
    assert(idx != 0);
    assert(tmpMinChain <= minChain);

    /* fill conventional hash table and conventional chain table */
    for ( ; idx < target; idx++) {
        U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch);
        if (idx >= tmpMinChain) {
            tmpChainTable[idx - tmpMinChain] = hashTable[h];
        }
        tmpHashTable[h] = idx;
    }

    /* sort chains into ddss chain table */
    {
        U32 chainPos = 0;
        for (hashIdx = 0; hashIdx < (1U << hashLog); hashIdx++) {
            U32 count;
            U32 countBeyondMinChain = 0;
            U32 i = tmpHashTable[hashIdx];
            for (count = 0; i >= tmpMinChain && count < cacheSize; count++) {
                /* skip through the chain to the first position that won't be
                 * in the hash cache bucket */
                if (i < minChain) {
                    countBeyondMinChain++;
                }
                i = tmpChainTable[i - tmpMinChain];
            }
            if (count == cacheSize) {
                for (count = 0; count < chainLimit;) {
                    if (i < minChain) {
                        if (!i || ++countBeyondMinChain > cacheSize) {
                            /* only allow pulling `cacheSize` number of entries
                             * into the cache or chainTable beyond `minChain`,
                             * to replace the entries pulled out of the
                             * chainTable into the cache. This lets us reach
                             * back further without increasing the total number
                             * of entries in the chainTable, guaranteeing the
                             * DDSS chain table will fit into the space
                             * allocated for the regular one. */
                            break;
                        }
                    }
                    chainTable[chainPos++] = i;
                    count++;
                    if (i < tmpMinChain) {
                        break;
                    }
                    i = tmpChainTable[i - tmpMinChain];
                }
            } else {
                count = 0;
            }
            if (count) {
                tmpHashTable[hashIdx] = ((chainPos - count) << 8) + count;
            } else {
                tmpHashTable[hashIdx] = 0;
            }
        }
        assert(chainPos <= chainSize); /* I believe this is guaranteed... */
    }

    /* move chain pointers into the last entry of each hash bucket */
    for (hashIdx = (1 << hashLog); hashIdx; ) {
        U32 const bucketIdx = --hashIdx << ZSTD_LAZY_DDSS_BUCKET_LOG;
        U32 const chainPackedPointer = tmpHashTable[hashIdx];
        U32 i;
        for (i = 0; i < cacheSize; i++) {
            hashTable[bucketIdx + i] = 0;
        }
        hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer;
    }

    /* fill the buckets of the hash table */
    for (idx = ms->nextToUpdate; idx < target; idx++) {
        U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch)
                   << ZSTD_LAZY_DDSS_BUCKET_LOG;
        U32 i;
        /* Shift hash cache down 1. */
        for (i = cacheSize - 1; i; i--)
            hashTable[h + i] = hashTable[h + i - 1];
        hashTable[h] = idx;
    }

    ms->nextToUpdate = target;
}

/* Returns the longest match length found in the dedicated dict search structure.
 * If none are longer than the argument ml, then ml will be returned.
 */
FORCE_INLINE_TEMPLATE
size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts,
                                            const ZSTD_matchState_t* const dms,
                                            const BYTE* const ip, const BYTE* const iLimit,
                                            const BYTE* const prefixStart, const U32 curr,
                                            const U32 dictLimit, const size_t ddsIdx) {
    const U32 ddsLowestIndex  = dms->window.dictLimit;
    const BYTE* const ddsBase = dms->window.base;
    const BYTE* const ddsEnd  = dms->window.nextSrc;
    const U32 ddsSize         = (U32)(ddsEnd - ddsBase);
    const U32 ddsIndexDelta   = dictLimit - ddsSize;
    const U32 bucketSize      = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
    const U32 bucketLimit     = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
    U32 ddsAttempt;
    U32 matchIndex;

    for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
        PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
    }

    {
        U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
        U32 const chainIndex = chainPackedPointer >> 8;

        PREFETCH_L1(&dms->chainTable[chainIndex]);
    }

    for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
        size_t currentMl=0;
        const BYTE* match;
        matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
        match = ddsBase + matchIndex;

        if (!matchIndex) {
            return ml;
        }

        /* guaranteed by table construction */
        (void)ddsLowestIndex;
        assert(matchIndex >= ddsLowestIndex);
        assert(match+4 <= ddsEnd);
        if (MEM_read32(match) == MEM_read32(ip)) {
            /* assumption : matchIndex <= dictLimit-4 (by table construction) */
            currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
        }

        /* save best solution */
        if (currentMl > ml) {
            ml = currentMl;
            *offsetPtr = STORE_OFFSET(curr - (matchIndex + ddsIndexDelta));
            if (ip+currentMl == iLimit) {
                /* best possible, avoids read overflow on next attempt */
                return ml;
            }
        }
    }

    {
        U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
        U32 chainIndex = chainPackedPointer >> 8;
        U32 const chainLength = chainPackedPointer & 0xFF;
        U32 const chainAttempts = nbAttempts - ddsAttempt;
        U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
        U32 chainAttempt;

        for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
            PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
        }

        for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
            size_t currentMl=0;
            const BYTE* match;
            matchIndex = dms->chainTable[chainIndex];
            match = ddsBase + matchIndex;

            /* guaranteed by table construction */
            assert(matchIndex >= ddsLowestIndex);
            assert(match+4 <= ddsEnd);
            if (MEM_read32(match) == MEM_read32(ip)) {
                /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
            }

            /* save best solution */
            if (currentMl > ml) {
                ml = currentMl;
                *offsetPtr = STORE_OFFSET(curr - (matchIndex + ddsIndexDelta));
                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
            }
        }
    }
    return ml;
}


/* *********************************
*  Hash Chain
***********************************/
#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & (mask)]
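
/* The chain table acts as a rolling window of linked lists :
 * NEXT_IN_CHAIN(idx, mask) stores, for position idx, the previous position
 * whose hash fell into the same bucket. Following
 * matchIndex = NEXT_IN_CHAIN(matchIndex, mask) therefore walks candidates
 * from most recent to oldest, until the index leaves the (1 << chainLog) window. */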

/* Update chains up to ip (excluded)
   Assumption : always within prefix (i.e. not within extDict) */
FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
                        ZSTD_matchState_t* ms,
                        const ZSTD_compressionParameters* const cParams,
                        const BYTE* ip, U32 const mls)
{
    U32* const hashTable = ms->hashTable;
    const U32 hashLog = cParams->hashLog;
    U32* const chainTable = ms->chainTable;
    const U32 chainMask = (1 << cParams->chainLog) - 1;
    const BYTE* const base = ms->window.base;
    const U32 target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;

    while(idx < target) { /* catch up */
        size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
        NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
        hashTable[h] = idx;
        idx++;
    }

    ms->nextToUpdate = target;
    return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
}

U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
}

/* inlining is important to hardwire a hot branch (template emulation) */
FORCE_INLINE_TEMPLATE
size_t ZSTD_HcFindBestMatch(
                        ZSTD_matchState_t* ms,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 mls, const ZSTD_dictMode_e dictMode)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const chainTable = ms->chainTable;
    const U32 chainSize = (1 << cParams->chainLog);
    const U32 chainMask = chainSize-1;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const U32 curr = (U32)(ip-base);
    const U32 maxDistance = 1U << cParams->windowLog;
    const U32 lowestValid = ms->window.lowLimit;
    const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
    const U32 isDictionary = (ms->loadedDictEnd != 0);
    const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
    const U32 minChain = curr > chainSize ? curr - chainSize : 0;
    U32 nbAttempts = 1U << cParams->searchLog;
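    /* a candidate must beat ml to be recorded : starting at 4-1 = 3 means
     * only matches of length >= 4 are ever reported */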
    size_t ml=4-1;

    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
                         ? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
    const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch
                        ? ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0;

    U32 matchIndex;

    if (dictMode == ZSTD_dedicatedDictSearch) {
        const U32* entry = &dms->hashTable[ddsIdx];
        PREFETCH_L1(entry);
    }

    /* HC4 match finder */
    matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);

    for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
        size_t currentMl=0;
        if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
            const BYTE* const match = base + matchIndex;
            assert(matchIndex >= dictLimit);   /* ensures this is true if dictMode != ZSTD_extDict */
            if (match[ml] == ip[ml])   /* potentially better */
                currentMl = ZSTD_count(ip, match, iLimit);
        } else {
            const BYTE* const match = dictBase + matchIndex;
            assert(match+4 <= dictEnd);
            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
        }

        /* save best solution */
        if (currentMl > ml) {
            ml = currentMl;
            *offsetPtr = STORE_OFFSET(curr - matchIndex);
            if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
        }

        if (matchIndex <= minChain) break;
        matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
    }

    assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
    if (dictMode == ZSTD_dedicatedDictSearch) {
        ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts, dms,
                                                  ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
    } else if (dictMode == ZSTD_dictMatchState) {
        const U32* const dmsChainTable = dms->chainTable;
        const U32 dmsChainSize   = (1 << dms->cParams.chainLog);
        const U32 dmsChainMask   = dmsChainSize - 1;
        const U32 dmsLowestIndex = dms->window.dictLimit;
        const BYTE* const dmsBase = dms->window.base;
        const BYTE* const dmsEnd  = dms->window.nextSrc;
        const U32 dmsSize        = (U32)(dmsEnd - dmsBase);
        const U32 dmsIndexDelta  = dictLimit - dmsSize;
        const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;

        matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];

        for ( ; (matchIndex>=dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
            size_t currentMl=0;
            const BYTE* const match = dmsBase + matchIndex;
            assert(match+4 <= dmsEnd);
            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;

            /* save best solution */
            if (currentMl > ml) {
                ml = currentMl;
                assert(curr > matchIndex + dmsIndexDelta);
                *offsetPtr = STORE_OFFSET(curr - (matchIndex + dmsIndexDelta));
                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
            }

            if (matchIndex <= dmsMinChain) break;

            matchIndex = dmsChainTable[matchIndex & dmsChainMask];
        }
    }

    return ml;
}

/* *********************************
* (SIMD) Row-based matchfinder
***********************************/
/* Constants for row-based hash */
#define ZSTD_ROW_HASH_TAG_OFFSET 16     /* byte offset of hashes in the match state's tagTable from the beginning of a row */
#define ZSTD_ROW_HASH_TAG_BITS 8        /* nb bits to use for the tag */
#define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1)
#define ZSTD_ROW_HASH_MAX_ENTRIES 64    /* absolute maximum number of entries per row, for all configurations */

#define ZSTD_ROW_HASH_CACHE_MASK (ZSTD_ROW_HASH_CACHE_SIZE - 1)

typedef U64 ZSTD_VecMask;   /* Clarifies when we are interacting with a U64 representing a mask of matches */

/* ZSTD_VecMask_next():
 * Starting from the LSB, returns the idx of the next non-zero bit.
 * Basically counting the nb of trailing zeroes.
 */
static U32 ZSTD_VecMask_next(ZSTD_VecMask val) {
    assert(val != 0);
#   if defined(_MSC_VER) && defined(_WIN64)
    if (val != 0) {
        unsigned long r;
        _BitScanForward64(&r, val);
        return (U32)(r);
    } else {
        /* Should not reach this code path */
        __assume(0);
    }
#   elif (defined(__GNUC__) && ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))
    if (sizeof(size_t) == 4) {
        U32 mostSignificantWord = (U32)(val >> 32);
        U32 leastSignificantWord = (U32)val;
        if (leastSignificantWord == 0) {
            return 32 + (U32)__builtin_ctz(mostSignificantWord);
        } else {
            return (U32)__builtin_ctz(leastSignificantWord);
        }
    } else {
        return (U32)__builtin_ctzll(val);
    }
#   else
    /* Software ctz version: http://aggregate.org/MAGIC/#Trailing%20Zero%20Count
     * and: https://stackoverflow.com/questions/2709430/count-number-of-bits-in-a-64-bit-long-big-integer
     */
    val = ~val & (val - 1ULL); /* Lowest set bit mask */
    val = val - ((val >> 1) & 0x5555555555555555);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
    return (U32)((((val + (val >> 4)) & 0xF0F0F0F0F0F0F0FULL) * 0x101010101010101ULL) >> 56);
#   endif
}
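
/* Worked example of the software fallback above :
 * for val = 0b...10100 (lowest set bit at index 2),
 * ~val & (val - 1ULL) = 0b...00011, i.e. a mask of the bits below the lowest set bit.
 * The remaining lines are a SWAR popcount of that mask, so the function
 * returns 2, the number of trailing zeroes. */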

/* ZSTD_rotateRight_*():
 * Rotates a bitfield to the right by "count" bits.
 * https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts
 */
FORCE_INLINE_TEMPLATE
U64 ZSTD_rotateRight_U64(U64 const value, U32 count) {
    assert(count < 64);
    count &= 0x3F; /* for fickle pattern recognition */
    return (value >> count) | (U64)(value << ((0U - count) & 0x3F));
}

FORCE_INLINE_TEMPLATE
U32 ZSTD_rotateRight_U32(U32 const value, U32 count) {
    assert(count < 32);
    count &= 0x1F; /* for fickle pattern recognition */
    return (value >> count) | (U32)(value << ((0U - count) & 0x1F));
}

FORCE_INLINE_TEMPLATE
U16 ZSTD_rotateRight_U16(U16 const value, U32 count) {
    assert(count < 16);
    count &= 0x0F; /* for fickle pattern recognition */
    return (value >> count) | (U16)(value << ((0U - count) & 0x0F));
}
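
/* Note : the rotations above shift left by ((0U - count) & mask) rather than
 * (width - count), so that count == 0 does not trigger an undefined shift by
 * the full width; the masking also helps compilers ("fickle pattern
 * recognition") turn the whole expression into a single rotate instruction. */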

/* ZSTD_row_nextIndex():
 * Returns the next index to insert at within a tagTable row, and updates the "head"
 * value to reflect the update. Essentially cycles backwards through [0, {entries per row}).
 */
FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex(BYTE* const tagRow, U32 const rowMask) {
    U32 const next = (*tagRow - 1) & rowMask;
    *tagRow = (BYTE)next;
    return next;
}
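
/* Note : the row's "head" index lives in the first byte of the tag row
 * (it is read back as `*tagRow & rowMask` at search time), so decrementing it
 * makes each row a circular buffer that fills backwards : with rowMask == 15,
 * successive inserts land at positions 15, 14, ..., 0, then wrap to 15. */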

/* ZSTD_isAligned():
 * Checks that a pointer is aligned to "align" bytes which must be a power of 2.
 */
MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) {
    assert((align & (align - 1)) == 0);
    return (((size_t)ptr) & (align - 1)) == 0;
}
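
/* e.g. with align == 64, the mask is 0x3F : the expression is zero exactly
 * when the low 6 bits of the address are zero, i.e. the pointer is a
 * multiple of 64. */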

/* ZSTD_row_prefetch():
 * Performs prefetching for the hashTable and tagTable at a given row.
 */
FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, U16 const* tagTable, U32 const relRow, U32 const rowLog) {
    PREFETCH_L1(hashTable + relRow);
    if (rowLog >= 5) {
        PREFETCH_L1(hashTable + relRow + 16);
        /* Note: prefetching more of the hash table does not appear to be beneficial for 128-entry rows */
    }
    PREFETCH_L1(tagTable + relRow);
    if (rowLog == 6) {
        PREFETCH_L1(tagTable + relRow + 32);
    }
    assert(rowLog == 4 || rowLog == 5 || rowLog == 6);
    assert(ZSTD_isAligned(hashTable + relRow, 64));                 /* prefetched hash row always 64-byte aligned */
    assert(ZSTD_isAligned(tagTable + relRow, (size_t)1 << rowLog)); /* prefetched tagRow sits on correct multiple of bytes (32,64,128) */
}

/* ZSTD_row_fillHashCache():
 * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries,
 * but not beyond iLimit.
 */
FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
                                                  U32 const rowLog, U32 const mls,
                                                  U32 idx, const BYTE* const iLimit)
{
    U32 const* const hashTable = ms->hashTable;
    U16 const* const tagTable = ms->tagTable;
    U32 const hashLog = ms->rowHashLog;
    U32 const maxElemsToPrefetch = (base + idx) > iLimit ? 0 : (U32)(iLimit - (base + idx) + 1);
    U32 const lim = idx + MIN(ZSTD_ROW_HASH_CACHE_SIZE, maxElemsToPrefetch);

    for (; idx < lim; ++idx) {
        U32 const hash = (U32)ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
        U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
        ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
        ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash;
    }

    DEBUGLOG(6, "ZSTD_row_fillHashCache(): [%u %u %u %u %u %u %u %u]", ms->hashCache[0], ms->hashCache[1],
                ms->hashCache[2], ms->hashCache[3], ms->hashCache[4],
                ms->hashCache[5], ms->hashCache[6], ms->hashCache[7]);
}

/* ZSTD_row_nextCachedHash():
 * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at
 * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable.
 */
FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
                                                  U16 const* tagTable, BYTE const* base,
                                                  U32 idx, U32 const hashLog,
                                                  U32 const rowLog, U32 const mls)
{
    U32 const newHash = (U32)ZSTD_hashPtr(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
    U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
    ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
    {   U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK];
        cache[idx & ZSTD_ROW_HASH_CACHE_MASK] = newHash;
        return hash;
    }
}

/* ZSTD_row_update_internalImpl():
 * Updates the hash table with positions starting from updateStartIdx until updateEndIdx.
 */
FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
                                                        U32 updateStartIdx, U32 const updateEndIdx,
                                                        U32 const mls, U32 const rowLog,
                                                        U32 const rowMask, U32 const useCache)
{
    U32* const hashTable = ms->hashTable;
    U16* const tagTable = ms->tagTable;
    U32 const hashLog = ms->rowHashLog;
    const BYTE* const base = ms->window.base;

    DEBUGLOG(6, "ZSTD_row_update_internalImpl(): updateStartIdx=%u, updateEndIdx=%u", updateStartIdx, updateEndIdx);
    for (; updateStartIdx < updateEndIdx; ++updateStartIdx) {
        U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls)
                                  : (U32)ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
        U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
        U32* const row = hashTable + relRow;
        BYTE* tagRow = (BYTE*)(tagTable + relRow);  /* Though tagTable is laid out as a table of U16, each tag is only 1 byte.
                                                       Explicit cast allows us to get exact desired position within each row */
        U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);

        assert(hash == ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls));
        ((BYTE*)tagRow)[pos + ZSTD_ROW_HASH_TAG_OFFSET] = hash & ZSTD_ROW_HASH_TAG_MASK;
        row[pos] = updateStartIdx;
    }
}

/* ZSTD_row_update_internal():
 * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate.
 * Skips sections of long matches as is necessary.
 */
FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
                                                    U32 const mls, U32 const rowLog,
                                                    U32 const rowMask, U32 const useCache)
{
    U32 idx = ms->nextToUpdate;
    const BYTE* const base = ms->window.base;
    const U32 target = (U32)(ip - base);
    const U32 kSkipThreshold = 384;
    const U32 kMaxMatchStartPositionsToUpdate = 96;
    const U32 kMaxMatchEndPositionsToUpdate = 32;

    if (useCache) {
        /* Only skip positions when using hash cache, i.e.
         * if we are loading a dict, don't skip anything.
         * If we decide to skip, then we only update a set number
         * of positions at the beginning and end of the match.
         */
        if (UNLIKELY(target - idx > kSkipThreshold)) {
            U32 const bound = idx + kMaxMatchStartPositionsToUpdate;
            ZSTD_row_update_internalImpl(ms, idx, bound, mls, rowLog, rowMask, useCache);
            idx = target - kMaxMatchEndPositionsToUpdate;
            ZSTD_row_fillHashCache(ms, base, rowLog, mls, idx, ip+1);
        }
    }
    assert(target >= idx);
    ZSTD_row_update_internalImpl(ms, idx, target, mls, rowLog, rowMask, useCache);
    ms->nextToUpdate = target;
}

/* ZSTD_row_update():
 * External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary
 * processing.
 */
void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) {
    const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
    const U32 rowMask = (1u << rowLog) - 1;
    const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */);

    DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog);
    ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* dont use cache */);
}

#if defined(ZSTD_ARCH_X86_SSE2)
FORCE_INLINE_TEMPLATE ZSTD_VecMask
ZSTD_row_getSSEMask(int nbChunks, const BYTE* const src, const BYTE tag, const U32 head)
{
    const __m128i comparisonMask = _mm_set1_epi8((char)tag);
    int matches[4] = {0};
    int i;
    assert(nbChunks == 1 || nbChunks == 2 || nbChunks == 4);
    for (i=0; i<nbChunks; i++) {
        const __m128i chunk = _mm_loadu_si128((const __m128i*)(const void*)(src + 16*i));
        const __m128i equalMask = _mm_cmpeq_epi8(chunk, comparisonMask);
        matches[i] = _mm_movemask_epi8(equalMask);
    }
    if (nbChunks == 1) return ZSTD_rotateRight_U16((U16)matches[0], head);
    if (nbChunks == 2) return ZSTD_rotateRight_U32((U32)matches[1] << 16 | (U32)matches[0], head);
    assert(nbChunks == 4);
    return ZSTD_rotateRight_U64((U64)matches[3] << 48 | (U64)matches[2] << 32 | (U64)matches[1] << 16 | (U64)matches[0], head);
}
#endif

/* Returns a ZSTD_VecMask (U64) that has the nth bit set to 1 if the newly-computed "tag" matches
 * the hash at the nth position in a row of the tagTable.
 * Each row is a circular buffer beginning at the value of "head". So we must rotate the "matches" bitfield
 * to match up with the actual layout of the entries within the hashTable */
FORCE_INLINE_TEMPLATE ZSTD_VecMask
ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head, const U32 rowEntries)
{
    const BYTE* const src = tagRow + ZSTD_ROW_HASH_TAG_OFFSET;
    assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
    assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES);

#if defined(ZSTD_ARCH_X86_SSE2)

    return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, head);

#else /* SW or NEON-LE */

# if defined(ZSTD_ARCH_ARM_NEON)
    /* This NEON path only works for little endian - otherwise use SWAR below */
    if (MEM_isLittleEndian()) {
        if (rowEntries == 16) {
            const uint8x16_t chunk = vld1q_u8(src);
            const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag)));
            const uint16x8_t t0 = vshlq_n_u16(equalMask, 7);
            const uint32x4_t t1 = vreinterpretq_u32_u16(vsriq_n_u16(t0, t0, 14));
            const uint64x2_t t2 = vreinterpretq_u64_u32(vshrq_n_u32(t1, 14));
            const uint8x16_t t3 = vreinterpretq_u8_u64(vsraq_n_u64(t2, t2, 28));
            const U16 hi = (U16)vgetq_lane_u8(t3, 8);
            const U16 lo = (U16)vgetq_lane_u8(t3, 0);
            return ZSTD_rotateRight_U16((hi << 8) | lo, head);
        } else if (rowEntries == 32) {
            const uint16x8x2_t chunk = vld2q_u16((const U16*)(const void*)src);
            const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]);
            const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]);
            const uint8x16_t equalMask0 = vceqq_u8(chunk0, vdupq_n_u8(tag));
            const uint8x16_t equalMask1 = vceqq_u8(chunk1, vdupq_n_u8(tag));
            const int8x8_t pack0 = vqmovn_s16(vreinterpretq_s16_u8(equalMask0));
            const int8x8_t pack1 = vqmovn_s16(vreinterpretq_s16_u8(equalMask1));
            const uint8x8_t t0 = vreinterpret_u8_s8(pack0);
            const uint8x8_t t1 = vreinterpret_u8_s8(pack1);
            const uint8x8_t t2 = vsri_n_u8(t1, t0, 2);
            const uint8x8x2_t t3 = vuzp_u8(t2, t0);
            const uint8x8_t t4 = vsri_n_u8(t3.val[1], t3.val[0], 4);
            const U32 matches = vget_lane_u32(vreinterpret_u32_u8(t4), 0);
            return ZSTD_rotateRight_U32(matches, head);
        } else { /* rowEntries == 64 */
            const uint8x16x4_t chunk = vld4q_u8(src);
            const uint8x16_t dup = vdupq_n_u8(tag);
            const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup);
            const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup);
            const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup);
            const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup);

            const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1);
            const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1);
            const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2);
            const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4);
            const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4);
            const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0);
            return ZSTD_rotateRight_U64(matches, head);
        }
    }
# endif /* ZSTD_ARCH_ARM_NEON */
    /* SWAR */
    {   const size_t chunkSize = sizeof(size_t);
        const size_t shiftAmount = ((chunkSize * 8) - chunkSize);
        const size_t xFF = ~((size_t)0);
        const size_t x01 = xFF / 0xFF;
        const size_t x80 = x01 << 7;
        const size_t splatChar = tag * x01;
        ZSTD_VecMask matches = 0;
        int i = rowEntries - chunkSize;
        assert((sizeof(size_t) == 4) || (sizeof(size_t) == 8));
        if (MEM_isLittleEndian()) { /* runtime check so have two loops */
            const size_t extractMagic = (xFF / 0x7F) >> chunkSize;
            do {
                size_t chunk = MEM_readST(&src[i]);
                chunk ^= splatChar;
                chunk = (((chunk | x80) - x01) | chunk) & x80;
                matches <<= chunkSize;
                matches |= (chunk * extractMagic) >> shiftAmount;
                i -= chunkSize;
            } while (i >= 0);
        } else { /* big endian: reverse bits during extraction */
            const size_t msb = xFF ^ (xFF >> 1);
            const size_t extractMagic = (msb / 0x1FF) | msb;
            do {
                size_t chunk = MEM_readST(&src[i]);
                chunk ^= splatChar;
                chunk = (((chunk | x80) - x01) | chunk) & x80;
                matches <<= chunkSize;
                matches |= ((chunk >> 7) * extractMagic) >> shiftAmount;
                i -= chunkSize;
            } while (i >= 0);
        }
        matches = ~matches;
        if (rowEntries == 16) {
            return ZSTD_rotateRight_U16((U16)matches, head);
        } else if (rowEntries == 32) {
            return ZSTD_rotateRight_U32((U32)matches, head);
        } else {
            return ZSTD_rotateRight_U64((U64)matches, head);
        }
    }
#endif
}
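
/* Worked example of the SWAR path above, per machine-word chunk :
 * `chunk ^= splatChar` zeroes every byte equal to the tag ;
 * `(((chunk | x80) - x01) | chunk) & x80` then leaves the high bit of each
 * byte set iff that byte was non-zero, i.e. a mismatch ;
 * multiplying by extractMagic gathers those per-byte high bits into the top
 * byte, which `>> shiftAmount` moves down ; the final `matches = ~matches`
 * flips the result so set bits mark equal tags. */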

/* The high-level approach of the SIMD row based match finder is as follows:
 * - Figure out where to insert the new entry:
 *      - Generate a hash from a byte along with an additional 1-byte "short hash". The additional byte is our "tag"
 *      - The hashTable is effectively split into groups or "rows" of 16, 32, or 64 entries of U32, and the hash determines
 *        which row to insert into.
 *      - Determine the correct position within the row to insert the entry into. Each row of 16, 32, or 64 can
 *        be considered as a circular buffer with a "head" index that resides in the tagTable.
 *      - Also insert the "tag" into the equivalent row and position in the tagTable.
 *          - Note: The tagTable has 17, 33, or 65 1-byte entries per row, due to 16, 32, or 64 tags, and 1 "head" entry.
 *                  The rows are spaced out to occur every 32, 64, or 128 bytes, respectively,
 *                  for alignment/performance reasons, leaving some bytes unused.
 * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte "short hash" and
 *   generate a bitfield that we can cycle through to check the collisions in the hash table.
 * - Pick the longest match.
 */
FORCE_INLINE_TEMPLATE
size_t ZSTD_RowFindBestMatch(
                        ZSTD_matchState_t* ms,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 mls, const ZSTD_dictMode_e dictMode,
                        const U32 rowLog)
{
    U32* const hashTable = ms->hashTable;
    U16* const tagTable = ms->tagTable;
    U32* const hashCache = ms->hashCache;
    const U32 hashLog = ms->rowHashLog;
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const U32 curr = (U32)(ip-base);
    const U32 maxDistance = 1U << cParams->windowLog;
    const U32 lowestValid = ms->window.lowLimit;
    const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
    const U32 isDictionary = (ms->loadedDictEnd != 0);
    const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
    const U32 rowEntries = (1U << rowLog);
    const U32 rowMask = rowEntries - 1;
    const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */
    U32 nbAttempts = 1U << cappedSearchLog;
    size_t ml=4-1;

    /* DMS/DDS variables that may be referenced later */
    const ZSTD_matchState_t* const dms = ms->dictMatchState;

    /* Initialize the following variables to satisfy static analyzer */
    size_t ddsIdx = 0;
    U32 ddsExtraAttempts = 0; /* cctx hash tables are limited in searches, but allow extra searches into DDS */
    U32 dmsTag = 0;
    U32* dmsRow = NULL;
    BYTE* dmsTagRow = NULL;

    if (dictMode == ZSTD_dedicatedDictSearch) {
        const U32 ddsHashLog = dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
        {   /* Prefetch DDS hashtable entry */
            ddsIdx = ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG;
            PREFETCH_L1(&dms->hashTable[ddsIdx]);
        }
        ddsExtraAttempts = cParams->searchLog > rowLog ? 1U << (cParams->searchLog - rowLog) : 0;
    }

    if (dictMode == ZSTD_dictMatchState) {
        /* Prefetch DMS rows */
        U32* const dmsHashTable = dms->hashTable;
        U16* const dmsTagTable = dms->tagTable;
        U32 const dmsHash = (U32)ZSTD_hashPtr(ip, dms->rowHashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
        U32 const dmsRelRow = (dmsHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
        dmsTag = dmsHash & ZSTD_ROW_HASH_TAG_MASK;
        dmsTagRow = (BYTE*)(dmsTagTable + dmsRelRow);
        dmsRow = dmsHashTable + dmsRelRow;
        ZSTD_row_prefetch(dmsHashTable, dmsTagTable, dmsRelRow, rowLog);
    }

    /* Update the hashTable and tagTable up to (but not including) ip */
    ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */);
    {   /* Get the hash for ip, compute the appropriate row */
        U32 const hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls);
        U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
        U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK;
        U32* const row = hashTable + relRow;
        BYTE* tagRow = (BYTE*)(tagTable + relRow);
        U32 const head = *tagRow & rowMask;
        U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
        size_t numMatches = 0;
        size_t currMatch = 0;
        ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, head, rowEntries);

        /* Cycle through the matches and prefetch */
        for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) {
            U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask;
            U32 const matchIndex = row[matchPos];
            assert(numMatches < rowEntries);
            if (matchIndex < lowLimit)
                break;
            if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
                PREFETCH_L1(base + matchIndex);
            } else {
                PREFETCH_L1(dictBase + matchIndex);
            }
            matchBuffer[numMatches++] = matchIndex;
        }

        /* Speed opt: insert current byte into hashtable too. This allows us to avoid one iteration of the loop
           in ZSTD_row_update_internal() at the next search. */
        {
            U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
            tagRow[pos + ZSTD_ROW_HASH_TAG_OFFSET] = (BYTE)tag;
            row[pos] = ms->nextToUpdate++;
        }

        /* Return the longest match */
        for (; currMatch < numMatches; ++currMatch) {
            U32 const matchIndex = matchBuffer[currMatch];
            size_t currentMl=0;
            assert(matchIndex < curr);
            assert(matchIndex >= lowLimit);

            if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
                const BYTE* const match = base + matchIndex;
                assert(matchIndex >= dictLimit);   /* ensures this is true if dictMode != ZSTD_extDict */
                if (match[ml] == ip[ml])   /* potentially better */
                    currentMl = ZSTD_count(ip, match, iLimit);
            } else {
                const BYTE* const match = dictBase + matchIndex;
                assert(match+4 <= dictEnd);
                if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                    currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
            }

            /* Save best solution */
            if (currentMl > ml) {
                ml = currentMl;
                *offsetPtr = STORE_OFFSET(curr - matchIndex);
                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
            }
        }
    }

    assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
    if (dictMode == ZSTD_dedicatedDictSearch) {
        ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts + ddsExtraAttempts, dms,
                                                  ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
    } else if (dictMode == ZSTD_dictMatchState) {
        /* TODO: Measure and potentially add prefetching to DMS */
        const U32 dmsLowestIndex  = dms->window.dictLimit;
        const BYTE* const dmsBase = dms->window.base;
        const BYTE* const dmsEnd  = dms->window.nextSrc;
        const U32 dmsSize         = (U32)(dmsEnd - dmsBase);
        const U32 dmsIndexDelta   = dictLimit - dmsSize;

        {   U32 const head = *dmsTagRow & rowMask;
            U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
            size_t numMatches = 0;
            size_t currMatch = 0;
            ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, head, rowEntries);

            for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) {
                U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask;
                U32 const matchIndex = dmsRow[matchPos];
                if (matchIndex < dmsLowestIndex)
                    break;
                PREFETCH_L1(dmsBase + matchIndex);
                matchBuffer[numMatches++] = matchIndex;
            }

            /* Return the longest match */
            for (; currMatch < numMatches; ++currMatch) {
                U32 const matchIndex = matchBuffer[currMatch];
                size_t currentMl=0;
                assert(matchIndex >= dmsLowestIndex);
                assert(matchIndex < curr);

                {   const BYTE* const match = dmsBase + matchIndex;
                    assert(match+4 <= dmsEnd);
                    if (MEM_read32(match) == MEM_read32(ip))
                        currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
                }

                if (currentMl > ml) {
                    ml = currentMl;
                    assert(curr > matchIndex + dmsIndexDelta);
                    *offsetPtr = STORE_OFFSET(curr - (matchIndex + dmsIndexDelta));
                    if (ip+currentMl == iLimit) break;
                }
            }
        }
    }
    return ml;
}
1305
1306
1307 typedef size_t (*searchMax_f)(
1308 ZSTD_matchState_t* ms,
1309 const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
1310
1311 /**
1312  * This struct contains the functions necessary for the lazy matchfinder to
1313  * search. Currently that is only searchMax, but keeping a VTable makes it
1314  * easier to add more functions later.
1315 *
1316 * TODO: The start of the search function involves loading and calculating a
1317 * bunch of constants from the ZSTD_matchState_t. These computations could be
1318 * done in an initialization function, and saved somewhere in the match state.
1319 * Then we could pass a pointer to the saved state instead of the match state,
1320 * and avoid duplicate computations.
1321 *
1322 * TODO: Move the match re-winding into searchMax. This improves compression
1323 * ratio, and unlocks further simplifications with the next TODO.
1324 *
1325 * TODO: Try moving the repcode search into searchMax. After the re-winding
1326 * and repcode search are in searchMax, there is no more logic in the match
1327 * finder loop that requires knowledge about the dictMode. So we should be
1328 * able to avoid force inlining it, and we can join the extDict loop with
1329 * the single segment loop. It should go in searchMax instead of its own
1330 * function to avoid having multiple virtual function calls per search.
1331 */
1332 typedef struct {
1333 searchMax_f searchMax;
1334 } ZSTD_LazyVTable;
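
/* Illustrative sketch (not part of the library) of how the VTable is meant
 * to be consumed ; ZSTD_selectLazyVTable() is defined further below :
 *
 *     size_t offcode = 999999999;
 *     searchMax_f const searchMax =
 *         ZSTD_selectLazyVTable(ms, search_rowHash, ZSTD_noDict)->searchMax;
 *     size_t const mLength = searchMax(ms, ip, iend, &offcode);
 *
 * Each generated function asserts that the (mls, rowLog) baked into it
 * matches the runtime parameters, so selection and specialization stay in
 * sync. */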
1335
1336 #define GEN_ZSTD_BT_VTABLE(dictMode, mls) \
1337 static size_t ZSTD_BtFindBestMatch_##dictMode##_##mls( \
1338 ZSTD_matchState_t* ms, \
1339 const BYTE* ip, const BYTE* const iLimit, \
1340 size_t* offsetPtr) \
1341 { \
1342 assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
1343 return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \
1344 } \
1345 static const ZSTD_LazyVTable ZSTD_BtVTable_##dictMode##_##mls = { \
1346 ZSTD_BtFindBestMatch_##dictMode##_##mls \
1347 };
1348
1349 #define GEN_ZSTD_HC_VTABLE(dictMode, mls) \
1350 static size_t ZSTD_HcFindBestMatch_##dictMode##_##mls( \
1351 ZSTD_matchState_t* ms, \
1352 const BYTE* ip, const BYTE* const iLimit, \
1353 size_t* offsetPtr) \
1354 { \
1355 assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
1356 return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \
1357 } \
1358 static const ZSTD_LazyVTable ZSTD_HcVTable_##dictMode##_##mls = { \
1359 ZSTD_HcFindBestMatch_##dictMode##_##mls \
1360 };
1361
1362 #define GEN_ZSTD_ROW_VTABLE(dictMode, mls, rowLog) \
1363 static size_t ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog( \
1364 ZSTD_matchState_t* ms, \
1365 const BYTE* ip, const BYTE* const iLimit, \
1366 size_t* offsetPtr) \
1367 { \
1368 assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
1369 assert(MAX(4, MIN(6, ms->cParams.searchLog)) == rowLog); \
1370 return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode, rowLog); \
1371 } \
1372 static const ZSTD_LazyVTable ZSTD_RowVTable_##dictMode##_##mls##_##rowLog = { \
1373 ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog \
1374 };
1375
1376 #define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls) \
1377 X(dictMode, mls, 4) \
1378 X(dictMode, mls, 5) \
1379 X(dictMode, mls, 6)
1380
1381 #define ZSTD_FOR_EACH_MLS_ROWLOG(X, dictMode) \
1382 ZSTD_FOR_EACH_ROWLOG(X, dictMode, 4) \
1383 ZSTD_FOR_EACH_ROWLOG(X, dictMode, 5) \
1384 ZSTD_FOR_EACH_ROWLOG(X, dictMode, 6)
1385
1386 #define ZSTD_FOR_EACH_MLS(X, dictMode) \
1387 X(dictMode, 4) \
1388 X(dictMode, 5) \
1389 X(dictMode, 6)
1390
1391 #define ZSTD_FOR_EACH_DICT_MODE(X, ...) \
1392 X(__VA_ARGS__, noDict) \
1393 X(__VA_ARGS__, extDict) \
1394 X(__VA_ARGS__, dictMatchState) \
1395 X(__VA_ARGS__, dedicatedDictSearch)
1396
1397 /* Generate Row VTables for each combination of (dictMode, mls, rowLog) */
1398 ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_VTABLE)
1399 /* Generate Binary Tree VTables for each combination of (dictMode, mls) */
1400 ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_VTABLE)
1401 /* Generate Hash Chain VTables for each combination of (dictMode, mls) */
1402 ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_VTABLE)
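
/* For illustration : ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_VTABLE)
 * expands to ZSTD_FOR_EACH_MLS(GEN_ZSTD_HC_VTABLE, noDict) (and likewise for
 * the three other dict modes), which in turn expands to
 * GEN_ZSTD_HC_VTABLE(noDict, 4) / (noDict, 5) / (noDict, 6), defining
 * ZSTD_HcFindBestMatch_noDict_4..6 together with their
 * ZSTD_HcVTable_noDict_4..6 singletons. */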
1403
1404 #define GEN_ZSTD_BT_VTABLE_ARRAY(dictMode) \
1405 { \
1406 &ZSTD_BtVTable_##dictMode##_4, \
1407 &ZSTD_BtVTable_##dictMode##_5, \
1408 &ZSTD_BtVTable_##dictMode##_6 \
1409 }
1410
1411 #define GEN_ZSTD_HC_VTABLE_ARRAY(dictMode) \
1412 { \
1413 &ZSTD_HcVTable_##dictMode##_4, \
1414 &ZSTD_HcVTable_##dictMode##_5, \
1415 &ZSTD_HcVTable_##dictMode##_6 \
1416 }
1417
1418 #define GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, mls) \
1419 { \
1420 &ZSTD_RowVTable_##dictMode##_##mls##_4, \
1421 &ZSTD_RowVTable_##dictMode##_##mls##_5, \
1422 &ZSTD_RowVTable_##dictMode##_##mls##_6 \
1423 }
1424
1425 #define GEN_ZSTD_ROW_VTABLE_ARRAY(dictMode) \
1426 { \
1427 GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 4), \
1428 GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 5), \
1429 GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 6) \
1430 }
1431
1432 #define GEN_ZSTD_VTABLE_ARRAY(X) \
1433 { \
1434 X(noDict), \
1435 X(extDict), \
1436 X(dictMatchState), \
1437 X(dedicatedDictSearch) \
1438 }
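
/* For illustration : GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_HC_VTABLE_ARRAY) yields a
 * [4][3] initializer with one row per ZSTD_dictMode_e value, in enum order
 * (noDict, extDict, dictMatchState, dedicatedDictSearch), each row listing
 * the mls==4/5/6 VTable pointers for that mode. This layout is what lets
 * ZSTD_selectLazyVTable() below index with [dictMode][mls - 4]. */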
1439
1440 /* *******************************
1441 * Common parser - lazy strategy
1442 *********************************/
1443 typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e;
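/* search_hashChain backs the greedy/lazy/lazy2 entry points, search_binaryTree
 * backs btlazy2, and search_rowHash backs the *_row variants at the end of
 * this file. */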
1444
1445 /**
1446  * Returns the VTable matching the (searchMethod, dictMode) pair, further
1447  * specialized by mls (and, for the row-based matchfinder, by rowLog). The
1448  * arrays inside are indexed first by the four ZSTD_dictMode_e values, then
1449  * by mls (and rowLog); NULL is returned only for an invalid searchMethod.
1450 */
1451
1452 static ZSTD_LazyVTable const*
1453 ZSTD_selectLazyVTable(ZSTD_matchState_t const* ms, searchMethod_e searchMethod, ZSTD_dictMode_e dictMode)
1454 {
1455 /* Fill the Hc/Bt VTable arrays with the right functions for the (dictMode, mls) combination. */
1456 ZSTD_LazyVTable const* const hcVTables[4][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_HC_VTABLE_ARRAY);
1457 ZSTD_LazyVTable const* const btVTables[4][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_BT_VTABLE_ARRAY);
1458 /* Fill the Row VTable array with the right functions for the (dictMode, mls, rowLog) combination. */
1459 ZSTD_LazyVTable const* const rowVTables[4][3][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_ROW_VTABLE_ARRAY);
1460
1461 U32 const mls = MAX(4, MIN(6, ms->cParams.minMatch));
1462 U32 const rowLog = MAX(4, MIN(6, ms->cParams.searchLog));
1463 switch (searchMethod) {
1464 case search_hashChain:
1465 return hcVTables[dictMode][mls - 4];
1466 case search_binaryTree:
1467 return btVTables[dictMode][mls - 4];
1468 case search_rowHash:
1469 return rowVTables[dictMode][mls - 4][rowLog - 4];
1470 default:
1471 return NULL;
1472 }
1473 }
1474
1475 FORCE_INLINE_TEMPLATE size_t
1476 ZSTD_compressBlock_lazy_generic(
1477 ZSTD_matchState_t* ms, seqStore_t* seqStore,
1478 U32 rep[ZSTD_REP_NUM],
1479 const void* src, size_t srcSize,
1480 const searchMethod_e searchMethod, const U32 depth,
1481 ZSTD_dictMode_e const dictMode)
1482 {
1483 const BYTE* const istart = (const BYTE*)src;
1484 const BYTE* ip = istart;
1485 const BYTE* anchor = istart;
1486 const BYTE* const iend = istart + srcSize;
1487 const BYTE* const ilimit = (searchMethod == search_rowHash) ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
1488 const BYTE* const base = ms->window.base;
1489 const U32 prefixLowestIndex = ms->window.dictLimit;
1490 const BYTE* const prefixLowest = base + prefixLowestIndex;
1491
1492 searchMax_f const searchMax = ZSTD_selectLazyVTable(ms, searchMethod, dictMode)->searchMax;
1493 U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
1494
1495 const int isDMS = dictMode == ZSTD_dictMatchState;
1496 const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
1497 const int isDxS = isDMS || isDDS;
1498 const ZSTD_matchState_t* const dms = ms->dictMatchState;
1499 const U32 dictLowestIndex = isDxS ? dms->window.dictLimit : 0;
1500 const BYTE* const dictBase = isDxS ? dms->window.base : NULL;
1501 const BYTE* const dictLowest = isDxS ? dictBase + dictLowestIndex : NULL;
1502 const BYTE* const dictEnd = isDxS ? dms->window.nextSrc : NULL;
1503 const U32 dictIndexDelta = isDxS ?
1504 prefixLowestIndex - (U32)(dictEnd - dictBase) :
1505 0;
1506 const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest));
1507
1508 assert(searchMax != NULL);
1509
1510 DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u) (searchFunc=%u)", (U32)dictMode, (U32)searchMethod);
1511 ip += (dictAndPrefixLength == 0);
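/* A repcode offset reaching beyond the valid window cannot be tested on this
 * block : stash it in savedOffset (restored when reps are saved at the end of
 * the block) and disable it by setting it to 0. */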
1512 if (dictMode == ZSTD_noDict) {
1513 U32 const curr = (U32)(ip - base);
1514 U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
1515 U32 const maxRep = curr - windowLow;
1516 if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
1517 if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
1518 }
1519 if (isDxS) {
1520 /* dictMatchState repCode checks don't currently handle repCode == 0
1521 * disabling. */
1522 assert(offset_1 <= dictAndPrefixLength);
1523 assert(offset_2 <= dictAndPrefixLength);
1524 }
1525
1526 if (searchMethod == search_rowHash) {
1527 const U32 rowLog = MAX(4, MIN(6, ms->cParams.searchLog));
1528 ZSTD_row_fillHashCache(ms, base, rowLog,
1529 MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */),
1530 ms->nextToUpdate, ilimit);
1531 }
1532
1533 /* Match Loop */
1534 #if defined(__GNUC__) && defined(__x86_64__)
1535 /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
1536 * code alignment is perturbed. To fix the instability, align the loop on 32 bytes.
1537 */
1538 __asm__(".p2align 5");
1539 #endif
1540 while (ip < ilimit) {
1541 size_t matchLength=0;
1542 size_t offcode=STORE_REPCODE_1;
1543 const BYTE* start=ip+1;
1544 DEBUGLOG(7, "search baseline (depth 0)");
1545
1546 /* check repCode */
1547 if (isDxS) {
1548 const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
1549 const BYTE* repMatch = ((dictMode == ZSTD_dictMatchState || dictMode == ZSTD_dedicatedDictSearch)
1550 && repIndex < prefixLowestIndex) ?
1551 dictBase + (repIndex - dictIndexDelta) :
1552 base + repIndex;
1553 if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
1554 && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
1555 const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
1556 matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
1557 if (depth==0) goto _storeSequence;
1558 }
1559 }
1560 if ( dictMode == ZSTD_noDict
1561 && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
1562 matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
1563 if (depth==0) goto _storeSequence;
1564 }
1565
1566 /* first search (depth 0) */
1567 { size_t offsetFound = 999999999;
1568 size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
1569 if (ml2 > matchLength)
1570 matchLength = ml2, start = ip, offcode=offsetFound;
1571 }
1572
1573 if (matchLength < 4) {
1574 ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections : the step grows by 1 for every 2^kSearchStrength bytes without a match */
1575 continue;
1576 }
1577
1578 /* let's try to find a better solution */
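/* Deeper searches compare candidates with a rough price model :
 * gain ~= matchLength * factor - log2(offset code), plus a small constant
 * handicap in favor of the solution already found, so a new match is kept
 * only if its extra length pays for the bits of its (possibly larger)
 * offset. Repcodes encode in very few bits, hence their simpler gain. */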
1579 if (depth>=1)
1580 while (ip<ilimit) {
1581 DEBUGLOG(7, "search depth 1");
1582 ip ++;
1583 if ( (dictMode == ZSTD_noDict)
1584 && (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
1585 size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
1586 int const gain2 = (int)(mlRep * 3);
1587 int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
1588 if ((mlRep >= 4) && (gain2 > gain1))
1589 matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
1590 }
1591 if (isDxS) {
1592 const U32 repIndex = (U32)(ip - base) - offset_1;
1593 const BYTE* repMatch = repIndex < prefixLowestIndex ?
1594 dictBase + (repIndex - dictIndexDelta) :
1595 base + repIndex;
1596 if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
1597 && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
1598 const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
1599 size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
1600 int const gain2 = (int)(mlRep * 3);
1601 int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
1602 if ((mlRep >= 4) && (gain2 > gain1))
1603 matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
1604 }
1605 }
1606 { size_t offset2=999999999;
1607 size_t const ml2 = searchMax(ms, ip, iend, &offset2);
1608 int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
1609 int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4);
1610 if ((ml2 >= 4) && (gain2 > gain1)) {
1611 matchLength = ml2, offcode = offset2, start = ip;
1612 continue; /* search a better one */
1613 } }
1614
1615 /* let's find an even better one */
1616 if ((depth==2) && (ip<ilimit)) {
1617 DEBUGLOG(7, "search depth 2");
1618 ip ++;
1619 if ( (dictMode == ZSTD_noDict)
1620 && (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
1621 size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
1622 int const gain2 = (int)(mlRep * 4);
1623 int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
1624 if ((mlRep >= 4) && (gain2 > gain1))
1625 matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
1626 }
1627 if (isDxS) {
1628 const U32 repIndex = (U32)(ip - base) - offset_1;
1629 const BYTE* repMatch = repIndex < prefixLowestIndex ?
1630 dictBase + (repIndex - dictIndexDelta) :
1631 base + repIndex;
1632 if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
1633 && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
1634 const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
1635 size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
1636 int const gain2 = (int)(mlRep * 4);
1637 int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
1638 if ((mlRep >= 4) && (gain2 > gain1))
1639 matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
1640 }
1641 }
1642 { size_t offset2=999999999;
1643 size_t const ml2 = searchMax(ms, ip, iend, &offset2);
1644 int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
1645 int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7);
1646 if ((ml2 >= 4) && (gain2 > gain1)) {
1647 matchLength = ml2, offcode = offset2, start = ip;
1648 continue;
1649 } } }
1650 break; /* nothing found : store previous solution */
1651 }
1652
1653 /* NOTE:
1654 * Beware that `start[-value]` invokes undefined behavior when `value` has
1655 * an unsigned type : `-value` then wraps around to a huge positive offset.
1656 */
1657 /* catch up */
1658 if (STORED_IS_OFFSET(offcode)) {
1659 if (dictMode == ZSTD_noDict) {
1660 while ( ((start > anchor) & (start - STORED_OFFSET(offcode) > prefixLowest))
1661 && (start[-1] == (start-STORED_OFFSET(offcode))[-1]) ) /* only search for offset within prefix */
1662 { start--; matchLength++; }
1663 }
1664 if (isDxS) {
1665 U32 const matchIndex = (U32)((size_t)(start-base) - STORED_OFFSET(offcode));
1666 const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
1667 const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
1668 while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
1669 }
1670 offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode);
1671 }
1672 /* store sequence */
1673 _storeSequence:
1674 { size_t const litLength = (size_t)(start - anchor);
1675 ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength);
1676 anchor = ip = start + matchLength;
1677 }
1678
1679 /* check immediate repcode */
1680 if (isDxS) {
1681 while (ip <= ilimit) {
1682 U32 const current2 = (U32)(ip-base);
1683 U32 const repIndex = current2 - offset_2;
1684 const BYTE* repMatch = repIndex < prefixLowestIndex ?
1685 dictBase - dictIndexDelta + repIndex :
1686 base + repIndex;
1687 if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
1688 && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
1689 const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
1690 matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
1691 offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap offset_2 <=> offset_1 */
1692 ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength);
1693 ip += matchLength;
1694 anchor = ip;
1695 continue;
1696 }
1697 break;
1698 }
1699 }
1700
1701 if (dictMode == ZSTD_noDict) {
1702 while ( ((ip <= ilimit) & (offset_2>0))
1703 && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
1704 /* store sequence */
1705 matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
1706 offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap repcodes */
1707 ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength);
1708 ip += matchLength;
1709 anchor = ip;
1710 continue; /* faster when present ... (?) */
1711 } } }
1712
1713 /* Save reps for next block */
1714 rep[0] = offset_1 ? offset_1 : savedOffset;
1715 rep[1] = offset_2 ? offset_2 : savedOffset;
1716
1717 /* Return the last literals size */
1718 return (size_t)(iend - anchor);
1719 }
1720
1721
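/* The entry points below instantiate ZSTD_compressBlock_lazy_generic() with a
 * fixed (searchMethod, depth, dictMode) triple ; greedy, lazy, lazy2 and
 * btlazy2 correspond to depths 0, 1, 2 and 2 (binary tree) respectively. */
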
1722 size_t ZSTD_compressBlock_btlazy2(
1723 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1724 void const* src, size_t srcSize)
1725 {
1726 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
1727 }
1728
1729 size_t ZSTD_compressBlock_lazy2(
1730 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1731 void const* src, size_t srcSize)
1732 {
1733 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
1734 }
1735
1736 size_t ZSTD_compressBlock_lazy(
1737 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1738 void const* src, size_t srcSize)
1739 {
1740 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
1741 }
1742
1743 size_t ZSTD_compressBlock_greedy(
1744 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1745 void const* src, size_t srcSize)
1746 {
1747 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
1748 }
1749
1750 size_t ZSTD_compressBlock_btlazy2_dictMatchState(
1751 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1752 void const* src, size_t srcSize)
1753 {
1754 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
1755 }
1756
1757 size_t ZSTD_compressBlock_lazy2_dictMatchState(
1758 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1759 void const* src, size_t srcSize)
1760 {
1761 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
1762 }
1763
1764 size_t ZSTD_compressBlock_lazy_dictMatchState(
1765 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1766 void const* src, size_t srcSize)
1767 {
1768 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
1769 }
1770
1771 size_t ZSTD_compressBlock_greedy_dictMatchState(
1772 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1773 void const* src, size_t srcSize)
1774 {
1775 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
1776 }
1777
1778
1779 size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
1780 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1781 void const* src, size_t srcSize)
1782 {
1783 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
1784 }
1785
1786 size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
1787 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1788 void const* src, size_t srcSize)
1789 {
1790 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
1791 }
1792
1793 size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
1794 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1795 void const* src, size_t srcSize)
1796 {
1797 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
1798 }
1799
1800 /* Row-based matchfinder */
1801 size_t ZSTD_compressBlock_lazy2_row(
1802 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1803 void const* src, size_t srcSize)
1804 {
1805 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict);
1806 }
1807
1808 size_t ZSTD_compressBlock_lazy_row(
1809 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1810 void const* src, size_t srcSize)
1811 {
1812 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
1813 }
1814
1815 size_t ZSTD_compressBlock_greedy_row(
1816 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1817 void const* src, size_t srcSize)
1818 {
1819 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
1820 }
1821
1822 size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
1823 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1824 void const* src, size_t srcSize)
1825 {
1826 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
1827 }
1828
1829 size_t ZSTD_compressBlock_lazy_dictMatchState_row(
1830 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1831 void const* src, size_t srcSize)
1832 {
1833 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
1834 }
1835
1836 size_t ZSTD_compressBlock_greedy_dictMatchState_row(
1837 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1838 void const* src, size_t srcSize)
1839 {
1840 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
1841 }
1842
1843
1844 size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
1845 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1846 void const* src, size_t srcSize)
1847 {
1848 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch);
1849 }
1850
1851 size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
1852 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1853 void const* src, size_t srcSize)
1854 {
1855 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
1856 }
1857
1858 size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
1859 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1860 void const* src, size_t srcSize)
1861 {
1862 return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
1863 }
1864
1865 FORCE_INLINE_TEMPLATE
1866 size_t ZSTD_compressBlock_lazy_extDict_generic(
1867 ZSTD_matchState_t* ms, seqStore_t* seqStore,
1868 U32 rep[ZSTD_REP_NUM],
1869 const void* src, size_t srcSize,
1870 const searchMethod_e searchMethod, const U32 depth)
1871 {
1872 const BYTE* const istart = (const BYTE*)src;
1873 const BYTE* ip = istart;
1874 const BYTE* anchor = istart;
1875 const BYTE* const iend = istart + srcSize;
1876 const BYTE* const ilimit = searchMethod == search_rowHash ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
1877 const BYTE* const base = ms->window.base;
1878 const U32 dictLimit = ms->window.dictLimit;
1879 const BYTE* const prefixStart = base + dictLimit;
1880 const BYTE* const dictBase = ms->window.dictBase;
1881 const BYTE* const dictEnd = dictBase + dictLimit;
1882 const BYTE* const dictStart = dictBase + ms->window.lowLimit;
1883 const U32 windowLog = ms->cParams.windowLog;
1884 const U32 rowLog = ms->cParams.searchLog < 5 ? 4 : 5;
1885
1886 searchMax_f const searchMax = ZSTD_selectLazyVTable(ms, searchMethod, ZSTD_extDict)->searchMax;
1887 U32 offset_1 = rep[0], offset_2 = rep[1];
1888
1889 DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod);
1890
1891 /* init */
1892 ip += (ip == prefixStart);
1893 if (searchMethod == search_rowHash) {
1894 ZSTD_row_fillHashCache(ms, base, rowLog,
1895 MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */),
1896 ms->nextToUpdate, ilimit);
1897 }
1898
1899 /* Match Loop */
1900 #if defined(__GNUC__) && defined(__x86_64__)
1901 /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
1902 * code alignment is perturbed. To fix the instability, align the loop on 32 bytes.
1903 */
1904 __asm__(".p2align 5");
1905 #endif
1906 while (ip < ilimit) {
1907 size_t matchLength=0;
1908 size_t offcode=STORE_REPCODE_1;
1909 const BYTE* start=ip+1;
1910 U32 curr = (U32)(ip-base);
1911
1912 /* check repCode */
1913 { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr+1, windowLog);
1914 const U32 repIndex = (U32)(curr+1 - offset_1);
1915 const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
1916 const BYTE* const repMatch = repBase + repIndex;
1917 if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow */
1918 & (offset_1 <= curr+1 - windowLow) ) /* note: we are searching at curr+1 */
1919 if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
1920 /* repcode detected, we should take it */
1921 const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
1922 matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
1923 if (depth==0) goto _storeSequence;
1924 } }
1925
1926 /* first search (depth 0) */
1927 { size_t offsetFound = 999999999;
1928 size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
1929 if (ml2 > matchLength)
1930 matchLength = ml2, start = ip, offcode=offsetFound;
1931 }
1932
1933 if (matchLength < 4) {
1934 ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections : the step grows by 1 for every 2^kSearchStrength bytes without a match */
1935 continue;
1936 }
1937
1938 /* let's try to find a better solution */
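/* (same rough price model as in ZSTD_compressBlock_lazy_generic() above) */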
1939 if (depth>=1)
1940 while (ip<ilimit) {
1941 ip ++;
1942 curr++;
1943 /* check repCode */
1944 if (offcode) {
1945 const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
1946 const U32 repIndex = (U32)(curr - offset_1);
1947 const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
1948 const BYTE* const repMatch = repBase + repIndex;
1949 if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
1950 & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
1951 if (MEM_read32(ip) == MEM_read32(repMatch)) {
1952 /* repcode detected */
1953 const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
1954 size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
1955 int const gain2 = (int)(repLength * 3);
1956 int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
1957 if ((repLength >= 4) && (gain2 > gain1))
1958 matchLength = repLength, offcode = STORE_REPCODE_1, start = ip;
1959 } }
1960
1961 /* search match, depth 1 */
1962 { size_t offset2=999999999;
1963 size_t const ml2 = searchMax(ms, ip, iend, &offset2);
1964 int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
1965 int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4);
1966 if ((ml2 >= 4) && (gain2 > gain1)) {
1967 matchLength = ml2, offcode = offset2, start = ip;
1968 continue; /* search a better one */
1969 } }
1970
1971 /* let's find an even better one */
1972 if ((depth==2) && (ip<ilimit)) {
1973 ip ++;
1974 curr++;
1975 /* check repCode */
1976 if (offcode) {
1977 const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
1978 const U32 repIndex = (U32)(curr - offset_1);
1979 const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
1980 const BYTE* const repMatch = repBase + repIndex;
1981 if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
1982 & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
1983 if (MEM_read32(ip) == MEM_read32(repMatch)) {
1984 /* repcode detected */
1985 const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
1986 size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
1987 int const gain2 = (int)(repLength * 4);
1988 int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
1989 if ((repLength >= 4) && (gain2 > gain1))
1990 matchLength = repLength, offcode = STORE_REPCODE_1, start = ip;
1991 } }
1992
1993 /* search match, depth 2 */
1994 { size_t offset2=999999999;
1995 size_t const ml2 = searchMax(ms, ip, iend, &offset2);
1996 int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
1997 int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7);
1998 if ((ml2 >= 4) && (gain2 > gain1)) {
1999 matchLength = ml2, offcode = offset2, start = ip;
2000 continue;
2001 } } }
2002 break; /* nothing found : store previous solution */
2003 }
2004
2005 /* catch up */
2006 if (STORED_IS_OFFSET(offcode)) {
2007 U32 const matchIndex = (U32)((size_t)(start-base) - STORED_OFFSET(offcode));
2008 const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
2009 const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
2010 while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
2011 offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode);
2012 }
2013
2014 /* store sequence */
2015 _storeSequence:
2016 { size_t const litLength = (size_t)(start - anchor);
2017 ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength);
2018 anchor = ip = start + matchLength;
2019 }
2020
2021 /* check immediate repcode */
2022 while (ip <= ilimit) {
2023 const U32 repCurrent = (U32)(ip-base);
2024 const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog);
2025 const U32 repIndex = repCurrent - offset_2;
2026 const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
2027 const BYTE* const repMatch = repBase + repIndex;
2028 if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
2029 & (offset_2 <= repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
2030 if (MEM_read32(ip) == MEM_read32(repMatch)) {
2031 /* repcode detected, we should take it */
2032 const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
2033 matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
2034 offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap offset history */
2035 ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength);
2036 ip += matchLength;
2037 anchor = ip;
2038 continue; /* faster when present ... (?) */
2039 }
2040 break;
2041 } }
2042
2043 /* Save reps for next block */
2044 rep[0] = offset_1;
2045 rep[1] = offset_2;
2046
2047 /* Return the last literals size */
2048 return (size_t)(iend - anchor);
2049 }
2050
2051
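/* The extDict entry points below mirror the single-segment ones above : each
 * fixes a (searchMethod, depth) pair for ZSTD_compressBlock_lazy_extDict_generic(). */
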
2052 size_t ZSTD_compressBlock_greedy_extDict(
2053 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
2054 void const* src, size_t srcSize)
2055 {
2056 return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
2057 }
2058
2059 size_t ZSTD_compressBlock_lazy_extDict(
2060 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
2061 void const* src, size_t srcSize)
2062
2063 {
2064 return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
2065 }
2066
2067 size_t ZSTD_compressBlock_lazy2_extDict(
2068 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
2069 void const* src, size_t srcSize)
2070
2071 {
2072 return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
2073 }
2074
2075 size_t ZSTD_compressBlock_btlazy2_extDict(
2076 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
2077 void const* src, size_t srcSize)
2078
2079 {
2080 return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
2081 }
2082
2083 size_t ZSTD_compressBlock_greedy_extDict_row(
2084 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
2085 void const* src, size_t srcSize)
2086 {
2087 return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);
2088 }
2089
2090 size_t ZSTD_compressBlock_lazy_extDict_row(
2091 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
2092 void const* src, size_t srcSize)
2093
2094 {
2095 return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1);
2096 }
2097
2098 size_t ZSTD_compressBlock_lazy2_extDict_row(
2099 ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
2100 void const* src, size_t srcSize)
2101
2102 {
2103 return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
2104 }
2105