// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-only
/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* zstd_decompress_block :
 * this module takes care of decompressing _compressed_ blocks */

/*-*******************************************************
*  Dependencies
*********************************************************/
#include <string.h>      /* memcpy, memmove, memset */
#include "../common/compiler.h"    /* prefetch */
#include "../common/cpu.h"         /* bmi2 */
#include "../common/mem.h"         /* low level memory routines */
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "../common/zstd_internal.h"
#include "zstd_decompress_internal.h"   /* ZSTD_DCtx */
#include "zstd_ddict.h"  /* ZSTD_DDictDictContent */
#include "zstd_decompress_block.h"

/*_*******************************************************
*  Macros
**********************************************************/

/* These two optional macros force the use of one or the other of the two
 * ZSTD_decompressSequences implementations. You can't force both directions
 * at the same time.
 */
#if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
    defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
#error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!"
#endif


/*_*******************************************************
*  Memory operations
**********************************************************/
static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }


/*-*************************************************************
 *   Block decoding
 ***************************************************************/

/*! ZSTD_getcBlockSize() :
 *  Provides the size of the compressed block from block header `src` */
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
                          blockProperties_t* bpPtr)
{
    RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong, "");

    {   U32 const cBlockHeader = MEM_readLE24(src);
        U32 const cSize = cBlockHeader >> 3;
        bpPtr->lastBlock = cBlockHeader & 1;
        bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
        bpPtr->origSize = cSize;  /* only useful for RLE */
        if (bpPtr->blockType == bt_rle) return 1;
        RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected, "");
        return cSize;
    }
}
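
/* Worked example (illustrative) : a block header of bytes { 0x25, 0x00, 0x00 }
 * reads as cBlockHeader = 0x000025 = 0b100101 :
 *   lastBlock = bit 0     = 1    (this is the final block)
 *   blockType = bits 1-2  = 0b10 = bt_compressed
 *   cSize     = bits 3-23 = 4    (4 bytes of compressed payload follow)
 */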


/* Hidden declaration for fullbench */
size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
                          const void* src, size_t srcSize);
/*! ZSTD_decodeLiteralsBlock() :
 * @return : nb of bytes read from src (< srcSize)
 *  note : symbol not declared but exposed for fullbench */
size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
{
    DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
    RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");

    {   const BYTE* const istart = (const BYTE*) src;
        symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);

        switch(litEncType)
        {
        case set_repeat:
            DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
            RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, "");
            /* fall-through */

        case set_compressed:
            RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
            {   size_t lhSize, litSize, litCSize;
                U32 singleStream=0;
                U32 const lhlCode = (istart[0] >> 2) & 3;
                U32 const lhc = MEM_readLE32(istart);
                size_t hufSuccess;
                switch(lhlCode)
                {
                case 0: case 1: default:   /* note : default is impossible, since lhlCode is in [0..3] */
                    /* 2 - 2 - 10 - 10 */
                    singleStream = !lhlCode;
                    lhSize = 3;
                    litSize  = (lhc >> 4) & 0x3FF;
                    litCSize = (lhc >> 14) & 0x3FF;
                    break;
                case 2:
                    /* 2 - 2 - 14 - 14 */
                    lhSize = 4;
                    litSize  = (lhc >> 4) & 0x3FFF;
                    litCSize = lhc >> 18;
                    break;
                case 3:
                    /* 2 - 2 - 18 - 18 */
                    lhSize = 5;
                    litSize  = (lhc >> 4) & 0x3FFFF;
                    litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
                    break;
                }
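                /* Worked example (illustrative) : header bytes { 0x0A, 0x31, 0x00, 0x04 }
                 * give lhc = 0x0400310A, litEncType = 0b10 (set_compressed) and
                 * lhlCode = 0b10 (the 2 - 2 - 14 - 14 format), hence lhSize = 4,
                 * litSize = (lhc >> 4) & 0x3FFF = 784, litCSize = lhc >> 18 = 256 :
                 * 256 compressed bytes regenerate 784 literals. */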
                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
                RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, "");

                /* prefetch huffman table if cold */
                if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
                    PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
                }

                if (litEncType==set_repeat) {
                    if (singleStream) {
                        hufSuccess = HUF_decompress1X_usingDTable_bmi2(
                            dctx->litBuffer, litSize, istart+lhSize, litCSize,
                            dctx->HUFptr, dctx->bmi2);
                    } else {
                        hufSuccess = HUF_decompress4X_usingDTable_bmi2(
                            dctx->litBuffer, litSize, istart+lhSize, litCSize,
                            dctx->HUFptr, dctx->bmi2);
                    }
                } else {
                    if (singleStream) {
#if defined(HUF_FORCE_DECOMPRESS_X2)
                        hufSuccess = HUF_decompress1X_DCtx_wksp(
                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
                            istart+lhSize, litCSize, dctx->workspace,
                            sizeof(dctx->workspace));
#else
                        hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2(
                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
                            istart+lhSize, litCSize, dctx->workspace,
                            sizeof(dctx->workspace), dctx->bmi2);
#endif
                    } else {
                        hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2(
                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
                            istart+lhSize, litCSize, dctx->workspace,
                            sizeof(dctx->workspace), dctx->bmi2);
                    }
                }

                RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, "");

                dctx->litPtr = dctx->litBuffer;
                dctx->litSize = litSize;
                dctx->litEntropy = 1;
                if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
                return litCSize + lhSize;
            }

        case set_basic:
            {   size_t litSize, lhSize;
                U32 const lhlCode = ((istart[0]) >> 2) & 3;
                switch(lhlCode)
                {
                case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
                    lhSize = 1;
                    litSize = istart[0] >> 3;
                    break;
                case 1:
                    lhSize = 2;
                    litSize = MEM_readLE16(istart) >> 4;
                    break;
                case 3:
                    lhSize = 3;
                    litSize = MEM_readLE24(istart) >> 4;
                    break;
                }

                if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
                    RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, "");
                    memcpy(dctx->litBuffer, istart+lhSize, litSize);
                    dctx->litPtr = dctx->litBuffer;
                    dctx->litSize = litSize;
                    memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
                    return lhSize+litSize;
                }
                /* direct reference into compressed stream */
                dctx->litPtr = istart+lhSize;
                dctx->litSize = litSize;
                return lhSize+litSize;
            }

        case set_rle:
            {   U32 const lhlCode = ((istart[0]) >> 2) & 3;
                size_t litSize, lhSize;
                switch(lhlCode)
                {
                case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
                    lhSize = 1;
                    litSize = istart[0] >> 3;
                    break;
                case 1:
                    lhSize = 2;
                    litSize = MEM_readLE16(istart) >> 4;
                    break;
                case 3:
                    lhSize = 3;
                    litSize = MEM_readLE24(istart) >> 4;
                    RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
                    break;
                }
                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
                memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
                dctx->litPtr = dctx->litBuffer;
                dctx->litSize = litSize;
                return lhSize+1;
            }
        default:
            RETURN_ERROR(corruption_detected, "impossible");
        }
    }
}

/* Default FSE distribution tables.
 * These are pre-calculated FSE decoding tables using default distributions as defined in specification :
 * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#default-distributions
 * They were generated programmatically with the following method :
 * - start from default distributions, present in /lib/common/zstd_internal.h
 * - generate tables normally, using ZSTD_buildFSETable()
 * - print out the content of the tables
 * - prettify the output, reported below, and test with the fuzzer to ensure it's correct */
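
/* A minimal sketch of that regeneration step (illustrative only, not part of
 * the build; it assumes the default normalized counts from zstd_internal.h) :
 *
 *   ZSTD_seqSymbol dt[SEQSYMBOL_TABLE_SIZE(LL_DEFAULTNORMLOG)];
 *   ZSTD_buildFSETable(dt, LL_defaultNorm, MaxLL, LL_base, LL_bits, LL_DEFAULTNORMLOG);
 *   // printing dt[] then yields the LL_defaultDTable contents below
 */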

/* Default FSE distribution table for Literal Lengths */
static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
     {  1,  1,  1, LL_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
     /* nextState, nbAddBits, nbBits, baseVal */
     {  0,  0,  4,    0},  { 16,  0,  4,    0},
     { 32,  0,  5,    1},  {  0,  0,  5,    3},
     {  0,  0,  5,    4},  {  0,  0,  5,    6},
     {  0,  0,  5,    7},  {  0,  0,  5,    9},
     {  0,  0,  5,   10},  {  0,  0,  5,   12},
     {  0,  0,  6,   14},  {  0,  1,  5,   16},
     {  0,  1,  5,   20},  {  0,  1,  5,   22},
     {  0,  2,  5,   28},  {  0,  3,  5,   32},
     {  0,  4,  5,   48},  { 32,  6,  5,   64},
     {  0,  7,  5,  128},  {  0,  8,  6,  256},
     {  0, 10,  6, 1024},  {  0, 12,  6, 4096},
     { 32,  0,  4,    0},  {  0,  0,  4,    1},
     {  0,  0,  5,    2},  { 32,  0,  5,    4},
     {  0,  0,  5,    5},  { 32,  0,  5,    7},
     {  0,  0,  5,    8},  { 32,  0,  5,   10},
     {  0,  0,  5,   11},  {  0,  0,  6,   13},
     { 32,  1,  5,   16},  {  0,  1,  5,   18},
     { 32,  1,  5,   22},  {  0,  2,  5,   24},
     { 32,  3,  5,   32},  {  0,  3,  5,   40},
     {  0,  6,  4,   64},  { 16,  6,  4,   64},
     { 32,  7,  5,  128},  {  0,  9,  6,  512},
     {  0, 11,  6, 2048},  { 48,  0,  4,    0},
     { 16,  0,  4,    1},  { 32,  0,  5,    2},
     { 32,  0,  5,    3},  { 32,  0,  5,    5},
     { 32,  0,  5,    6},  { 32,  0,  5,    8},
     { 32,  0,  5,    9},  { 32,  0,  5,   11},
     { 32,  0,  5,   12},  {  0,  0,  6,   15},
     { 32,  1,  5,   18},  { 32,  1,  5,   20},
     { 32,  2,  5,   24},  { 32,  2,  5,   28},
     { 32,  3,  5,   40},  { 32,  4,  5,   48},
     {  0, 16,  6,65536},  {  0, 15,  6,32768},
     {  0, 14,  6,16384},  {  0, 13,  6, 8192},
};   /* LL_defaultDTable */

/* Default FSE distribution table for Offset Codes */
static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
    {  1,  1,  1, OF_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
    /* nextState, nbAddBits, nbBits, baseVal */
    {  0,  0,  5,    0},     {  0,  6,  4,   61},
    {  0,  9,  5,  509},     {  0, 15,  5,32765},
    {  0, 21,  5,2097149},   {  0,  3,  5,    5},
    {  0,  7,  4,  125},     {  0, 12,  5, 4093},
    {  0, 18,  5,262141},    {  0, 23,  5,8388605},
    {  0,  5,  5,   29},     {  0,  8,  4,  253},
    {  0, 14,  5,16381},     {  0, 20,  5,1048573},
    {  0,  2,  5,    1},     { 16,  7,  4,  125},
    {  0, 11,  5, 2045},     {  0, 17,  5,131069},
    {  0, 22,  5,4194301},   {  0,  4,  5,   13},
    { 16,  8,  4,  253},     {  0, 13,  5, 8189},
    {  0, 19,  5,524285},    {  0,  1,  5,    1},
    { 16,  6,  4,   61},     {  0, 10,  5, 1021},
    {  0, 16,  5,65533},     {  0, 28,  5,268435453},
    {  0, 27,  5,134217725}, {  0, 26,  5,67108861},
    {  0, 25,  5,33554429},  {  0, 24,  5,16777213},
};   /* OF_defaultDTable */


/* Default FSE distribution table for Match Lengths */
static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
    {  1,  1,  1, ML_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
    /* nextState, nbAddBits, nbBits, baseVal */
    {  0,  0,  6,    3},  {  0,  0,  4,    4},
    { 32,  0,  5,    5},  {  0,  0,  5,    6},
    {  0,  0,  5,    8},  {  0,  0,  5,    9},
    {  0,  0,  5,   11},  {  0,  0,  6,   13},
    {  0,  0,  6,   16},  {  0,  0,  6,   19},
    {  0,  0,  6,   22},  {  0,  0,  6,   25},
    {  0,  0,  6,   28},  {  0,  0,  6,   31},
    {  0,  0,  6,   34},  {  0,  1,  6,   37},
    {  0,  1,  6,   41},  {  0,  2,  6,   47},
    {  0,  3,  6,   59},  {  0,  4,  6,   83},
    {  0,  7,  6,  131},  {  0,  9,  6,  515},
    { 16,  0,  4,    4},  {  0,  0,  4,    5},
    { 32,  0,  5,    6},  {  0,  0,  5,    7},
    { 32,  0,  5,    9},  {  0,  0,  5,   10},
    {  0,  0,  6,   12},  {  0,  0,  6,   15},
    {  0,  0,  6,   18},  {  0,  0,  6,   21},
    {  0,  0,  6,   24},  {  0,  0,  6,   27},
    {  0,  0,  6,   30},  {  0,  0,  6,   33},
    {  0,  1,  6,   35},  {  0,  1,  6,   39},
    {  0,  2,  6,   43},  {  0,  3,  6,   51},
    {  0,  4,  6,   67},  {  0,  5,  6,   99},
    {  0,  8,  6,  259},  { 32,  0,  4,    4},
    { 48,  0,  4,    4},  { 16,  0,  4,    5},
    { 32,  0,  5,    7},  { 32,  0,  5,    8},
    { 32,  0,  5,   10},  { 32,  0,  5,   11},
    {  0,  0,  6,   14},  {  0,  0,  6,   17},
    {  0,  0,  6,   20},  {  0,  0,  6,   23},
    {  0,  0,  6,   26},  {  0,  0,  6,   29},
    {  0,  0,  6,   32},  {  0, 16,  6,65539},
    {  0, 15,  6,32771},  {  0, 14,  6,16387},
    {  0, 13,  6, 8195},  {  0, 12,  6, 4099},
    {  0, 11,  6, 2051},  {  0, 10,  6, 1027},
};   /* ML_defaultDTable */


static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits)
{
    void* ptr = dt;
    ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
    ZSTD_seqSymbol* const cell = dt + 1;

    DTableH->tableLog = 0;
    DTableH->fastMode = 0;

    cell->nbBits = 0;
    cell->nextState = 0;
    assert(nbAddBits < 255);
    cell->nbAdditionalBits = (BYTE)nbAddBits;
    cell->baseValue = baseValue;
}
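
/* Note : a table built this way has tableLog==0 and nbBits==0, so each state
 * update consumes zero bits and always lands back on the same single cell :
 * every symbol decoded from it yields the same (baseValue, nbAdditionalBits)
 * pair, which is exactly the RLE behaviour. */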


/* ZSTD_buildFSETable() :
 * generate FSE decoding table for one symbol (ll, ml or off)
 * cannot fail if input is valid =>
 * all inputs are presumed validated at this stage */
void
ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
            const short* normalizedCounter, unsigned maxSymbolValue,
            const U32* baseValue, const U32* nbAdditionalBits,
            unsigned tableLog)
{
    ZSTD_seqSymbol* const tableDecode = dt+1;
    U16 symbolNext[MaxSeq+1];

    U32 const maxSV1 = maxSymbolValue + 1;
    U32 const tableSize = 1 << tableLog;
    U32 highThreshold = tableSize-1;

    /* Sanity Checks */
    assert(maxSymbolValue <= MaxSeq);
    assert(tableLog <= MaxFSELog);

    /* Init, lay down lowprob symbols */
    {   ZSTD_seqSymbol_header DTableH;
        DTableH.tableLog = tableLog;
        DTableH.fastMode = 1;
        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
            U32 s;
            for (s=0; s<maxSV1; s++) {
                if (normalizedCounter[s]==-1) {
                    tableDecode[highThreshold--].baseValue = s;
                    symbolNext[s] = 1;
                } else {
                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
                    assert(normalizedCounter[s]>=0);
                    symbolNext[s] = (U16)normalizedCounter[s];
        }   }   }
        memcpy(dt, &DTableH, sizeof(DTableH));
    }

    /* Spread symbols */
    {   U32 const tableMask = tableSize-1;
        U32 const step = FSE_TABLESTEP(tableSize);
        U32 s, position = 0;
        for (s=0; s<maxSV1; s++) {
            int i;
            for (i=0; i<normalizedCounter[s]; i++) {
                tableDecode[position].baseValue = s;
                position = (position + step) & tableMask;
                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
        }   }
        assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
    }
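
    /* Worked example of the spread step (illustrative) : with tableLog==2
     * (tableSize==4, step==FSE_TABLESTEP(4)==5) and normalized counts
     * {A:2, B:1, C:-1}, the lowprob symbol C is laid down at slot 3 first
     * (highThreshold drops to 2); the spread then places A in slots 0 and 1
     * and B in slot 2. The next candidate position, 3, lies above
     * highThreshold and is skipped, so position wraps back to 0 as asserted. */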

    /* Build Decoding table */
    {   U32 u;
        for (u=0; u<tableSize; u++) {
            U32 const symbol = tableDecode[u].baseValue;
            U32 const nextState = symbolNext[symbol]++;
            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
            tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
            assert(nbAdditionalBits[symbol] < 255);
            tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
            tableDecode[u].baseValue = baseValue[symbol];
    }   }
}


/*! ZSTD_buildSeqTable() :
 * @return : nb bytes read from src,
 *           or an error code if it fails */
static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
                                 symbolEncodingType_e type, unsigned max, U32 maxLog,
                                 const void* src, size_t srcSize,
                                 const U32* baseValue, const U32* nbAdditionalBits,
                                 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
                                 int ddictIsCold, int nbSeq)
{
    switch(type)
    {
    case set_rle :
        RETURN_ERROR_IF(!srcSize, srcSize_wrong, "");
        RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected, "");
        {   U32 const symbol = *(const BYTE*)src;
            U32 const baseline = baseValue[symbol];
            U32 const nbBits = nbAdditionalBits[symbol];
            ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
        }
        *DTablePtr = DTableSpace;
        return 1;
    case set_basic :
        *DTablePtr = defaultTable;
        return 0;
    case set_repeat:
        RETURN_ERROR_IF(!flagRepeatTable, corruption_detected, "");
        /* prefetch FSE table if used */
        if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
            const void* const pStart = *DTablePtr;
            size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
            PREFETCH_AREA(pStart, pSize);
        }
        return 0;
    case set_compressed :
        {   unsigned tableLog;
            S16 norm[MaxSeq+1];
            size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
            RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, "");
            RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, "");
            ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
            *DTablePtr = DTableSpace;
            return headerSize;
        }
    default :
        assert(0);
        RETURN_ERROR(GENERIC, "impossible");
    }
}

size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
                             const void* src, size_t srcSize)
{
    const BYTE* const istart = (const BYTE* const)src;
    const BYTE* const iend = istart + srcSize;
    const BYTE* ip = istart;
    int nbSeq;
    DEBUGLOG(5, "ZSTD_decodeSeqHeaders");

    /* check */
    RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, "");

    /* SeqHead */
    nbSeq = *ip++;
    if (!nbSeq) {
        *nbSeqPtr=0;
        RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, "");
        return 1;
    }
    if (nbSeq > 0x7F) {
        if (nbSeq == 0xFF) {
            RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");
            nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
        } else {
            RETURN_ERROR_IF(ip >= iend, srcSize_wrong, "");
            nbSeq = ((nbSeq-0x80)<<8) + *ip++;
        }
    }
    *nbSeqPtr = nbSeq;

    /* FSE table descriptors */
    RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */
    {   symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
        symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
        symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
        ip++;

        /* Build DTables */
        {   size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
                                                      LLtype, MaxLL, LLFSELog,
                                                      ip, iend-ip,
                                                      LL_base, LL_bits,
                                                      LL_defaultDTable, dctx->fseEntropy,
                                                      dctx->ddictIsCold, nbSeq);
            RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed");
            ip += llhSize;
        }

        {   size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
                                                      OFtype, MaxOff, OffFSELog,
                                                      ip, iend-ip,
                                                      OF_base, OF_bits,
                                                      OF_defaultDTable, dctx->fseEntropy,
                                                      dctx->ddictIsCold, nbSeq);
            RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed");
            ip += ofhSize;
        }

        {   size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
                                                      MLtype, MaxML, MLFSELog,
                                                      ip, iend-ip,
                                                      ML_base, ML_bits,
                                                      ML_defaultDTable, dctx->fseEntropy,
                                                      dctx->ddictIsCold, nbSeq);
            RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed");
            ip += mlhSize;
        }
    }

    return ip-istart;
}
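
/* Worked example (illustrative) : a sequences section starting with bytes
 * { 0x85, 0x10, 0x64 } decodes as follows. 0x85 > 0x7F and != 0xFF, so
 * nbSeq = ((0x85-0x80)<<8) + 0x10 = 1296. The descriptor byte 0x64 =
 * 0b01100100 then gives LLtype = 0b01 (set_rle), OFtype = 0b10
 * (set_compressed) and MLtype = 0b01 (set_rle); bits 0-1 are reserved. */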


typedef struct {
    size_t litLength;
    size_t matchLength;
    size_t offset;
} seq_t;

typedef struct {
    size_t state;
    const ZSTD_seqSymbol* table;
} ZSTD_fseState;

typedef struct {
    BIT_DStream_t DStream;
    ZSTD_fseState stateLL;
    ZSTD_fseState stateOffb;
    ZSTD_fseState stateML;
    size_t prevOffset[ZSTD_REP_NUM];
} seqState_t;

/*! ZSTD_overlapCopy8() :
 *  Copies 8 bytes from ip to op and updates op and ip where ip <= op.
 *  If the offset is < 8 then the offset is spread to at least 8 bytes.
 *
 *  Precondition: *ip <= *op
 *  Postcondition: *op - *ip >= 8
 */
HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
    assert(*ip <= *op);
    if (offset < 8) {
        /* close range match, overlap */
        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
        int const sub2 = dec64table[offset];
        (*op)[0] = (*ip)[0];
        (*op)[1] = (*ip)[1];
        (*op)[2] = (*ip)[2];
        (*op)[3] = (*ip)[3];
        *ip += dec32table[offset];
        ZSTD_copy4(*op+4, *ip);
        *ip -= sub2;
    } else {
        ZSTD_copy8(*op, *ip);
    }
    *ip += 8;
    *op += 8;
    assert(*op - *ip >= 8);
}
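
/* Worked example (illustrative) : for offset==2 over the pattern "ab", the
 * first four byte-by-byte copies replicate "abab" at op[0..3]. *ip then
 * advances by dec32table[2]==2, so the 4-byte copy fills op[4..7] from
 * op[0..3]. Finally *ip is rewound by dec64table[2]==8 and both pointers
 * advance by 8, leaving *op - *ip == 8 : the effective offset is now wide
 * enough for overlap-free 8-byte copies. */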


/*! ZSTD_safecopy() :
 *  Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer
 *  and write up to 16 bytes past oend_w (op >= oend_w is allowed).
 *  This function is only called in the uncommon case where the sequence is near the end of the block. It
 *  should be fast for a single long sequence, but can be slow for several short sequences.
 *
 *  @param ovtype controls the overlap detection
 *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
 *         - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
 *           The src buffer must be before the dst buffer.
 */
static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
    ptrdiff_t const diff = op - ip;
    BYTE* const oend = op + length;

    assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) ||
           (ovtype == ZSTD_overlap_src_before_dst && diff >= 0));

    if (length < 8) {
        /* Handle short lengths. */
        while (op < oend) *op++ = *ip++;
        return;
    }
    if (ovtype == ZSTD_overlap_src_before_dst) {
        /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
        assert(length >= 8);
        ZSTD_overlapCopy8(&op, &ip, diff);
        assert(op - ip >= 8);
        assert(op <= oend);
    }

    if (oend <= oend_w) {
        /* No risk of overwrite. */
        ZSTD_wildcopy(op, ip, length, ovtype);
        return;
    }
    if (op <= oend_w) {
        /* Wildcopy until we get close to the end. */
        assert(oend > oend_w);
        ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
        ip += oend_w - op;
        op = oend_w;
    }
    /* Handle the leftovers. */
    while (op < oend) *op++ = *ip++;
}

/* ZSTD_execSequenceEnd():
 * This version handles cases that are near the end of the output buffer. It requires
 * more careful checks to make sure there is no overflow. By separating out these hard
 * and unlikely cases, we can speed up the common cases.
 *
 * NOTE: This function needs to be fast for a single long sequence, but doesn't need
 * to be optimized for many small sequences, since those fall into ZSTD_execSequence().
 */
FORCE_NOINLINE
size_t ZSTD_execSequenceEnd(BYTE* op,
                            BYTE* const oend, seq_t sequence,
                            const BYTE** litPtr, const BYTE* const litLimit,
                            const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
{
    BYTE* const oLitEnd = op + sequence.litLength;
    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
    const BYTE* match = oLitEnd - sequence.offset;
    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;

    /* bounds checks : careful of address space overflow in 32-bit mode */
    RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
    RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
    assert(op < op + sequenceLength);
    assert(oLitEnd < op + sequenceLength);

    /* copy literals */
    ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
    op = oLitEnd;
    *litPtr = iLitEnd;

    /* copy Match */
    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
        /* offset beyond prefix */
        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
        match = dictEnd - (prefixStart-match);
        if (match + sequence.matchLength <= dictEnd) {
            memmove(oLitEnd, match, sequence.matchLength);
            return sequenceLength;
        }
        /* span extDict & currentPrefixSegment */
        {   size_t const length1 = dictEnd - match;
            memmove(oLitEnd, match, length1);
            op = oLitEnd + length1;
            sequence.matchLength -= length1;
            match = prefixStart;
    }   }
    ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
    return sequenceLength;
}

HINT_INLINE
size_t ZSTD_execSequence(BYTE* op,
                         BYTE* const oend, seq_t sequence,
                         const BYTE** litPtr, const BYTE* const litLimit,
                         const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
{
    BYTE* const oLitEnd = op + sequence.litLength;
    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;   /* risk : address space underflow on oend=NULL */
    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
    const BYTE* match = oLitEnd - sequence.offset;

    assert(op != NULL /* Precondition */);
    assert(oend_w < oend /* No underflow */);
    /* Handle edge cases in a slow path:
     *   - Read beyond end of literals
     *   - Match end is within WILDCOPY_OVERLENGTH of oend
     *   - 32-bit mode and the match length overflows
     */
    if (UNLIKELY(
            iLitEnd > litLimit ||
            oMatchEnd > oend_w ||
            (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
        return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);

    /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
    assert(op <= oLitEnd /* No overflow */);
    assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
    assert(oMatchEnd <= oend /* No underflow */);
    assert(iLitEnd <= litLimit /* Literal length is in bounds */);
    assert(oLitEnd <= oend_w /* Can wildcopy literals */);
    assert(oMatchEnd <= oend_w /* Can wildcopy matches */);

    /* Copy Literals:
     * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
     * We likely don't need the full 32-byte wildcopy.
     */
    assert(WILDCOPY_OVERLENGTH >= 16);
    ZSTD_copy16(op, (*litPtr));
    if (UNLIKELY(sequence.litLength > 16)) {
        ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);
    }
    op = oLitEnd;
    *litPtr = iLitEnd;   /* update for next sequence */

    /* Copy Match */
    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
        /* offset beyond prefix -> go into extDict */
        RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
        match = dictEnd + (match - prefixStart);
        if (match + sequence.matchLength <= dictEnd) {
            memmove(oLitEnd, match, sequence.matchLength);
            return sequenceLength;
        }
        /* span extDict & currentPrefixSegment */
        {   size_t const length1 = dictEnd - match;
            memmove(oLitEnd, match, length1);
            op = oLitEnd + length1;
            sequence.matchLength -= length1;
            match = prefixStart;
    }   }
    /* Match within prefix of 1 or more bytes */
    assert(op <= oMatchEnd);
    assert(oMatchEnd <= oend_w);
    assert(match >= prefixStart);
    assert(sequence.matchLength >= 1);

    /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
     * without overlap checking.
     */
    if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {
        /* We bet on a full wildcopy for matches, since we expect matches to be
         * longer than literals (in general). In silesia, ~10% of matches are longer
         * than 16 bytes.
         */
        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
        return sequenceLength;
    }
    assert(sequence.offset < WILDCOPY_VECLEN);

    /* Copy 8 bytes and spread the offset to be >= 8. */
    ZSTD_overlapCopy8(&op, &match, sequence.offset);

    /* If the match length is > 8 bytes, then continue with the wildcopy. */
    if (sequence.matchLength > 8) {
        assert(op < oMatchEnd);
        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);
    }
    return sequenceLength;
}

static void
ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
{
    const void* ptr = dt;
    const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
    DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
                (U32)DStatePtr->state, DTableH->tableLog);
    BIT_reloadDStream(bitD);
    DStatePtr->table = dt + 1;
}

FORCE_INLINE_TEMPLATE void
ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
{
    ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];
    U32 const nbBits = DInfo.nbBits;
    size_t const lowBits = BIT_readBits(bitD, nbBits);
    DStatePtr->state = DInfo.nextState + lowBits;
}

FORCE_INLINE_TEMPLATE void
ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ZSTD_seqSymbol const DInfo)
{
    U32 const nbBits = DInfo.nbBits;
    size_t const lowBits = BIT_readBits(bitD, nbBits);
    DStatePtr->state = DInfo.nextState + lowBits;
}

/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
 * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
 * bits before reloading. This value is the maximum number of bits we read
 * after reloading when we are decoding long offsets.
 */
#define LONG_OFFSETS_MAX_EXTRA_BITS_32                       \
    (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32       \
        ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32  \
        : 0)
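
/* With the current limits (ZSTD_WINDOWLOG_MAX_32 == 30 and
 * STREAM_ACCUMULATOR_MIN_32 == 25) this evaluates to 5, which the
 * static assert in ZSTD_decodeSequence() below relies on. */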

typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;

FORCE_INLINE_TEMPLATE seq_t
ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
{
    seq_t seq;
    ZSTD_seqSymbol const llDInfo = seqState->stateLL.table[seqState->stateLL.state];
    ZSTD_seqSymbol const mlDInfo = seqState->stateML.table[seqState->stateML.state];
    ZSTD_seqSymbol const ofDInfo = seqState->stateOffb.table[seqState->stateOffb.state];
    U32 const llBase = llDInfo.baseValue;
    U32 const mlBase = mlDInfo.baseValue;
    U32 const ofBase = ofDInfo.baseValue;
    BYTE const llBits = llDInfo.nbAdditionalBits;
    BYTE const mlBits = mlDInfo.nbAdditionalBits;
    BYTE const ofBits = ofDInfo.nbAdditionalBits;
    BYTE const totalBits = llBits+mlBits+ofBits;

    /* sequence */
    {   size_t offset;
        if (ofBits > 1) {
            ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
            ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
            assert(ofBits <= MaxOff);
            if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
                U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
                offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
                BIT_reloadDStream(&seqState->DStream);
                if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
                assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32);   /* to avoid another reload */
            } else {
                offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/);   /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
                if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
            }
            seqState->prevOffset[2] = seqState->prevOffset[1];
            seqState->prevOffset[1] = seqState->prevOffset[0];
            seqState->prevOffset[0] = offset;
        } else {
            U32 const ll0 = (llBase == 0);
            if (LIKELY((ofBits == 0))) {
                if (LIKELY(!ll0))
                    offset = seqState->prevOffset[0];
                else {
                    offset = seqState->prevOffset[1];
                    seqState->prevOffset[1] = seqState->prevOffset[0];
                    seqState->prevOffset[0] = offset;
                }
            } else {
                offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);
                {   size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
                    temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */
                    if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
                    seqState->prevOffset[1] = seqState->prevOffset[0];
                    seqState->prevOffset[0] = offset = temp;
        }   }   }
        seq.offset = offset;
    }

    seq.matchLength = mlBase;
    if (mlBits > 0)
        seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);

    if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
        BIT_reloadDStream(&seqState->DStream);
    if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
        BIT_reloadDStream(&seqState->DStream);
    /* Ensure there are enough bits to read the rest of data in 64-bit mode. */
    ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);

    seq.litLength = llBase;
    if (llBits > 0)
        seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);

    if (MEM_32bits())
        BIT_reloadDStream(&seqState->DStream);

    DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
                (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);

    /* ANS state update
     * gcc-9.0.0 does 2.5% worse with ZSTD_updateFseStateWithDInfo().
     * clang-9.2.0 does 7% worse with ZSTD_updateFseState().
     * Naturally it seems like ZSTD_updateFseStateWithDInfo() should be the
     * better option, so it is the default for other compilers. But, if you
     * measure that it is worse, please put up a pull request.
     */
    {
#if defined(__GNUC__) && !defined(__clang__)
        const int kUseUpdateFseState = 1;
#else
        const int kUseUpdateFseState = 0;
#endif
        if (kUseUpdateFseState) {
            ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */
            ZSTD_updateFseState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */
            if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
            ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */
        } else {
            ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llDInfo);    /* <=  9 bits */
            ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlDInfo);    /* <=  9 bits */
            if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
            ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofDInfo);  /* <=  8 bits */
        }
    }

    return seq;
}

#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
static int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
{
    size_t const windowSize = dctx->fParams.windowSize;
    /* No dictionary used. */
    if (dctx->dictContentEndForFuzzing == NULL) return 0;
    /* Dictionary is our prefix. */
    if (prefixStart == dctx->dictContentBeginForFuzzing) return 1;
    /* Dictionary is not our ext-dict. */
    if (dctx->dictEnd != dctx->dictContentEndForFuzzing) return 0;
    /* Dictionary is not within our window size. */
    if ((size_t)(oLitEnd - prefixStart) >= windowSize) return 0;
    /* Dictionary is active. */
    return 1;
}

MEM_STATIC void ZSTD_assertValidSequence(
        ZSTD_DCtx const* dctx,
        BYTE const* op, BYTE const* oend,
        seq_t const seq,
        BYTE const* prefixStart, BYTE const* virtualStart)
{
    size_t const windowSize = dctx->fParams.windowSize;
    size_t const sequenceSize = seq.litLength + seq.matchLength;
    BYTE const* const oLitEnd = op + seq.litLength;
    DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u",
            (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
    assert(op <= oend);
    assert((size_t)(oend - op) >= sequenceSize);
    assert(sequenceSize <= ZSTD_BLOCKSIZE_MAX);
    if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) {
        size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing);
        /* Offset must be within the dictionary. */
        assert(seq.offset <= (size_t)(oLitEnd - virtualStart));
        assert(seq.offset <= windowSize + dictSize);
    } else {
        /* Offset must be within our window. */
        assert(seq.offset <= windowSize);
    }
}
#endif

#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
FORCE_INLINE_TEMPLATE size_t
DONT_VECTORIZE
ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
                               void* dst, size_t maxDstSize,
                         const void* seqStart, size_t seqSize, int nbSeq,
                         const ZSTD_longOffset_e isLongOffset,
                         const int frame)
{
    const BYTE* ip = (const BYTE*)seqStart;
    const BYTE* const iend = ip + seqSize;
    BYTE* const ostart = (BYTE* const)dst;
    BYTE* const oend = ostart + maxDstSize;
    BYTE* op = ostart;
    const BYTE* litPtr = dctx->litPtr;
    const BYTE* const litEnd = litPtr + dctx->litSize;
    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
    const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
    DEBUGLOG(5, "ZSTD_decompressSequences_body");
    (void)frame;

    /* Regen sequences */
    if (nbSeq) {
        seqState_t seqState;
        size_t error = 0;
        dctx->fseEntropy = 1;
        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
        RETURN_ERROR_IF(
            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
            corruption_detected, "");
        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
        assert(dst != NULL);

        ZSTD_STATIC_ASSERT(
                BIT_DStream_unfinished < BIT_DStream_completed &&
                BIT_DStream_endOfBuffer < BIT_DStream_completed &&
                BIT_DStream_completed < BIT_DStream_overflow);

#if defined(__GNUC__) && defined(__x86_64__)
        /* Align the decompression loop to 32 + 16 bytes.
         *
         * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression
         * speed swings based on the alignment of the decompression loop. This
         * performance swing is caused by parts of the decompression loop falling
         * out of the DSB. The entire decompression loop should fit in the DSB;
         * when it can't we get much worse performance. You can measure if you've
         * hit the good case or the bad case with this perf command for some
         * compressed file test.zst:
         *
         *     perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \
         *               -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst
         *
         * If you see most cycles served out of the MITE you've hit the bad case.
         * If you see most cycles served out of the DSB you've hit the good case.
         * If it is pretty even then you may be in an okay case.
         *
         * I've been able to reproduce this issue on the following CPUs:
         * - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9
         *             Use Instruments->Counters to get DSB/MITE cycles.
         *             I never got performance swings, but I was able to
         *             go from the good case of mostly DSB to half of the
         *             cycles served from MITE.
         * - Coffeelake: Intel i9-9900k
         *
         * I haven't been able to reproduce the instability or DSB misses on any
         * of the following CPUs:
         * - Haswell
         * - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz
         * - Skylake
         *
         * If you are seeing performance instability, this script can help test.
         * It tests on 4 commits in zstd where I saw performance change.
         *
         * https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4
         */
        __asm__(".p2align 5");
        __asm__("nop");
        __asm__(".p2align 4");
#endif
        for ( ; ; ) {
            seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
            assert(!ZSTD_isError(oneSeqSize));
            if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
#endif
            DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
            BIT_reloadDStream(&(seqState.DStream));
            /* gcc and clang both don't like early returns in this loop.
             * gcc doesn't like early breaks either.
             * Instead save an error and report it at the end.
             * When there is an error, don't increment op, so we don't
             * overwrite.
             */
            if (UNLIKELY(ZSTD_isError(oneSeqSize))) error = oneSeqSize;
            else op += oneSeqSize;
            if (UNLIKELY(!--nbSeq)) break;
        }

        /* check if reached exact end */
        DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
        if (ZSTD_isError(error)) return error;
        RETURN_ERROR_IF(nbSeq, corruption_detected, "");
        RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
        /* save reps for next block */
        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
    }

    /* last literal segment */
    {   size_t const lastLLSize = litEnd - litPtr;
        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
        if (op != NULL) {
            memcpy(op, litPtr, lastLLSize);
            op += lastLLSize;
        }
    }

    return op-ostart;
}

static size_t
ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
                                 void* dst, size_t maxDstSize,
                           const void* seqStart, size_t seqSize, int nbSeq,
                           const ZSTD_longOffset_e isLongOffset,
                           const int frame)
{
    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */

#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT

FORCE_INLINE_TEMPLATE size_t
ZSTD_prefetchMatch(size_t prefixPos, seq_t const sequence,
                   const BYTE* const prefixStart, const BYTE* const dictEnd)
{
    prefixPos += sequence.litLength;
    {   const BYTE* const matchBase = (sequence.offset > prefixPos) ? dictEnd : prefixStart;
        const BYTE* const match = matchBase + prefixPos - sequence.offset;   /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
                                                                              * No consequence though : no memory access will occur, offset is only used for prefetching */
        PREFETCH_L1(match); PREFETCH_L1(match + sequence.matchLength - 1);   /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
    }
    return prefixPos + sequence.matchLength;
}

/* This decoding function employs prefetching
 * to reduce the latency impact of cache misses.
 * It's generally employed when a block contains a significant portion of long-distance matches
 * or when coupled with a "cold" dictionary */
FORCE_INLINE_TEMPLATE size_t
ZSTD_decompressSequencesLong_body(
                               ZSTD_DCtx* dctx,
                               void* dst, size_t maxDstSize,
                         const void* seqStart, size_t seqSize, int nbSeq,
                         const ZSTD_longOffset_e isLongOffset,
                         const int frame)
{
    const BYTE* ip = (const BYTE*)seqStart;
    const BYTE* const iend = ip + seqSize;
    BYTE* const ostart = (BYTE* const)dst;
    BYTE* const oend = ostart + maxDstSize;
    BYTE* op = ostart;
    const BYTE* litPtr = dctx->litPtr;
    const BYTE* const litEnd = litPtr + dctx->litSize;
    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
    const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
    (void)frame;

    /* Regen sequences */
    if (nbSeq) {
#define STORED_SEQS 4
#define STORED_SEQS_MASK (STORED_SEQS-1)
#define ADVANCED_SEQS 4
        seq_t sequences[STORED_SEQS];
        int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
        seqState_t seqState;
        int seqNb;
        size_t prefixPos = (size_t)(op-prefixStart); /* track position relative to prefixStart */

        dctx->fseEntropy = 1;
        { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
        assert(dst != NULL);
        assert(iend >= ip);
        RETURN_ERROR_IF(
            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
            corruption_detected, "");
        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);

        /* prepare in advance */
        for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
            seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
            prefixPos = ZSTD_prefetchMatch(prefixPos, sequence, prefixStart, dictEnd);
            sequences[seqNb] = sequence;
        }
        RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected, "");

        /* decode and decompress */
        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
            seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
            assert(!ZSTD_isError(oneSeqSize));
            if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
#endif
            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;

            prefixPos = ZSTD_prefetchMatch(prefixPos, sequence, prefixStart, dictEnd);
            sequences[seqNb & STORED_SEQS_MASK] = sequence;
            op += oneSeqSize;
        }
        RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected, "");

        /* finish queue */
        seqNb -= seqAdvance;
        for ( ; seqNb<nbSeq ; seqNb++) {
            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
            assert(!ZSTD_isError(oneSeqSize));
            if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
#endif
            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
            op += oneSeqSize;
        }

        /* save reps for next block */
        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
    }

    /* last literal segment */
    {   size_t const lastLLSize = litEnd - litPtr;
        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
        if (op != NULL) {
            memcpy(op, litPtr, lastLLSize);
            op += lastLLSize;
        }
    }

    return op-ostart;
}

static size_t
ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
                                 void* dst, size_t maxDstSize,
                           const void* seqStart, size_t seqSize, int nbSeq,
                           const ZSTD_longOffset_e isLongOffset,
                           const int frame)
{
    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */



#if DYNAMIC_BMI2

#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
static TARGET_ATTRIBUTE("bmi2") size_t
DONT_VECTORIZE
ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
                                 void* dst, size_t maxDstSize,
                           const void* seqStart, size_t seqSize, int nbSeq,
                           const ZSTD_longOffset_e isLongOffset,
                           const int frame)
{
    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */

#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
static TARGET_ATTRIBUTE("bmi2") size_t
ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
                                 void* dst, size_t maxDstSize,
                           const void* seqStart, size_t seqSize, int nbSeq,
                           const ZSTD_longOffset_e isLongOffset,
                           const int frame)
{
    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */

#endif /* DYNAMIC_BMI2 */

typedef size_t (*ZSTD_decompressSequences_t)(
                            ZSTD_DCtx* dctx,
                            void* dst, size_t maxDstSize,
                            const void* seqStart, size_t seqSize, int nbSeq,
                            const ZSTD_longOffset_e isLongOffset,
                            const int frame);

#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
static size_t
ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
                   const void* seqStart, size_t seqSize, int nbSeq,
                   const ZSTD_longOffset_e isLongOffset,
                   const int frame)
{
    DEBUGLOG(5, "ZSTD_decompressSequences");
#if DYNAMIC_BMI2
    if (dctx->bmi2) {
        return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
    }
#endif
    return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */


#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
/* ZSTD_decompressSequencesLong() :
 * decompression function triggered when a minimum share of offsets is considered "long",
 * aka out of cache.
 * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance".
 * This function will try to mitigate main memory latency through the use of prefetching */
static size_t
ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
                             void* dst, size_t maxDstSize,
                             const void* seqStart, size_t seqSize, int nbSeq,
                             const ZSTD_longOffset_e isLongOffset,
                             const int frame)
{
    DEBUGLOG(5, "ZSTD_decompressSequencesLong");
#if DYNAMIC_BMI2
    if (dctx->bmi2) {
        return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
    }
#endif
    return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */



#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
/* ZSTD_getLongOffsetsShare() :
 * condition : offTable must be valid
 * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
 *           compared to maximum possible of (1<<OffFSELog) */
static unsigned
ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
{
    const void* ptr = offTable;
    U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
    const ZSTD_seqSymbol* table = offTable + 1;
    U32 const max = 1 << tableLog;
    U32 u, total = 0;
    DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);

    assert(max <= (1 << OffFSELog));  /* max not too large */
    for (u=0; u<max; u++) {
        if (table[u].nbAdditionalBits > 22) total += 1;
    }

    assert(tableLog <= OffFSELog);
    total <<= (OffFSELog - tableLog);  /* scale to OffFSELog */

    return total;
}
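
/* Worked example (illustrative) : with tableLog==5, if 2 of the 32 cells carry
 * more than 22 additional bits, total==2 is scaled by << (8-5) to 16, i.e. a
 * share of 16/256 (~6.25%). On 64-bit hosts the caller's threshold is 7/256
 * (~2.73%), so this table would select the prefetching decoder. */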
#endif

size_t
ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
                              void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize, const int frame)
{   /* blockType == blockCompressed */
    const BYTE* ip = (const BYTE*)src;
    /* isLongOffset must be true if there are long offsets.
     * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
     * We don't expect that to be the case in 64-bit mode.
     * In block mode, window size is not known, so we have to be conservative.
     * (note: but it could be evaluated from current-lowLimit)
     */
    ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
    DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);

    RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, "");

    /* Decode literals section */
    {   size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
        DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
        if (ZSTD_isError(litCSize)) return litCSize;
        ip += litCSize;
        srcSize -= litCSize;
    }

    /* Build Decoding Tables */
    {
        /* These macros control at build-time which decompressor implementation
         * we use. If neither is defined, we do some inspection and dispatch at
         * runtime.
         */
#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
        int usePrefetchDecoder = dctx->ddictIsCold;
#endif
        int nbSeq;
        size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
        if (ZSTD_isError(seqHSize)) return seqHSize;
        ip += seqHSize;
        srcSize -= seqHSize;

        RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled");

#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
        if ( !usePrefetchDecoder
          && (!frame || (dctx->fParams.windowSize > (1<<24)))
          && (nbSeq>ADVANCED_SEQS) ) {  /* could probably use a larger nbSeq limit */
            U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
            U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
            usePrefetchDecoder = (shareLongOffsets >= minShare);
        }
#endif

        dctx->ddictIsCold = 0;

#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
        if (usePrefetchDecoder)
#endif
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
            return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
#endif

#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
        /* else */
        return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
#endif
    }
}


void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
{
    if (dst != dctx->previousDstEnd) {   /* not contiguous */
        dctx->dictEnd = dctx->previousDstEnd;
        dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
        dctx->prefixStart = dst;
        dctx->previousDstEnd = dst;
    }
}


size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
                            void* dst, size_t dstCapacity,
                      const void* src, size_t srcSize)
{
    size_t dSize;
    ZSTD_checkContinuity(dctx, dst);
    dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
    dctx->previousDstEnd = (char*)dst + dSize;
    return dSize;
}