/* ******************************************************************
 * Common functions of New Generation Entropy library
 * Copyright (c) Yann Collet, Facebook, Inc.
 *
 *  You can contact the author at :
 *  - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

/* *************************************
*  Dependencies
***************************************/
#include "mem.h"
#include "error_private.h"       /* ERR_*, ERROR */
#define FSE_STATIC_LINKING_ONLY  /* FSE_MIN_TABLELOG */
#include "fse.h"
#define HUF_STATIC_LINKING_ONLY  /* HUF_TABLELOG_ABSOLUTEMAX */
#include "huf.h"


/*===   Version   ===*/
unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }


/*===   Error Management   ===*/
unsigned FSE_isError(size_t code) { return ERR_isError(code); }
const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }

unsigned HUF_isError(size_t code) { return ERR_isError(code); }
const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }


/*-**************************************************************
*  FSE NCount encoding-decoding
****************************************************************/
static U32 FSE_ctz(U32 val)
{
    assert(val != 0);
    {
#   if defined(_MSC_VER)   /* Visual */
        if (val != 0) {
            unsigned long r;
            _BitScanForward(&r, val);
            return (unsigned)r;
        } else {
            /* Should not reach this code path */
            __assume(0);
        }
#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* GCC Intrinsic */
        return __builtin_ctz(val);
#   elif defined(__ICCARM__)    /* IAR Intrinsic */
        return __CTZ(val);
#   else   /* Software version */
        U32 count = 0;
        while ((val & 1) == 0) {
            val >>= 1;
            ++count;
        }
        return count;
#   endif
    }
}
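
/* Illustration: FSE_ctz() lets the NCount decoder below consume a run of
 * 2-bit 0b11 repeat flags in a single step.  FSE_ctz(~bitStream | 0x80000000)
 * counts the consecutive 1-bits at the bottom of bitStream (the OR keeps the
 * argument non-zero), and `>> 1` converts that into the number of complete
 * 0b11 pairs.  E.g. if bitStream ends in binary ...0'1111, then ~bitStream
 * ends in ...1'0000, FSE_ctz() returns 4, and 4 >> 1 == 2 repeats are
 * consumed at once. */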

FORCE_INLINE_TEMPLATE
size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
                           const void* headerBuffer, size_t hbSize)
{
    const BYTE* const istart = (const BYTE*) headerBuffer;
    const BYTE* const iend = istart + hbSize;
    const BYTE* ip = istart;
    int nbBits;
    int remaining;
    int threshold;
    U32 bitStream;
    int bitCount;
    unsigned charnum = 0;
    unsigned const maxSV1 = *maxSVPtr + 1;
    int previous0 = 0;

    if (hbSize < 8) {
        /* This function only works when hbSize >= 8 */
        char buffer[8] = {0};
        ZSTD_memcpy(buffer, headerBuffer, hbSize);
        {   size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
                                                    buffer, sizeof(buffer));
            if (FSE_isError(countSize)) return countSize;
            if (countSize > hbSize) return ERROR(corruption_detected);
            return countSize;
    }   }
    assert(hbSize >= 8);

    /* init */
    ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0]));   /* all symbols not present in NCount have a frequency of 0 */
    bitStream = MEM_readLE32(ip);
    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
    bitStream >>= 4;
    bitCount = 4;
    *tableLogPtr = nbBits;
    remaining = (1<<nbBits)+1;
    threshold = 1<<nbBits;
    nbBits++;

    for (;;) {
        if (previous0) {
            /* Count the number of repeats. Each time the
             * 2-bit repeat code is 0b11 there is another
             * repeat.
             * Avoid UB by setting the high bit to 1.
             */
            int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
            while (repeats >= 12) {
                charnum += 3 * 12;
                if (LIKELY(ip <= iend-7)) {
                    ip += 3;
                } else {
                    bitCount -= (int)(8 * (iend - 7 - ip));
                    bitCount &= 31;
                    ip = iend - 4;
                }
                bitStream = MEM_readLE32(ip) >> bitCount;
                repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
            }
            charnum += 3 * repeats;
            bitStream >>= 2 * repeats;
            bitCount += 2 * repeats;

            /* Add the final repeat which isn't 0b11. */
            assert((bitStream & 3) < 3);
            charnum += bitStream & 3;
            bitCount += 2;

            /* This is an error, but break and return an error
             * at the end, because returning out of a loop makes
             * it harder for the compiler to optimize.
             */
            if (charnum >= maxSV1) break;

            /* We don't need to set the normalized count to 0
             * because we already memset the whole buffer to 0.
             */

            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                assert((bitCount >> 3) <= 3); /* For first condition to work */
                ip += bitCount>>3;
                bitCount &= 7;
            } else {
                bitCount -= (int)(8 * (iend - 4 - ip));
                bitCount &= 31;
                ip = iend - 4;
            }
            bitStream = MEM_readLE32(ip) >> bitCount;
        }
        {
            int const max = (2*threshold-1) - remaining;
            int count;

            if ((bitStream & (threshold-1)) < (U32)max) {
                count = bitStream & (threshold-1);
                bitCount += nbBits-1;
            } else {
                count = bitStream & (2*threshold-1);
                if (count >= threshold) count -= max;
                bitCount += nbBits;
            }

            count--;   /* extra accuracy */
            /* When it matters (small blocks), this is a
             * predictable branch, because we don't use -1.
             * A decoded count of -1 marks a "less than 1" probability
             * symbol; it still removes one slot from remaining.
             */
            if (count >= 0) {
                remaining -= count;
            } else {
                assert(count == -1);
                remaining += count;
            }
            normalizedCounter[charnum++] = (short)count;
            previous0 = !count;

            assert(threshold > 1);
            if (remaining < threshold) {
                /* This branch can be folded into the
                 * threshold update condition because we
                 * know that threshold > 1.
                 */
                if (remaining <= 1) break;
                nbBits = BIT_highbit32(remaining) + 1;
                threshold = 1 << (nbBits - 1);
            }
            if (charnum >= maxSV1) break;

            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                ip += bitCount>>3;
                bitCount &= 7;
            } else {
                bitCount -= (int)(8 * (iend - 4 - ip));
                bitCount &= 31;
                ip = iend - 4;
            }
            bitStream = MEM_readLE32(ip) >> bitCount;
    }   }
    if (remaining != 1) return ERROR(corruption_detected);
    /* Only possible when there are too many zeros. */
    if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
    if (bitCount > 32) return ERROR(corruption_detected);
    *maxSVPtr = charnum-1;

    ip += (bitCount+7)>>3;
    return ip-istart;
}

/* Avoids the FORCE_INLINE of the _body() function. */
static size_t FSE_readNCount_body_default(
        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
        const void* headerBuffer, size_t hbSize)
{
    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}

#if DYNAMIC_BMI2
BMI2_TARGET_ATTRIBUTE static size_t FSE_readNCount_body_bmi2(
        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
        const void* headerBuffer, size_t hbSize)
{
    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}
#endif

size_t FSE_readNCount_bmi2(
        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
        const void* headerBuffer, size_t hbSize, int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) {
        return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
    }
#endif
    (void)bmi2;
    return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}

size_t FSE_readNCount(
        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
        const void* headerBuffer, size_t hbSize)
{
    return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
}
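
/* Usage sketch (illustrative only, not part of the library): a caller that
 * wants to decode an NCount header typically proceeds as follows.
 * `src`/`srcSize` stand for the caller's input buffer and are hypothetical.
 *
 *     short ncount[256];            // indexed by symbol value, 0..maxSV
 *     unsigned maxSV = 255;         // in: largest symbol value accepted
 *     unsigned tableLog;            // out: accuracy log of the table
 *     size_t const hSize = FSE_readNCount(ncount, &maxSV, &tableLog, src, srcSize);
 *     if (FSE_isError(hSize)) { ...handle error... }
 *     // on success: maxSV is updated to the last symbol actually present,
 *     // and hSize is the number of header bytes consumed from src.
 */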


/*! HUF_readStats() :
    Read compact Huffman tree, saved by HUF_writeCTable().
    `huffWeight` is the destination buffer.
    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX+1 U32.
    @return : size read from `src`, or an error code.
    Note : Needed by HUF_readCTable() and HUF_readDTableX?().
*/
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize)
{
    U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
    return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0);
}
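
/* Usage sketch (illustrative only): a decoder that needs the weight table of
 * a compressed Huffman header would call HUF_readStats() roughly as follows.
 * `src`/`srcSize` are the caller's input and are hypothetical names.
 *
 *     BYTE weights[HUF_SYMBOLVALUE_MAX + 1];   // one weight per symbol
 *     U32  rankStats[HUF_TABLELOG_MAX + 1];    // histogram of weights
 *     U32  nbSymbols, tableLog;
 *     size_t const hSize = HUF_readStats(weights, sizeof(weights), rankStats,
 *                                        &nbSymbols, &tableLog, src, srcSize);
 *     if (HUF_isError(hSize)) { ...handle error... }
 *     // on success: weights[0..nbSymbols-1] are filled (the last one being
 *     // the implied weight), and hSize bytes were consumed from src.
 */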

FORCE_INLINE_TEMPLATE size_t
HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                   U32* nbSymbolsPtr, U32* tableLogPtr,
                   const void* src, size_t srcSize,
                   void* workSpace, size_t wkspSize,
                   int bmi2)
{
    U32 weightTotal;
    const BYTE* ip = (const BYTE*) src;
    size_t iSize;
    size_t oSize;

    if (!srcSize) return ERROR(srcSize_wrong);
    iSize = ip[0];
    /* ZSTD_memset(huffWeight, 0, hwSize);   *//* is not necessary, even though some analyzers complain ... */

    if (iSize >= 128) {  /* special header */
        oSize = iSize - 127;
        iSize = ((oSize+1)/2);
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        if (oSize >= hwSize) return ERROR(corruption_detected);
        ip += 1;
        {   U32 n;
            for (n=0; n<oSize; n+=2) {
                huffWeight[n]   = ip[n/2] >> 4;
                huffWeight[n+1] = ip[n/2] & 15;
    }   }   }
    else  {   /* header compressed with FSE (normal case) */
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        /* max (hwSize-1) values decoded, as last one is implied */
        oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2);
        if (FSE_isError(oSize)) return oSize;
    }

    /* collect weight stats */
    ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
    weightTotal = 0;
    {   U32 n; for (n=0; n<oSize; n++) {
            if (huffWeight[n] > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
            rankStats[huffWeight[n]]++;
            weightTotal += (1 << huffWeight[n]) >> 1;
    }   }
    if (weightTotal == 0) return ERROR(corruption_detected);

    /* get last non-null symbol weight (implied, total must be 2^n) */
    {   U32 const tableLog = BIT_highbit32(weightTotal) + 1;
        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
        *tableLogPtr = tableLog;
        /* determine last weight */
        {   U32 const total = 1 << tableLog;
            U32 const rest = total - weightTotal;
            U32 const verif = 1 << BIT_highbit32(rest);
            U32 const lastWeight = BIT_highbit32(rest) + 1;
            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
            huffWeight[oSize] = (BYTE)lastWeight;
            rankStats[lastWeight]++;
    }   }
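    /* Worked example (illustrative): if the decoded weights sum to
     * weightTotal == 96, then BIT_highbit32(96) == 6, so tableLog == 7 and
     * total == 128.  rest == 32 is a clean power of 2, lastWeight ==
     * BIT_highbit32(32) + 1 == 6, and indeed 96 + (1 << (6-1)) == 128. */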

    /* check tree construction validity */
    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */

    /* results */
    *nbSymbolsPtr = (U32)(oSize+1);
    return iSize+1;
}

/* Avoids the FORCE_INLINE of the _body() function. */
static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize,
                     void* workSpace, size_t wkspSize)
{
    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
}

#if DYNAMIC_BMI2
static BMI2_TARGET_ATTRIBUTE size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize,
                     void* workSpace, size_t wkspSize)
{
    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1);
}
#endif

size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                     U32* nbSymbolsPtr, U32* tableLogPtr,
                     const void* src, size_t srcSize,
                     void* workSpace, size_t wkspSize,
                     int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) {
        return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
    }
#endif
    (void)bmi2;
    return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
}
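
/* Note on the bmi2 parameter (illustrative): when DYNAMIC_BMI2 is enabled,
 * callers inside the library typically pass a CPU-detection result, e.g.
 * ZSTD_cpuid_bmi2(ZSTD_cpuid()) from "cpu.h", so that the
 * BMI2_TARGET_ATTRIBUTE variant is selected only on CPUs that support BMI2;
 * passing 0 always selects the portable default body. */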
369