/* ******************************************************************
 * Huffman encoder, part of New Generation Entropy library
 * Copyright (c) Yann Collet, Facebook, Inc.
 *
 * You can contact the author at :
 * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
 * - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

/* **************************************************************
*  Compiler specifics
****************************************************************/
#ifdef _MSC_VER    /* Visual Studio */
#  pragma warning(disable : 4127)  /* disable: C4127: conditional expression is constant */
#endif


/* **************************************************************
*  Includes
****************************************************************/
#include "../common/zstd_deps.h"  /* ZSTD_memcpy, ZSTD_memset */
#include "../common/compiler.h"
#include "../common/bitstream.h"
#include "hist.h"
#define FSE_STATIC_LINKING_ONLY   /* FSE_optimalTableLog_internal */
#include "../common/fse.h"        /* header compression */
#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "../common/error_private.h"


/* **************************************************************
*  Error Management
****************************************************************/
#define HUF_isError ERR_isError
#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */


/* **************************************************************
*  Utils
****************************************************************/
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
{
    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
}


/* *******************************************************
*  HUF : Huffman block compression
*********************************************************/
#define HUF_WORKSPACE_MAX_ALIGNMENT 8

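/* HUF_alignUpWorkspace() :
 * Aligns `workspace` upwards to the next multiple of `align` (a power of 2,
 * at most HUF_WORKSPACE_MAX_ALIGNMENT) and shrinks *workspaceSizePtr by the
 * number of bytes skipped. Returns NULL (and zeroes the remaining size) when
 * the workspace is too small to pay for the alignment. */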
static void* HUF_alignUpWorkspace(void* workspace, size_t* workspaceSizePtr, size_t align)
{
    size_t const mask = align - 1;
    size_t const rem = (size_t)workspace & mask;
    size_t const add = (align - rem) & mask;
    BYTE* const aligned = (BYTE*)workspace + add;
    assert((align & (align - 1)) == 0); /* pow 2 */
    assert(align <= HUF_WORKSPACE_MAX_ALIGNMENT);
    if (*workspaceSizePtr >= add) {
        assert(add < align);
        assert(((size_t)aligned & mask) == 0);
        *workspaceSizePtr -= add;
        return aligned;
    } else {
        *workspaceSizePtr = 0;
        return NULL;
    }
}


/* HUF_compressWeights() :
 * Same as FSE_compress(), but dedicated to huff0's weights compression.
 * The use case needs much less stack memory.
 * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
 */
#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6

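/* Workspace layout for HUF_compressWeights() : keeping the FSE CTable, its
 * build scratch buffer and the histogram in a caller-provided struct is what
 * keeps this path off the stack. */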
typedef struct {
    FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
    U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];
    unsigned count[HUF_TABLELOG_MAX+1];
    S16 norm[HUF_TABLELOG_MAX+1];
} HUF_CompressWeightsWksp;

static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightTable, size_t wtSize, void* workspace, size_t workspaceSize)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const oend = ostart + dstSize;

    unsigned maxSymbolValue = HUF_TABLELOG_MAX;
    U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
    HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));

    if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC);

    /* init conditions */
    if (wtSize <= 1) return 0;   /* Not compressible */

    /* Scan input and build symbol stats */
    {   unsigned const maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize);   /* never fails */
        if (maxCount == wtSize) return 1;   /* only a single symbol in src : rle */
        if (maxCount == 1) return 0;        /* each symbol present maximum once => not compressible */
    }

    tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
    CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) );

    /* Write table description header */
    {   CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) );
        op += hSize;
    }

    /* Compress */
    CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) );
    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) );
        if (cSize == 0) return 0;   /* not enough space for compressed data */
        op += cSize;
    }

    return (size_t)(op-ostart);
}

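/* A HUF_CElt packs one symbol's (nbBits, value) pair into a single size_t :
 * nbBits lives in the low 8 bits, and the code value is left-aligned in the
 * top nbBits bits of the word. For illustration, on a 64-bit build a symbol
 * with nbBits=3 and value=0b101 is stored as (0b101 << 61) | 3. */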
static size_t HUF_getNbBits(HUF_CElt elt)
{
    return elt & 0xFF;
}

static size_t HUF_getNbBitsFast(HUF_CElt elt)
{
    return elt;
}

static size_t HUF_getValue(HUF_CElt elt)
{
    return elt & ~0xFF;
}

static size_t HUF_getValueFast(HUF_CElt elt)
{
    return elt;
}

static void HUF_setNbBits(HUF_CElt* elt, size_t nbBits)
{
    assert(nbBits <= HUF_TABLELOG_ABSOLUTEMAX);
    *elt = nbBits;
}

static void HUF_setValue(HUF_CElt* elt, size_t value)
{
    size_t const nbBits = HUF_getNbBits(*elt);
    if (nbBits > 0) {
        assert((value >> nbBits) == 0);
        *elt |= value << (sizeof(HUF_CElt) * 8 - nbBits);
    }
}

typedef struct {
    HUF_CompressWeightsWksp wksp;
    BYTE bitsToWeight[HUF_TABLELOG_MAX + 1];   /* precomputed conversion table */
    BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
} HUF_WriteCTableWksp;

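/* HUF_writeCTable_wksp() :
 * Same as HUF_writeCTable(), but using the externally allocated `workspace`
 * (aligned internally to U32; must hold at least sizeof(HUF_WriteCTableWksp)
 * bytes after alignment) instead of stack space for the weight tables. */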
size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
                            const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog,
                            void* workspace, size_t workspaceSize)
{
    HUF_CElt const* const ct = CTable + 1;
    BYTE* op = (BYTE*)dst;
    U32 n;
    HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));

    /* check conditions */
    if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);

    /* convert to weight */
    wksp->bitsToWeight[0] = 0;
    for (n=1; n<huffLog+1; n++)
        wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
    for (n=0; n<maxSymbolValue; n++)
        wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])];

    /* attempt weights compression by FSE */
    if (maxDstSize < 1) return ERROR(dstSize_tooSmall);
    {   CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
        if ((hSize>1) & (hSize < maxSymbolValue/2)) {   /* FSE compressed */
            op[0] = (BYTE)hSize;
            return hSize+1;
    }   }

    /* write raw values as 4-bits (max : 15) */
    if (maxSymbolValue > (256-128)) return ERROR(GENERIC);   /* should not happen : likely means source cannot be compressed */
    if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall);   /* not enough space within dst buffer */
    op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
    wksp->huffWeight[maxSymbolValue] = 0;   /* to be sure it doesn't cause msan issue in final combination */
    for (n=0; n<maxSymbolValue; n+=2)
        op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]);
    return ((maxSymbolValue+1)/2) + 1;
}

/*! HUF_writeCTable() :
    `CTable` : Huffman tree to save, using huf representation.
    @return : size of saved CTable */
size_t HUF_writeCTable (void* dst, size_t maxDstSize,
                        const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
{
    HUF_WriteCTableWksp wksp;
    return HUF_writeCTable_wksp(dst, maxDstSize, CTable, maxSymbolValue, huffLog, &wksp, sizeof(wksp));
}

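/* HUF_readCTable() :
 * Rebuilds a compression table from the serialized weights produced by
 * HUF_writeCTable(). On success, *maxSymbolValuePtr is set to the last symbol
 * present and *hasZeroWeights reports whether any symbol has weight 0. */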
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
{
    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];   /* init not required, even though some static analyzer may complain */
    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];  /* large enough for values from 0 to 16 */
    U32 tableLog = 0;
    U32 nbSymbols = 0;
    HUF_CElt* const ct = CTable + 1;

    /* get symbol weights */
    CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
    *hasZeroWeights = (rankVal[0] > 0);

    /* check result */
    if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
    if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);

    CTable[0] = tableLog;

    /* Prepare base value per rank */
    {   U32 n, nextRankStart = 0;
        for (n=1; n<=tableLog; n++) {
            U32 curr = nextRankStart;
            nextRankStart += (rankVal[n] << (n-1));
            rankVal[n] = curr;
    }   }

    /* fill nbBits */
    {   U32 n; for (n=0; n<nbSymbols; n++) {
            const U32 w = huffWeight[n];
            HUF_setNbBits(ct + n, (BYTE)(tableLog + 1 - w) & -(w != 0));
    }   }

    /* fill val */
    {   U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0};   /* support w=0=>n=tableLog+1 */
        U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
        { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[HUF_getNbBits(ct[n])]++; }
        /* determine starting value per rank */
        valPerRank[tableLog+1] = 0;   /* for w==0 */
        {   U16 min = 0;
            U32 n; for (n=tableLog; n>0; n--) {   /* start at n=tablelog <-> w=1 */
                valPerRank[n] = min;      /* get starting value within each rank */
                min += nbPerRank[n];
                min >>= 1;
        }   }
        /* assign value within rank, symbol order */
        { U32 n; for (n=0; n<nbSymbols; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); }
    }

    *maxSymbolValuePtr = nbSymbols - 1;
    return readSize;
}

U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue)
{
    const HUF_CElt* ct = CTable + 1;
    assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
    return (U32)HUF_getNbBits(ct[symbolValue]);
}

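/* One node of the Huffman tree under construction : leaves keep their symbol
 * in `byte`; internal nodes only use `count`, `parent` and `nbBits`. */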
typedef struct nodeElt_s {
    U32 count;
    U16 parent;
    BYTE byte;
    BYTE nbBits;
} nodeElt;

/**
 * HUF_setMaxHeight():
 * Enforces maxNbBits on the Huffman tree described in huffNode.
 *
 * It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts
 * the tree so that it is a valid canonical Huffman tree.
 *
 * @pre               The sum of the ranks of each symbol == 2^largestBits,
 *                    where largestBits == huffNode[lastNonNull].nbBits.
 * @post              The sum of the ranks of each symbol == 2^largestBits,
 *                    where largestBits is the return value <= maxNbBits.
 *
 * @param huffNode    The Huffman tree modified in place to enforce maxNbBits.
 * @param lastNonNull The symbol with the lowest count in the Huffman tree.
 * @param maxNbBits   The maximum allowed number of bits, which the Huffman tree
 *                    may not respect. After this function the Huffman tree will
 *                    respect maxNbBits.
 * @return            The maximum number of bits of the Huffman tree after adjustment,
 *                    necessarily no more than maxNbBits.
 */
static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
{
    const U32 largestBits = huffNode[lastNonNull].nbBits;
    /* early exit : no elt > maxNbBits, so the tree is already valid. */
    if (largestBits <= maxNbBits) return largestBits;

    /* there are several too large elements (at least >= 2) */
    {   int totalCost = 0;
        const U32 baseCost = 1 << (largestBits - maxNbBits);
        int n = (int)lastNonNull;

        /* Adjust any ranks > maxNbBits to maxNbBits.
         * Compute totalCost, which is how far we are over 2^largestBits
         * after adjusting the offending ranks.
         */
        while (huffNode[n].nbBits > maxNbBits) {
            totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
            huffNode[n].nbBits = (BYTE)maxNbBits;
            n--;
        }
        /* n stops at huffNode[n].nbBits <= maxNbBits */
        assert(huffNode[n].nbBits <= maxNbBits);
        /* n ends at the index of the smallest symbol using < maxNbBits */
        while (huffNode[n].nbBits == maxNbBits) --n;

        /* renorm totalCost from 2^largestBits to 2^maxNbBits
         * note : totalCost is necessarily a multiple of baseCost */
        assert((totalCost & (baseCost - 1)) == 0);
        totalCost >>= (largestBits - maxNbBits);
        assert(totalCost > 0);

        /* repay normalized cost */
        {   U32 const noSymbol = 0xF0F0F0F0;
            U32 rankLast[HUF_TABLELOG_MAX+2];

            /* Get pos of last (smallest = lowest cum. count) symbol per rank */
            ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
            {   U32 currentNbBits = maxNbBits;
                int pos;
                for (pos=n ; pos >= 0; pos--) {
                    if (huffNode[pos].nbBits >= currentNbBits) continue;
                    currentNbBits = huffNode[pos].nbBits;   /* < maxNbBits */
                    rankLast[maxNbBits-currentNbBits] = (U32)pos;
            }   }

            while (totalCost > 0) {
                /* Try to reduce the next power of 2 above totalCost because we
                 * gain back half the rank.
                 */
                U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1;
                for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
                    U32 const highPos = rankLast[nBitsToDecrease];
                    U32 const lowPos = rankLast[nBitsToDecrease-1];
                    if (highPos == noSymbol) continue;
                    /* Decrease highPos if no symbols of lowPos or if it is
                     * not cheaper to remove 2 lowPos than highPos.
                     */
                    if (lowPos == noSymbol) break;
                    {   U32 const highTotal = huffNode[highPos].count;
                        U32 const lowTotal = 2 * huffNode[lowPos].count;
                        if (highTotal <= lowTotal) break;
                }   }
                /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
                assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1);
                /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
                while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
                    nBitsToDecrease++;
                assert(rankLast[nBitsToDecrease] != noSymbol);
                /* Increase the number of bits to gain back half the rank cost. */
                totalCost -= 1 << (nBitsToDecrease-1);
                huffNode[rankLast[nBitsToDecrease]].nbBits++;

                /* Fix up the new rank.
                 * If the new rank was empty, this symbol is now its smallest.
                 * Otherwise, this symbol will be the largest in the new rank so no adjustment.
                 */
                if (rankLast[nBitsToDecrease-1] == noSymbol)
                    rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];
                /* Fix up the old rank.
                 * If the symbol was at position 0, meaning it was the highest weight symbol in the tree,
                 * it must be the only symbol in its rank, so the old rank now has no symbols.
                 * Otherwise, since the Huffman nodes are sorted by count, the previous position is now
                 * the smallest node in the rank. If the previous position belongs to a different rank,
                 * then the rank is now empty.
                 */
                if (rankLast[nBitsToDecrease] == 0)    /* special case, reached largest symbol */
                    rankLast[nBitsToDecrease] = noSymbol;
                else {
                    rankLast[nBitsToDecrease]--;
                    if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
                        rankLast[nBitsToDecrease] = noSymbol;   /* this rank is now empty */
                }
            }   /* while (totalCost > 0) */

            /* If we've removed too much weight, then we have to add it back.
             * To avoid overshooting again, we only adjust the smallest rank.
             * We take the largest nodes from the lowest rank 0 and move them
             * to rank 1. There's guaranteed to be enough rank 0 symbols because
             * TODO.
             */
            while (totalCost < 0) {   /* Sometimes, cost correction overshoot */
                /* special case : no rank 1 symbol (using maxNbBits-1);
                 * let's create one from largest rank 0 (using maxNbBits).
                 */
                if (rankLast[1] == noSymbol) {
                    while (huffNode[n].nbBits == maxNbBits) n--;
                    huffNode[n+1].nbBits--;
                    assert(n >= 0);
                    rankLast[1] = (U32)(n+1);
                    totalCost++;
                    continue;
                }
                huffNode[ rankLast[1] + 1 ].nbBits--;
                rankLast[1]++;
                totalCost ++;
            }
        }   /* repay normalized cost */
    }   /* there are several too large elements (at least >= 2) */

    return maxNbBits;
}

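/* `base` is the first index of a bucket inside huffNode; `curr` is the next
 * free slot while HUF_sort() scatters symbols into their bucket. */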
typedef struct {
    U16 base;
    U16 curr;
} rankPos;

typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];

/* Number of buckets available for HUF_sort() */
#define RANK_POSITION_TABLE_SIZE 192

typedef struct {
    huffNodeTable huffNodeTbl;
    rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
} HUF_buildCTable_wksp_tables;

/* RANK_POSITION_DISTINCT_COUNT_CUTOFF == Cutoff point in HUF_sort() buckets for which we use log2 bucketing.
 * Strategy is to use as many buckets as possible for representing distinct
 * counts while using the remainder to represent all "large" counts.
 *
 * To satisfy this requirement for 192 buckets, we can do the following:
 * Let buckets 0-166 represent distinct counts of [0, 166]
 * Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing.
 */
#define RANK_POSITION_MAX_COUNT_LOG 32
#define RANK_POSITION_LOG_BUCKETS_BEGIN (RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */
#define RANK_POSITION_DISTINCT_COUNT_CUTOFF RANK_POSITION_LOG_BUCKETS_BEGIN + BIT_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */

/* Return the appropriate bucket index for a given count. See definition of
 * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy.
 */
static U32 HUF_getIndex(U32 const count) {
    return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF)
        ? count
        : BIT_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN;
}
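/* Illustrative examples (assuming the constants above) :
 * count = 100  -> bucket 100 (a distinct-count bucket),
 * count = 1000 -> bucket BIT_highbit32(1000) + 158 = 9 + 158 = 167 (a log2 bucket). */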

/* Helper swap function for HUF_quickSortPartition() */
static void HUF_swapNodes(nodeElt* a, nodeElt* b) {
    nodeElt tmp = *a;
    *a = *b;
    *b = tmp;
}

/* Returns 0 if the huffNode array is not sorted by descending count */
MEM_STATIC int HUF_isSorted(nodeElt huffNode[], U32 const maxSymbolValue1) {
    U32 i;
    for (i = 1; i < maxSymbolValue1; ++i) {
        if (huffNode[i].count > huffNode[i-1].count) {
            return 0;
        }
    }
    return 1;
}

/* Insertion sort by descending order */
HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high) {
    int i;
    int const size = high-low+1;
    huffNode += low;
    for (i = 1; i < size; ++i) {
        nodeElt const key = huffNode[i];
        int j = i - 1;
        while (j >= 0 && huffNode[j].count < key.count) {
            huffNode[j + 1] = huffNode[j];
            j--;
        }
        huffNode[j + 1] = key;
    }
}

/* Pivot helper function for quicksort. */
static int HUF_quickSortPartition(nodeElt arr[], int const low, int const high) {
    /* Simply select rightmost element as pivot. "Better" selectors like
     * median-of-three don't experimentally appear to have any benefit.
     */
    U32 const pivot = arr[high].count;
    int i = low - 1;
    int j = low;
    for ( ; j < high; j++) {
        if (arr[j].count > pivot) {
            i++;
            HUF_swapNodes(&arr[i], &arr[j]);
        }
    }
    HUF_swapNodes(&arr[i + 1], &arr[high]);
    return i + 1;
}

/* Classic quicksort by descending with partially iterative calls
 * to reduce worst case callstack size.
 */
static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) {
    int const kInsertionSortThreshold = 8;
    if (high - low < kInsertionSortThreshold) {
        HUF_insertionSort(arr, low, high);
        return;
    }
    while (low < high) {
        int const idx = HUF_quickSortPartition(arr, low, high);
        if (idx - low < high - idx) {
            HUF_simpleQuickSort(arr, low, idx - 1);
            low = idx + 1;
        } else {
            HUF_simpleQuickSort(arr, idx + 1, high);
            high = idx - 1;
        }
    }
}

/**
 * HUF_sort():
 * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order.
 * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket.
 *
 * @param[out] huffNode       Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled.
 *                            Must have (maxSymbolValue + 1) entries.
 * @param[in]  count          Histogram of the symbols.
 * @param[in]  maxSymbolValue Maximum symbol value.
 * @param      rankPosition   This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries.
 */
static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) {
    U32 n;
    U32 const maxSymbolValue1 = maxSymbolValue+1;

    /* Compute base and set curr to base.
     * For symbol s let lowerRank = HUF_getIndex(count[s]) and rank = lowerRank + 1.
     * See HUF_getIndex to see bucketing strategy.
     * We attribute each symbol to lowerRank's base value, because we want to know where
     * each rank begins in the output, so for rank R we want to count ranks R+1 and above.
     */
    ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
    for (n = 0; n < maxSymbolValue1; ++n) {
        U32 lowerRank = HUF_getIndex(count[n]);
        assert(lowerRank < RANK_POSITION_TABLE_SIZE - 1);
        rankPosition[lowerRank].base++;
    }

    assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
    /* Set up the rankPosition table */
    for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {
        rankPosition[n-1].base += rankPosition[n].base;
        rankPosition[n-1].curr = rankPosition[n-1].base;
    }

    /* Insert each symbol into their appropriate bucket, setting up rankPosition table. */
    for (n = 0; n < maxSymbolValue1; ++n) {
        U32 const c = count[n];
        U32 const r = HUF_getIndex(c) + 1;
        U32 const pos = rankPosition[r].curr++;
        assert(pos < maxSymbolValue1);
        huffNode[pos].count = c;
        huffNode[pos].byte = (BYTE)n;
    }

    /* Sort each bucket. */
    for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) {
        U32 const bucketSize = rankPosition[n].curr - rankPosition[n].base;
        U32 const bucketStartIdx = rankPosition[n].base;
        if (bucketSize > 1) {
            assert(bucketStartIdx < maxSymbolValue1);
            HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize-1);
        }
    }

    assert(HUF_isSorted(huffNode, maxSymbolValue1));
}

/** HUF_buildCTable_wksp() :
 *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
 *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
 */
#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
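/* STARTNODE is the first huffNode index used for internal (parent) nodes :
 * leaves occupy [0, maxSymbolValue], parents are appended from here on. */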

/* HUF_buildTree():
 * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree.
 *
 * @param huffNode       The array sorted by HUF_sort(). Builds the Huffman tree in this array.
 * @param maxSymbolValue The maximum symbol value.
 * @return               The smallest node in the Huffman tree (by count).
 */
static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
{
    nodeElt* const huffNode0 = huffNode - 1;
    int nonNullRank;
    int lowS, lowN;
    int nodeNb = STARTNODE;
    int n, nodeRoot;
    /* init for parents */
    nonNullRank = (int)maxSymbolValue;
    while(huffNode[nonNullRank].count == 0) nonNullRank--;
    lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
    huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
    huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb;
    nodeNb++; lowS-=2;
    for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
    huffNode0[0].count = (U32)(1U<<31);  /* fake entry, strong barrier */

    /* create parents */
    while (nodeNb <= nodeRoot) {
        int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
        int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
        huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
        huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb;
        nodeNb++;
    }

    /* distribute weights (unlimited tree height) */
    huffNode[nodeRoot].nbBits = 0;
    for (n=nodeRoot-1; n>=STARTNODE; n--)
        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
    for (n=0; n<=nonNullRank; n++)
        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;

    return nonNullRank;
}

/**
 * HUF_buildCTableFromTree():
 * Build the CTable given the Huffman tree in huffNode.
 *
 * @param[out] CTable         The output Huffman CTable.
 * @param      huffNode       The Huffman tree.
 * @param      nonNullRank    The last and smallest node in the Huffman tree.
 * @param      maxSymbolValue The maximum symbol value.
 * @param      maxNbBits      The exact maximum number of bits used in the Huffman tree.
 */
static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits)
{
    HUF_CElt* const ct = CTable + 1;
    /* fill result into ctable (val, nbBits) */
    int n;
    U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
    U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
    int const alphabetSize = (int)(maxSymbolValue + 1);
    for (n=0; n<=nonNullRank; n++)
        nbPerRank[huffNode[n].nbBits]++;
    /* determine starting value per rank */
    {   U16 min = 0;
        for (n=(int)maxNbBits; n>0; n--) {
            valPerRank[n] = min;      /* get starting value within each rank */
            min += nbPerRank[n];
            min >>= 1;
    }   }
    for (n=0; n<alphabetSize; n++)
        HUF_setNbBits(ct + huffNode[n].byte, huffNode[n].nbBits);   /* push nbBits per symbol, symbol order */
    for (n=0; n<alphabetSize; n++)
        HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++);   /* assign value within rank, symbol order */
    CTable[0] = maxNbBits;
}

size_t HUF_buildCTable_wksp (HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
{
    HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32));
    nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
    nodeElt* const huffNode = huffNode0+1;
    int nonNullRank;

    /* safety checks */
    if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
        return ERROR(workSpace_tooSmall);
    if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
        return ERROR(maxSymbolValue_tooLarge);
    ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable));

    /* sort, decreasing order */
    HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);

    /* build tree */
    nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);

    /* enforce maxTableLog */
    maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
    if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC);   /* check fit into table */

    HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits);

    return maxNbBits;
}
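/* Typical call sequence (sketch, not prescriptive) : histogram the block with
 * HIST_count(), pick a table log with HUF_optimalTableLog(), build the table
 * with HUF_buildCTable_wksp(), serialize it with HUF_writeCTable_wksp(), then
 * encode with HUF_compress1X_usingCTable() or HUF_compress4X_usingCTable(). */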

size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
{
    HUF_CElt const* ct = CTable + 1;
    size_t nbBits = 0;
    int s;
    for (s = 0; s <= (int)maxSymbolValue; ++s) {
        nbBits += HUF_getNbBits(ct[s]) * count[s];
    }
    return nbBits >> 3;
}

int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
    HUF_CElt const* ct = CTable + 1;
    int bad = 0;
    int s;
    for (s = 0; s <= (int)maxSymbolValue; ++s) {
        bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0);
    }
    return !bad;
}

size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }

/** HUF_CStream_t:
 *  Huffman uses its own BIT_CStream_t implementation.
 *  There are three major differences from BIT_CStream_t:
 *    1. HUF_addBits() takes a HUF_CElt (size_t) which is
 *       the pair (nbBits, value) in the format:
 *         - Bits [0, 4)            = nbBits
 *         - Bits [4, 64 - nbBits)  = 0
 *         - Bits [64 - nbBits, 64) = value
 *    2. The bitContainer is built from the upper bits and
 *       right shifted. E.g. to add a new value of N bits
 *       you right shift the bitContainer by N, then or in
 *       the new value into the N upper bits.
 *    3. The bitstream has two bit containers. You can add
 *       bits to the second container and merge them into
 *       the first container.
 */
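/* For illustration, appending a 3-bit code 0b101 into an empty 64-bit
 * container goes : container >>= 3; container |= (size_t)0b101 << 61;
 * flushing later emits the top bits of the container, bytewise. */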
750*5ff13fbcSAllan Jude
751*5ff13fbcSAllan Jude #define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8)
752*5ff13fbcSAllan Jude
753*5ff13fbcSAllan Jude typedef struct {
754*5ff13fbcSAllan Jude size_t bitContainer[2];
755*5ff13fbcSAllan Jude size_t bitPos[2];
756*5ff13fbcSAllan Jude
757*5ff13fbcSAllan Jude BYTE* startPtr;
758*5ff13fbcSAllan Jude BYTE* ptr;
759*5ff13fbcSAllan Jude BYTE* endPtr;
760*5ff13fbcSAllan Jude } HUF_CStream_t;
761*5ff13fbcSAllan Jude
762*5ff13fbcSAllan Jude /**! HUF_initCStream():
763*5ff13fbcSAllan Jude * Initializes the bitstream.
764*5ff13fbcSAllan Jude * @returns 0 or an error code.
765*5ff13fbcSAllan Jude */
HUF_initCStream(HUF_CStream_t * bitC,void * startPtr,size_t dstCapacity)766*5ff13fbcSAllan Jude static size_t HUF_initCStream(HUF_CStream_t* bitC,
767*5ff13fbcSAllan Jude void* startPtr, size_t dstCapacity)
7680c16b537SWarner Losh {
769*5ff13fbcSAllan Jude ZSTD_memset(bitC, 0, sizeof(*bitC));
770*5ff13fbcSAllan Jude bitC->startPtr = (BYTE*)startPtr;
771*5ff13fbcSAllan Jude bitC->ptr = bitC->startPtr;
772*5ff13fbcSAllan Jude bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer[0]);
773*5ff13fbcSAllan Jude if (dstCapacity <= sizeof(bitC->bitContainer[0])) return ERROR(dstSize_tooSmall);
774*5ff13fbcSAllan Jude return 0;
7750c16b537SWarner Losh }
7760c16b537SWarner Losh
777*5ff13fbcSAllan Jude /*! HUF_addBits():
778*5ff13fbcSAllan Jude * Adds the symbol stored in HUF_CElt elt to the bitstream.
779*5ff13fbcSAllan Jude *
780*5ff13fbcSAllan Jude * @param elt The element we're adding. This is a (nbBits, value) pair.
781*5ff13fbcSAllan Jude * See the HUF_CStream_t docs for the format.
782*5ff13fbcSAllan Jude * @param idx Insert into the bitstream at this idx.
783*5ff13fbcSAllan Jude * @param kFast This is a template parameter. If the bitstream is guaranteed
784*5ff13fbcSAllan Jude * to have at least 4 unused bits after this call it may be 1,
785*5ff13fbcSAllan Jude * otherwise it must be 0. HUF_addBits() is faster when fast is set.
786*5ff13fbcSAllan Jude */
HUF_addBits(HUF_CStream_t * bitC,HUF_CElt elt,int idx,int kFast)787*5ff13fbcSAllan Jude FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast)
788*5ff13fbcSAllan Jude {
789*5ff13fbcSAllan Jude assert(idx <= 1);
790*5ff13fbcSAllan Jude assert(HUF_getNbBits(elt) <= HUF_TABLELOG_ABSOLUTEMAX);
791*5ff13fbcSAllan Jude /* This is efficient on x86-64 with BMI2 because shrx
792*5ff13fbcSAllan Jude * only reads the low 6 bits of the register. The compiler
793*5ff13fbcSAllan Jude * knows this and elides the mask. When fast is set,
794*5ff13fbcSAllan Jude * every operation can use the same value loaded from elt.
795*5ff13fbcSAllan Jude */
796*5ff13fbcSAllan Jude bitC->bitContainer[idx] >>= HUF_getNbBits(elt);
797*5ff13fbcSAllan Jude bitC->bitContainer[idx] |= kFast ? HUF_getValueFast(elt) : HUF_getValue(elt);
798*5ff13fbcSAllan Jude /* We only read the low 8 bits of bitC->bitPos[idx] so it
799*5ff13fbcSAllan Jude * doesn't matter that the high bits have noise from the value.
800*5ff13fbcSAllan Jude */
801*5ff13fbcSAllan Jude bitC->bitPos[idx] += HUF_getNbBitsFast(elt);
802*5ff13fbcSAllan Jude assert((bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
803*5ff13fbcSAllan Jude /* The last 4-bits of elt are dirty if fast is set,
804*5ff13fbcSAllan Jude * so we must not be overwriting bits that have already been
805*5ff13fbcSAllan Jude * inserted into the bit container.
806*5ff13fbcSAllan Jude */
807*5ff13fbcSAllan Jude #if DEBUGLEVEL >= 1
808*5ff13fbcSAllan Jude {
809*5ff13fbcSAllan Jude size_t const nbBits = HUF_getNbBits(elt);
810*5ff13fbcSAllan Jude size_t const dirtyBits = nbBits == 0 ? 0 : BIT_highbit32((U32)nbBits) + 1;
811*5ff13fbcSAllan Jude (void)dirtyBits;
812*5ff13fbcSAllan Jude /* Middle bits are 0. */
813*5ff13fbcSAllan Jude assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0);
814*5ff13fbcSAllan Jude /* We didn't overwrite any bits in the bit container. */
815*5ff13fbcSAllan Jude assert(!kFast || (bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
816*5ff13fbcSAllan Jude (void)dirtyBits;
817*5ff13fbcSAllan Jude }
818*5ff13fbcSAllan Jude #endif
819*5ff13fbcSAllan Jude }
8200c16b537SWarner Losh
HUF_zeroIndex1(HUF_CStream_t * bitC)821*5ff13fbcSAllan Jude FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC)
822*5ff13fbcSAllan Jude {
823*5ff13fbcSAllan Jude bitC->bitContainer[1] = 0;
824*5ff13fbcSAllan Jude bitC->bitPos[1] = 0;
825*5ff13fbcSAllan Jude }
8260c16b537SWarner Losh
827*5ff13fbcSAllan Jude /*! HUF_mergeIndex1() :
828*5ff13fbcSAllan Jude * Merges the bit container @ index 1 into the bit container @ index 0
829*5ff13fbcSAllan Jude * and zeros the bit container @ index 1.
830*5ff13fbcSAllan Jude */
HUF_mergeIndex1(HUF_CStream_t * bitC)831*5ff13fbcSAllan Jude FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC)
832*5ff13fbcSAllan Jude {
833*5ff13fbcSAllan Jude assert((bitC->bitPos[1] & 0xFF) < HUF_BITS_IN_CONTAINER);
834*5ff13fbcSAllan Jude bitC->bitContainer[0] >>= (bitC->bitPos[1] & 0xFF);
835*5ff13fbcSAllan Jude bitC->bitContainer[0] |= bitC->bitContainer[1];
836*5ff13fbcSAllan Jude bitC->bitPos[0] += bitC->bitPos[1];
837*5ff13fbcSAllan Jude assert((bitC->bitPos[0] & 0xFF) <= HUF_BITS_IN_CONTAINER);
838*5ff13fbcSAllan Jude }
839*5ff13fbcSAllan Jude
840*5ff13fbcSAllan Jude /*! HUF_flushBits() :
841*5ff13fbcSAllan Jude * Flushes the bits in the bit container @ index 0.
842*5ff13fbcSAllan Jude *
843*5ff13fbcSAllan Jude * @post bitPos will be < 8.
844*5ff13fbcSAllan Jude * @param kFast If kFast is set then we must know a-priori that
845*5ff13fbcSAllan Jude * the bit container will not overflow.
846*5ff13fbcSAllan Jude */
HUF_flushBits(HUF_CStream_t * bitC,int kFast)847*5ff13fbcSAllan Jude FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast)
848*5ff13fbcSAllan Jude {
849*5ff13fbcSAllan Jude /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */
850*5ff13fbcSAllan Jude size_t const nbBits = bitC->bitPos[0] & 0xFF;
851*5ff13fbcSAllan Jude size_t const nbBytes = nbBits >> 3;
852*5ff13fbcSAllan Jude /* The top nbBits bits of bitContainer are the ones we need. */
853*5ff13fbcSAllan Jude size_t const bitContainer = bitC->bitContainer[0] >> (HUF_BITS_IN_CONTAINER - nbBits);
854*5ff13fbcSAllan Jude /* Mask bitPos to account for the bytes we consumed. */
855*5ff13fbcSAllan Jude bitC->bitPos[0] &= 7;
856*5ff13fbcSAllan Jude assert(nbBits > 0);
857*5ff13fbcSAllan Jude assert(nbBits <= sizeof(bitC->bitContainer[0]) * 8);
858*5ff13fbcSAllan Jude assert(bitC->ptr <= bitC->endPtr);
859*5ff13fbcSAllan Jude MEM_writeLEST(bitC->ptr, bitContainer);
860*5ff13fbcSAllan Jude bitC->ptr += nbBytes;
861*5ff13fbcSAllan Jude assert(!kFast || bitC->ptr <= bitC->endPtr);
862*5ff13fbcSAllan Jude if (!kFast && bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
863*5ff13fbcSAllan Jude /* bitContainer doesn't need to be modified because the leftover
864*5ff13fbcSAllan Jude * bits are already the top bitPos bits. And we don't care about
865*5ff13fbcSAllan Jude * noise in the lower values.
866*5ff13fbcSAllan Jude */
867*5ff13fbcSAllan Jude }
868*5ff13fbcSAllan Jude
869*5ff13fbcSAllan Jude /*! HUF_endMark()
870*5ff13fbcSAllan Jude * @returns The Huffman stream end mark: A 1-bit value = 1.
871*5ff13fbcSAllan Jude */
HUF_endMark(void)872*5ff13fbcSAllan Jude static HUF_CElt HUF_endMark(void)
873*5ff13fbcSAllan Jude {
874*5ff13fbcSAllan Jude HUF_CElt endMark;
875*5ff13fbcSAllan Jude HUF_setNbBits(&endMark, 1);
876*5ff13fbcSAllan Jude HUF_setValue(&endMark, 1);
877*5ff13fbcSAllan Jude return endMark;
878*5ff13fbcSAllan Jude }
879*5ff13fbcSAllan Jude
880*5ff13fbcSAllan Jude /*! HUF_closeCStream() :
881*5ff13fbcSAllan Jude * @return Size of CStream, in bytes,
882*5ff13fbcSAllan Jude * or 0 if it could not fit into dstBuffer */
HUF_closeCStream(HUF_CStream_t * bitC)883*5ff13fbcSAllan Jude static size_t HUF_closeCStream(HUF_CStream_t* bitC)
884*5ff13fbcSAllan Jude {
885*5ff13fbcSAllan Jude HUF_addBits(bitC, HUF_endMark(), /* idx */ 0, /* kFast */ 0);
886*5ff13fbcSAllan Jude HUF_flushBits(bitC, /* kFast */ 0);
887*5ff13fbcSAllan Jude {
888*5ff13fbcSAllan Jude size_t const nbBits = bitC->bitPos[0] & 0xFF;
889*5ff13fbcSAllan Jude if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
890*5ff13fbcSAllan Jude return (bitC->ptr - bitC->startPtr) + (nbBits > 0);
891*5ff13fbcSAllan Jude }
892*5ff13fbcSAllan Jude }
893*5ff13fbcSAllan Jude
894*5ff13fbcSAllan Jude FORCE_INLINE_TEMPLATE void
HUF_encodeSymbol(HUF_CStream_t * bitCPtr,U32 symbol,const HUF_CElt * CTable,int idx,int fast)895*5ff13fbcSAllan Jude HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast)
896*5ff13fbcSAllan Jude {
897*5ff13fbcSAllan Jude HUF_addBits(bitCPtr, CTable[symbol], idx, fast);
898*5ff13fbcSAllan Jude }
899*5ff13fbcSAllan Jude
900*5ff13fbcSAllan Jude FORCE_INLINE_TEMPLATE void
HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t * bitC,const BYTE * ip,size_t srcSize,const HUF_CElt * ct,int kUnroll,int kFastFlush,int kLastFast)901*5ff13fbcSAllan Jude HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC,
902*5ff13fbcSAllan Jude const BYTE* ip, size_t srcSize,
903*5ff13fbcSAllan Jude const HUF_CElt* ct,
904*5ff13fbcSAllan Jude int kUnroll, int kFastFlush, int kLastFast)
905*5ff13fbcSAllan Jude {
906*5ff13fbcSAllan Jude /* Join to kUnroll */
907*5ff13fbcSAllan Jude int n = (int)srcSize;
908*5ff13fbcSAllan Jude int rem = n % kUnroll;
909*5ff13fbcSAllan Jude if (rem > 0) {
910*5ff13fbcSAllan Jude for (; rem > 0; --rem) {
911*5ff13fbcSAllan Jude HUF_encodeSymbol(bitC, ip[--n], ct, 0, /* fast */ 0);
912*5ff13fbcSAllan Jude }
913*5ff13fbcSAllan Jude HUF_flushBits(bitC, kFastFlush);
914*5ff13fbcSAllan Jude }
915*5ff13fbcSAllan Jude assert(n % kUnroll == 0);
916*5ff13fbcSAllan Jude
917*5ff13fbcSAllan Jude /* Join to 2 * kUnroll */
918*5ff13fbcSAllan Jude if (n % (2 * kUnroll)) {
919*5ff13fbcSAllan Jude int u;
920*5ff13fbcSAllan Jude for (u = 1; u < kUnroll; ++u) {
921*5ff13fbcSAllan Jude HUF_encodeSymbol(bitC, ip[n - u], ct, 0, 1);
922*5ff13fbcSAllan Jude }
923*5ff13fbcSAllan Jude HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, 0, kLastFast);
924*5ff13fbcSAllan Jude HUF_flushBits(bitC, kFastFlush);
925*5ff13fbcSAllan Jude n -= kUnroll;
926*5ff13fbcSAllan Jude }
927*5ff13fbcSAllan Jude assert(n % (2 * kUnroll) == 0);
928*5ff13fbcSAllan Jude
929*5ff13fbcSAllan Jude for (; n>0; n-= 2 * kUnroll) {
930*5ff13fbcSAllan Jude /* Encode kUnroll symbols into the bitstream @ index 0. */
931*5ff13fbcSAllan Jude int u;
932*5ff13fbcSAllan Jude for (u = 1; u < kUnroll; ++u) {
933*5ff13fbcSAllan Jude HUF_encodeSymbol(bitC, ip[n - u], ct, /* idx */ 0, /* fast */ 1);
934*5ff13fbcSAllan Jude }
935*5ff13fbcSAllan Jude HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, /* idx */ 0, /* fast */ kLastFast);
936*5ff13fbcSAllan Jude HUF_flushBits(bitC, kFastFlush);
937*5ff13fbcSAllan Jude /* Encode kUnroll symbols into the bitstream @ index 1.
938*5ff13fbcSAllan Jude * This allows us to start filling the bit container
939*5ff13fbcSAllan Jude * without any data dependencies.
940*5ff13fbcSAllan Jude */
941*5ff13fbcSAllan Jude HUF_zeroIndex1(bitC);
942*5ff13fbcSAllan Jude for (u = 1; u < kUnroll; ++u) {
943*5ff13fbcSAllan Jude HUF_encodeSymbol(bitC, ip[n - kUnroll - u], ct, /* idx */ 1, /* fast */ 1);
944*5ff13fbcSAllan Jude }
945*5ff13fbcSAllan Jude HUF_encodeSymbol(bitC, ip[n - kUnroll - kUnroll], ct, /* idx */ 1, /* fast */ kLastFast);
946*5ff13fbcSAllan Jude /* Merge bitstream @ index 1 into the bitstream @ index 0 */
947*5ff13fbcSAllan Jude HUF_mergeIndex1(bitC);
948*5ff13fbcSAllan Jude HUF_flushBits(bitC, kFastFlush);
949*5ff13fbcSAllan Jude }
950*5ff13fbcSAllan Jude assert(n == 0);
951*5ff13fbcSAllan Jude
952*5ff13fbcSAllan Jude }
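/* Illustrative sketch (added commentary, not part of the upstream sources):
 * the loop above encodes symbols from the end of the input towards the
 * beginning, first peeling just enough symbols so that the remaining count is
 * a multiple of 2 * kUnroll. The same alignment pattern in isolation, with a
 * hypothetical per-symbol action processOneSymbol():
 *
 *   static void examplePeelLoop(const BYTE* ip, size_t srcSize, int kUnroll)
 *   {
 *       int n = (int)srcSize;
 *       int rem = n % kUnroll;
 *       int u;
 *       for (; rem > 0; --rem) processOneSymbol(ip[--n]);     // align n to kUnroll
 *       if (n % (2 * kUnroll)) {                              // align n to 2 * kUnroll
 *           for (u = 0; u < kUnroll; ++u) processOneSymbol(ip[n - 1 - u]);
 *           n -= kUnroll;
 *       }
 *       for (; n > 0; n -= 2 * kUnroll) {                     // main unrolled loop
 *           for (u = 0; u < 2 * kUnroll; ++u) processOneSymbol(ip[n - 1 - u]);
 *       }
 *   }
 */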
953*5ff13fbcSAllan Jude
954*5ff13fbcSAllan Jude /**
955*5ff13fbcSAllan Jude * Returns a tight upper bound on the output space needed by Huffman encoding,
956*5ff13fbcSAllan Jude * plus an 8-byte slack buffer to absorb over-writes. If the output buffer is at
957*5ff13fbcSAllan Jude * least this large, no bounds checks are needed during Huffman encoding.
958*5ff13fbcSAllan Jude */
959*5ff13fbcSAllan Jude static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog)
960*5ff13fbcSAllan Jude {
961*5ff13fbcSAllan Jude return ((srcSize * tableLog) >> 3) + 8;
962*5ff13fbcSAllan Jude }
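/* Illustrative arithmetic (added commentary, not part of the upstream sources):
 * each symbol costs at most tableLog bits, so srcSize symbols need at most
 * (srcSize * tableLog) / 8 bytes, plus 8 bytes of slack for the fast flush
 * path's over-writes. For example, srcSize = 1000 and tableLog = 11 gives
 * ((1000 * 11) >> 3) + 8 = 1375 + 8 = 1383 bytes.
 */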
963*5ff13fbcSAllan Jude
9640c16b537SWarner Losh
96519fcbaf1SConrad Meyer FORCE_INLINE_TEMPLATE size_t
96619fcbaf1SConrad Meyer HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
96719fcbaf1SConrad Meyer const void* src, size_t srcSize,
96819fcbaf1SConrad Meyer const HUF_CElt* CTable)
9690c16b537SWarner Losh {
970*5ff13fbcSAllan Jude U32 const tableLog = (U32)CTable[0];
971*5ff13fbcSAllan Jude HUF_CElt const* ct = CTable + 1;
9720c16b537SWarner Losh const BYTE* ip = (const BYTE*) src;
9730c16b537SWarner Losh BYTE* const ostart = (BYTE*)dst;
9740c16b537SWarner Losh BYTE* const oend = ostart + dstSize;
9750c16b537SWarner Losh BYTE* op = ostart;
976*5ff13fbcSAllan Jude HUF_CStream_t bitC;
9770c16b537SWarner Losh
9780c16b537SWarner Losh /* init */
9790c16b537SWarner Losh if (dstSize < 8) return 0; /* not enough space to compress */
980*5ff13fbcSAllan Jude { size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op));
9810c16b537SWarner Losh if (HUF_isError(initErr)) return 0; }
9820c16b537SWarner Losh
983*5ff13fbcSAllan Jude if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11)
984*5ff13fbcSAllan Jude HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 2 : 4, /* kFast */ 0, /* kLastFast */ 0);
985*5ff13fbcSAllan Jude else {
986*5ff13fbcSAllan Jude if (MEM_32bits()) {
987*5ff13fbcSAllan Jude switch (tableLog) {
988*5ff13fbcSAllan Jude case 11:
989*5ff13fbcSAllan Jude HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 0);
990*5ff13fbcSAllan Jude break;
991*5ff13fbcSAllan Jude case 10: ZSTD_FALLTHROUGH;
992*5ff13fbcSAllan Jude case 9: ZSTD_FALLTHROUGH;
993*5ff13fbcSAllan Jude case 8:
994*5ff13fbcSAllan Jude HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 1);
995*5ff13fbcSAllan Jude break;
996*5ff13fbcSAllan Jude case 7: ZSTD_FALLTHROUGH;
997*5ff13fbcSAllan Jude default:
998*5ff13fbcSAllan Jude HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 3, /* kFastFlush */ 1, /* kLastFast */ 1);
999*5ff13fbcSAllan Jude break;
10000c16b537SWarner Losh }
1001*5ff13fbcSAllan Jude } else {
1002*5ff13fbcSAllan Jude switch (tableLog) {
1003*5ff13fbcSAllan Jude case 11:
1004*5ff13fbcSAllan Jude HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 0);
1005*5ff13fbcSAllan Jude break;
1006*5ff13fbcSAllan Jude case 10:
1007*5ff13fbcSAllan Jude HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 1);
1008*5ff13fbcSAllan Jude break;
1009*5ff13fbcSAllan Jude case 9:
1010*5ff13fbcSAllan Jude HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 6, /* kFastFlush */ 1, /* kLastFast */ 0);
1011*5ff13fbcSAllan Jude break;
1012*5ff13fbcSAllan Jude case 8:
1013*5ff13fbcSAllan Jude HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 7, /* kFastFlush */ 1, /* kLastFast */ 0);
1014*5ff13fbcSAllan Jude break;
1015*5ff13fbcSAllan Jude case 7:
1016*5ff13fbcSAllan Jude HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 8, /* kFastFlush */ 1, /* kLastFast */ 0);
1017*5ff13fbcSAllan Jude break;
1018*5ff13fbcSAllan Jude case 6: ZSTD_FALLTHROUGH;
1019*5ff13fbcSAllan Jude default:
1020*5ff13fbcSAllan Jude HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 9, /* kFastFlush */ 1, /* kLastFast */ 1);
1021*5ff13fbcSAllan Jude break;
10220c16b537SWarner Losh }
1023*5ff13fbcSAllan Jude }
1024*5ff13fbcSAllan Jude }
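    /* Added note (not part of the upstream sources): one constraint visible in
     * the dispatch above is that each group of kUnroll symbols, at most
     * kUnroll * tableLog bits, must fit in the bit container between flushes,
     * e.g. 5 * 11 = 55 <= 64 bits on 64-bit targets and 2 * 11 = 22 <= 32 bits
     * on 32-bit targets. */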
1025*5ff13fbcSAllan Jude assert(bitC.ptr <= bitC.endPtr);
10260c16b537SWarner Losh
1027*5ff13fbcSAllan Jude return HUF_closeCStream(&bitC);
10280c16b537SWarner Losh }
10290c16b537SWarner Losh
103019fcbaf1SConrad Meyer #if DYNAMIC_BMI2
10310c16b537SWarner Losh
1032*5ff13fbcSAllan Jude static BMI2_TARGET_ATTRIBUTE size_t
103319fcbaf1SConrad Meyer HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
103419fcbaf1SConrad Meyer const void* src, size_t srcSize,
103519fcbaf1SConrad Meyer const HUF_CElt* CTable)
103619fcbaf1SConrad Meyer {
103719fcbaf1SConrad Meyer return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
103819fcbaf1SConrad Meyer }
103919fcbaf1SConrad Meyer
104019fcbaf1SConrad Meyer static size_t
104119fcbaf1SConrad Meyer HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
104219fcbaf1SConrad Meyer const void* src, size_t srcSize,
104319fcbaf1SConrad Meyer const HUF_CElt* CTable)
104419fcbaf1SConrad Meyer {
104519fcbaf1SConrad Meyer return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
104619fcbaf1SConrad Meyer }
104719fcbaf1SConrad Meyer
104819fcbaf1SConrad Meyer static size_t
104919fcbaf1SConrad Meyer HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
105019fcbaf1SConrad Meyer const void* src, size_t srcSize,
105119fcbaf1SConrad Meyer const HUF_CElt* CTable, const int bmi2)
105219fcbaf1SConrad Meyer {
105319fcbaf1SConrad Meyer if (bmi2) {
105419fcbaf1SConrad Meyer return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
105519fcbaf1SConrad Meyer }
105619fcbaf1SConrad Meyer return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
105719fcbaf1SConrad Meyer }
105819fcbaf1SConrad Meyer
105919fcbaf1SConrad Meyer #else
106019fcbaf1SConrad Meyer
106119fcbaf1SConrad Meyer static size_t
106219fcbaf1SConrad Meyer HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
106319fcbaf1SConrad Meyer const void* src, size_t srcSize,
106419fcbaf1SConrad Meyer const HUF_CElt* CTable, const int bmi2)
106519fcbaf1SConrad Meyer {
106619fcbaf1SConrad Meyer (void)bmi2;
106719fcbaf1SConrad Meyer return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
106819fcbaf1SConrad Meyer }
106919fcbaf1SConrad Meyer
107019fcbaf1SConrad Meyer #endif
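/* Illustrative sketch (added commentary, not part of the upstream sources):
 * the DYNAMIC_BMI2 block above compiles the same body twice, once with a BMI2
 * target attribute and once without, then selects one at runtime. The general
 * pattern, assuming GCC/Clang target attributes and hypothetical names:
 *
 *   __attribute__((target("bmi2")))
 *   static size_t work_bmi2(const void* src, size_t size)    { return doWork(src, size); }
 *   static size_t work_default(const void* src, size_t size) { return doWork(src, size); }
 *   static size_t work(const void* src, size_t size, int hasBmi2)
 *   {
 *       return hasBmi2 ? work_bmi2(src, size) : work_default(src, size);
 *   }
 *
 * zstd hides the exact attribute string behind BMI2_TARGET_ATTRIBUTE for
 * portability.
 */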
107119fcbaf1SConrad Meyer
107219fcbaf1SConrad Meyer size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
107319fcbaf1SConrad Meyer {
1074*5ff13fbcSAllan Jude return HUF_compress1X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
107519fcbaf1SConrad Meyer }
107619fcbaf1SConrad Meyer
1077*5ff13fbcSAllan Jude size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
1078*5ff13fbcSAllan Jude {
1079*5ff13fbcSAllan Jude return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
1080*5ff13fbcSAllan Jude }
108119fcbaf1SConrad Meyer
108219fcbaf1SConrad Meyer static size_t
108319fcbaf1SConrad Meyer HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
108419fcbaf1SConrad Meyer const void* src, size_t srcSize,
108519fcbaf1SConrad Meyer const HUF_CElt* CTable, int bmi2)
10860c16b537SWarner Losh {
10870c16b537SWarner Losh size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
10880c16b537SWarner Losh const BYTE* ip = (const BYTE*) src;
10890c16b537SWarner Losh const BYTE* const iend = ip + srcSize;
10900c16b537SWarner Losh BYTE* const ostart = (BYTE*) dst;
10910c16b537SWarner Losh BYTE* const oend = ostart + dstSize;
10920c16b537SWarner Losh BYTE* op = ostart;
10930c16b537SWarner Losh
10940c16b537SWarner Losh if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */
10950c16b537SWarner Losh if (srcSize < 12) return 0; /* no saving possible : too small input */
10960c16b537SWarner Losh op += 6; /* jumpTable */
10970c16b537SWarner Losh
109837f1f268SConrad Meyer assert(op <= oend);
109937f1f268SConrad Meyer { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
1100*5ff13fbcSAllan Jude if (cSize == 0 || cSize > 65535) return 0;
11010c16b537SWarner Losh MEM_writeLE16(ostart, (U16)cSize);
11020c16b537SWarner Losh op += cSize;
11030c16b537SWarner Losh }
11040c16b537SWarner Losh
11050c16b537SWarner Losh ip += segmentSize;
110637f1f268SConrad Meyer assert(op <= oend);
110737f1f268SConrad Meyer { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
1108*5ff13fbcSAllan Jude if (cSize == 0 || cSize > 65535) return 0;
11090c16b537SWarner Losh MEM_writeLE16(ostart+2, (U16)cSize);
11100c16b537SWarner Losh op += cSize;
11110c16b537SWarner Losh }
11120c16b537SWarner Losh
11130c16b537SWarner Losh ip += segmentSize;
111437f1f268SConrad Meyer assert(op <= oend);
111537f1f268SConrad Meyer { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
1116*5ff13fbcSAllan Jude if (cSize == 0 || cSize > 65535) return 0;
11170c16b537SWarner Losh MEM_writeLE16(ostart+4, (U16)cSize);
11180c16b537SWarner Losh op += cSize;
11190c16b537SWarner Losh }
11200c16b537SWarner Losh
11210c16b537SWarner Losh ip += segmentSize;
112237f1f268SConrad Meyer assert(op <= oend);
112337f1f268SConrad Meyer assert(ip <= iend);
112437f1f268SConrad Meyer { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
1125*5ff13fbcSAllan Jude if (cSize == 0 || cSize > 65535) return 0;
11260c16b537SWarner Losh op += cSize;
11270c16b537SWarner Losh }
11280c16b537SWarner Losh
112937f1f268SConrad Meyer return (size_t)(op-ostart);
11300c16b537SWarner Losh }
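/* Illustrative sketch (added commentary, not part of the upstream sources):
 * HUF_compress4X_usingCTable_internal() writes a 6-byte jump table followed by
 * four compressed streams. The jump table stores the sizes of the first three
 * streams as little-endian U16 values; the fourth stream runs to the end of
 * the block. Each of the first three streams encodes (srcSize+3)/4 symbols and
 * the last one encodes the remainder. A reader could locate the streams like
 * this, assuming cSrc/cSrcSize describe the whole compressed block:
 *
 *   {   const BYTE* const istart = (const BYTE*)cSrc;
 *       size_t const len1 = MEM_readLE16(istart);
 *       size_t const len2 = MEM_readLE16(istart + 2);
 *       size_t const len3 = MEM_readLE16(istart + 4);
 *       const BYTE* const stream1 = istart + 6;
 *       const BYTE* const stream2 = stream1 + len1;
 *       const BYTE* const stream3 = stream2 + len2;
 *       const BYTE* const stream4 = stream3 + len3;
 *       size_t const len4 = cSrcSize - 6 - len1 - len2 - len3;
 *       (void)stream4; (void)len4;
 *   }
 */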
11310c16b537SWarner Losh
113219fcbaf1SConrad Meyer size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
113319fcbaf1SConrad Meyer {
1134*5ff13fbcSAllan Jude return HUF_compress4X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
1135*5ff13fbcSAllan Jude }
1136*5ff13fbcSAllan Jude
1137*5ff13fbcSAllan Jude size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
1138*5ff13fbcSAllan Jude {
1139*5ff13fbcSAllan Jude return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
114019fcbaf1SConrad Meyer }
114119fcbaf1SConrad Meyer
1142a0483764SConrad Meyer typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
11430c16b537SWarner Losh
11440c16b537SWarner Losh static size_t HUF_compressCTable_internal(
11450c16b537SWarner Losh BYTE* const ostart, BYTE* op, BYTE* const oend,
11460c16b537SWarner Losh const void* src, size_t srcSize,
1147a0483764SConrad Meyer HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
11480c16b537SWarner Losh {
1149a0483764SConrad Meyer size_t const cSize = (nbStreams==HUF_singleStream) ?
115037f1f268SConrad Meyer HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) :
115137f1f268SConrad Meyer HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2);
11520c16b537SWarner Losh if (HUF_isError(cSize)) { return cSize; }
11530c16b537SWarner Losh if (cSize==0) { return 0; } /* uncompressible */
11540c16b537SWarner Losh op += cSize;
11550c16b537SWarner Losh /* check compressibility */
115637f1f268SConrad Meyer assert(op >= ostart);
11570c16b537SWarner Losh if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
115837f1f268SConrad Meyer return (size_t)(op-ostart);
11590c16b537SWarner Losh }
11600c16b537SWarner Losh
116119fcbaf1SConrad Meyer typedef struct {
1162a0483764SConrad Meyer unsigned count[HUF_SYMBOLVALUE_MAX + 1];
1163*5ff13fbcSAllan Jude HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];
1164*5ff13fbcSAllan Jude union {
116537f1f268SConrad Meyer HUF_buildCTable_wksp_tables buildCTable_wksp;
1166*5ff13fbcSAllan Jude HUF_WriteCTableWksp writeCTable_wksp;
1167*5ff13fbcSAllan Jude U32 hist_wksp[HIST_WKSP_SIZE_U32];
1168*5ff13fbcSAllan Jude } wksps;
116919fcbaf1SConrad Meyer } HUF_compress_tables_t;
11700c16b537SWarner Losh
1171*5ff13fbcSAllan Jude #define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096
1172*5ff13fbcSAllan Jude #define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */
1173*5ff13fbcSAllan Jude
117419fcbaf1SConrad Meyer /* HUF_compress_internal() :
1175f7cd7fe5SConrad Meyer  * `workSpace` is re-aligned internally (see HUF_alignUpWorkspace()); a buffer
1176*5ff13fbcSAllan Jude  * of HUF_WORKSPACE_SIZE_U64 U64, i.e. HUF_WORKSPACE_SIZE bytes, is always large enough */
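/* Added overview (not part of the upstream sources): HUF_compress_internal()
 * proceeds roughly as follows:
 *   1. optionally return early using the previous table (preferRepeat),
 *   2. optionally sample the input and bail out if it looks incompressible,
 *   3. build the histogram (HIST_count_wksp), handling the RLE and
 *      not-compressible-enough cases,
 *   4. build a Huffman tree (HUF_buildCTable_wksp) and write its header,
 *   5. fall back to the previous table if its estimated cost is lower,
 *   6. encode with 1 or 4 streams via HUF_compressCTable_internal(). */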
1177a0483764SConrad Meyer static size_t
1178a0483764SConrad Meyer HUF_compress_internal (void* dst, size_t dstSize,
11790c16b537SWarner Losh const void* src, size_t srcSize,
11800c16b537SWarner Losh unsigned maxSymbolValue, unsigned huffLog,
1181a0483764SConrad Meyer HUF_nbStreams_e nbStreams,
1182*5ff13fbcSAllan Jude void* workSpace, size_t wkspSize,
118319fcbaf1SConrad Meyer HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
1184*5ff13fbcSAllan Jude const int bmi2, unsigned suspectUncompressible)
11850c16b537SWarner Losh {
1186*5ff13fbcSAllan Jude HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t));
11870c16b537SWarner Losh BYTE* const ostart = (BYTE*)dst;
11880c16b537SWarner Losh BYTE* const oend = ostart + dstSize;
11890c16b537SWarner Losh BYTE* op = ostart;
11900c16b537SWarner Losh
1191*5ff13fbcSAllan Jude HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE);
119237f1f268SConrad Meyer
11930c16b537SWarner Losh /* checks & inits */
1194*5ff13fbcSAllan Jude if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
119519fcbaf1SConrad Meyer if (!srcSize) return 0; /* Uncompressed */
119619fcbaf1SConrad Meyer if (!dstSize) return 0; /* cannot fit anything within dst budget */
11970c16b537SWarner Losh if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
11980c16b537SWarner Losh if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
119919fcbaf1SConrad Meyer if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
12000c16b537SWarner Losh if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
12010c16b537SWarner Losh if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
12020c16b537SWarner Losh
120319fcbaf1SConrad Meyer /* Heuristic : If old table is valid, use it for small inputs */
12040c16b537SWarner Losh if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
120519fcbaf1SConrad Meyer return HUF_compressCTable_internal(ostart, op, oend,
120619fcbaf1SConrad Meyer src, srcSize,
1207a0483764SConrad Meyer nbStreams, oldHufTable, bmi2);
12080c16b537SWarner Losh }
12090c16b537SWarner Losh
1210*5ff13fbcSAllan Jude /* If incompressible data is suspected, sample a small portion of the input first */
1211*5ff13fbcSAllan Jude DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2);
1212*5ff13fbcSAllan Jude if (suspectUncompressible && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) {
1213*5ff13fbcSAllan Jude size_t largestTotal = 0;
1214*5ff13fbcSAllan Jude { unsigned maxSymbolValueBegin = maxSymbolValue;
1215*5ff13fbcSAllan Jude CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
1216*5ff13fbcSAllan Jude largestTotal += largestBegin;
1217*5ff13fbcSAllan Jude }
1218*5ff13fbcSAllan Jude { unsigned maxSymbolValueEnd = maxSymbolValue;
1219*5ff13fbcSAllan Jude CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
1220*5ff13fbcSAllan Jude largestTotal += largestEnd;
1221*5ff13fbcSAllan Jude }
1222*5ff13fbcSAllan Jude if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0; /* heuristic : probably not compressible enough */
1223*5ff13fbcSAllan Jude }
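    /* Added note (not part of the upstream sources): with the constants above,
     * 2 * 4096 = 8192 bytes are sampled, and the block is treated as
     * incompressible when the two samples' most-frequent-symbol counts sum to
     * at most (8192 >> 7) + 4 = 68, i.e. roughly 0.8% of the sampled bytes. */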
1224*5ff13fbcSAllan Jude
12250c16b537SWarner Losh /* Scan input and build symbol stats */
1226*5ff13fbcSAllan Jude { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) );
12270c16b537SWarner Losh if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
12280f743729SConrad Meyer if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
12290c16b537SWarner Losh }
12300c16b537SWarner Losh
12310c16b537SWarner Losh /* Check validity of previous table */
123219fcbaf1SConrad Meyer if ( repeat
123319fcbaf1SConrad Meyer && *repeat == HUF_repeat_check
123419fcbaf1SConrad Meyer && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
12350c16b537SWarner Losh *repeat = HUF_repeat_none;
12360c16b537SWarner Losh }
12370c16b537SWarner Losh /* Heuristic : use existing table for small inputs */
12380c16b537SWarner Losh if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
123919fcbaf1SConrad Meyer return HUF_compressCTable_internal(ostart, op, oend,
124019fcbaf1SConrad Meyer src, srcSize,
1241a0483764SConrad Meyer nbStreams, oldHufTable, bmi2);
12420c16b537SWarner Losh }
12430c16b537SWarner Losh
12440c16b537SWarner Losh /* Build Huffman Tree */
12450c16b537SWarner Losh huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
1246a0483764SConrad Meyer { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
124719fcbaf1SConrad Meyer maxSymbolValue, huffLog,
1248*5ff13fbcSAllan Jude &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
1249a0483764SConrad Meyer CHECK_F(maxBits);
12500c16b537SWarner Losh huffLog = (U32)maxBits;
1251*5ff13fbcSAllan Jude }
125219fcbaf1SConrad Meyer /* Zero unused symbols in CTable, so we can check it for validity */
1253*5ff13fbcSAllan Jude {
1254*5ff13fbcSAllan Jude size_t const ctableSize = HUF_CTABLE_SIZE_ST(maxSymbolValue);
1255*5ff13fbcSAllan Jude size_t const unusedSize = sizeof(table->CTable) - ctableSize * sizeof(HUF_CElt);
1256*5ff13fbcSAllan Jude ZSTD_memset(table->CTable + ctableSize, 0, unusedSize);
12570c16b537SWarner Losh }
12580c16b537SWarner Losh
12590c16b537SWarner Losh /* Write table description header */
1260*5ff13fbcSAllan Jude { CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
1261*5ff13fbcSAllan Jude &table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
126219fcbaf1SConrad Meyer /* Check if using previous huffman table is beneficial */
12630c16b537SWarner Losh if (repeat && *repeat != HUF_repeat_none) {
126419fcbaf1SConrad Meyer size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
126519fcbaf1SConrad Meyer size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
12660c16b537SWarner Losh if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
126719fcbaf1SConrad Meyer return HUF_compressCTable_internal(ostart, op, oend,
126819fcbaf1SConrad Meyer src, srcSize,
1269a0483764SConrad Meyer nbStreams, oldHufTable, bmi2);
127019fcbaf1SConrad Meyer } }
127119fcbaf1SConrad Meyer
127219fcbaf1SConrad Meyer /* Use the new huffman table */
12730c16b537SWarner Losh if (hSize + 12ul >= srcSize) { return 0; }
12740c16b537SWarner Losh op += hSize;
12750c16b537SWarner Losh if (repeat) { *repeat = HUF_repeat_none; }
127619fcbaf1SConrad Meyer if (oldHufTable)
1277f7cd7fe5SConrad Meyer ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
12780c16b537SWarner Losh }
127919fcbaf1SConrad Meyer return HUF_compressCTable_internal(ostart, op, oend,
128019fcbaf1SConrad Meyer src, srcSize,
1281a0483764SConrad Meyer nbStreams, table->CTable, bmi2);
12820c16b537SWarner Losh }
12830c16b537SWarner Losh
12840c16b537SWarner Losh
12850c16b537SWarner Losh size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
12860c16b537SWarner Losh const void* src, size_t srcSize,
12870c16b537SWarner Losh unsigned maxSymbolValue, unsigned huffLog,
12880c16b537SWarner Losh void* workSpace, size_t wkspSize)
12890c16b537SWarner Losh {
129019fcbaf1SConrad Meyer return HUF_compress_internal(dst, dstSize, src, srcSize,
1291a0483764SConrad Meyer maxSymbolValue, huffLog, HUF_singleStream,
129219fcbaf1SConrad Meyer workSpace, wkspSize,
1293*5ff13fbcSAllan Jude NULL, NULL, 0, 0 /*bmi2*/, 0);
12940c16b537SWarner Losh }
12950c16b537SWarner Losh
12960c16b537SWarner Losh size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
12970c16b537SWarner Losh const void* src, size_t srcSize,
12980c16b537SWarner Losh unsigned maxSymbolValue, unsigned huffLog,
12990c16b537SWarner Losh void* workSpace, size_t wkspSize,
1300*5ff13fbcSAllan Jude HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat,
1301*5ff13fbcSAllan Jude int bmi2, unsigned suspectUncompressible)
13020c16b537SWarner Losh {
130319fcbaf1SConrad Meyer return HUF_compress_internal(dst, dstSize, src, srcSize,
1304a0483764SConrad Meyer maxSymbolValue, huffLog, HUF_singleStream,
130519fcbaf1SConrad Meyer workSpace, wkspSize, hufTable,
1306*5ff13fbcSAllan Jude repeat, preferRepeat, bmi2, suspectUncompressible);
13070c16b537SWarner Losh }
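/* Illustrative usage sketch (added commentary, not part of the upstream sources):
 * a caller compressing a sequence of blocks can carry the CTable and the repeat
 * state across calls, so the encoder may skip rebuilding or re-emitting the
 * table when the previous one is still a good fit. Buffer names below are
 * hypothetical:
 *
 *   HUF_CElt hufTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];
 *   HUF_repeat repeat = HUF_repeat_none;
 *   U64 wksp[HUF_WORKSPACE_SIZE_U64];
 *   size_t const cSize = HUF_compress1X_repeat(dst, dstCapacity, src, srcSize,
 *                                              255, HUF_TABLELOG_DEFAULT,
 *                                              wksp, sizeof(wksp),
 *                                              hufTable, &repeat,
 *                                              0, 0, 0);
 *   // trailing arguments : preferRepeat, bmi2, suspectUncompressible
 *   // cSize == 0 means "not compressible"; HUF_isError(cSize) signals an error
 */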
13080c16b537SWarner Losh
130919fcbaf1SConrad Meyer /* HUF_compress4X_wksp():
131019fcbaf1SConrad Meyer  * compress input using 4 streams.
131119fcbaf1SConrad Meyer  * the caller provides the workspace used to generate the compression tables */
13120c16b537SWarner Losh size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
13130c16b537SWarner Losh const void* src, size_t srcSize,
13140c16b537SWarner Losh unsigned maxSymbolValue, unsigned huffLog,
13150c16b537SWarner Losh void* workSpace, size_t wkspSize)
13160c16b537SWarner Losh {
131719fcbaf1SConrad Meyer return HUF_compress_internal(dst, dstSize, src, srcSize,
1318a0483764SConrad Meyer maxSymbolValue, huffLog, HUF_fourStreams,
131919fcbaf1SConrad Meyer workSpace, wkspSize,
1320*5ff13fbcSAllan Jude NULL, NULL, 0, 0 /*bmi2*/, 0);
13210c16b537SWarner Losh }
13220c16b537SWarner Losh
132319fcbaf1SConrad Meyer /* HUF_compress4X_repeat():
132419fcbaf1SConrad Meyer  * compress input using 4 streams.
1325*5ff13fbcSAllan Jude  * when `suspectUncompressible` is set, sample the input first and give up quickly if it looks incompressible.
132619fcbaf1SConrad Meyer  * re-use an existing huffman compression table when it is still a good fit */
13270c16b537SWarner Losh size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
13280c16b537SWarner Losh const void* src, size_t srcSize,
13290c16b537SWarner Losh unsigned maxSymbolValue, unsigned huffLog,
13300c16b537SWarner Losh void* workSpace, size_t wkspSize,
1331*5ff13fbcSAllan Jude HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible)
13320c16b537SWarner Losh {
133319fcbaf1SConrad Meyer return HUF_compress_internal(dst, dstSize, src, srcSize,
1334a0483764SConrad Meyer maxSymbolValue, huffLog, HUF_fourStreams,
133519fcbaf1SConrad Meyer workSpace, wkspSize,
1336*5ff13fbcSAllan Jude hufTable, repeat, preferRepeat, bmi2, suspectUncompressible);
13370c16b537SWarner Losh }
13380c16b537SWarner Losh
1339f7cd7fe5SConrad Meyer #ifndef ZSTD_NO_UNUSED_FUNCTIONS
1340f7cd7fe5SConrad Meyer /** HUF_buildCTable() :
1341f7cd7fe5SConrad Meyer * @return : maxNbBits
1342f7cd7fe5SConrad Meyer * Note : count is used before tree is written, so they can safely overlap
1343f7cd7fe5SConrad Meyer */
1344f7cd7fe5SConrad Meyer size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits)
1345f7cd7fe5SConrad Meyer {
1346f7cd7fe5SConrad Meyer HUF_buildCTable_wksp_tables workspace;
1347f7cd7fe5SConrad Meyer return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, &workspace, sizeof(workspace));
1348f7cd7fe5SConrad Meyer }
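/* Illustrative usage sketch (added commentary, not part of the upstream sources):
 * building a table explicitly and reusing it to compress data with known
 * statistics. Buffer names are hypothetical:
 *
 *   unsigned count[HUF_SYMBOLVALUE_MAX + 1];
 *   unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
 *   HUF_CElt cTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];
 *   (void)HIST_count_simple(count, &maxSymbolValue, sample, sampleSize);
 *   {   size_t const maxNbBits = HUF_buildCTable(cTable, count, maxSymbolValue, HUF_TABLELOG_DEFAULT);
 *       size_t const cSize = HUF_compress1X_usingCTable(dst, dstCapacity, src, srcSize, cTable);
 *       // check maxNbBits and cSize with HUF_isError(); cSize == 0 means "not compressible"
 *   }
 */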
1349f7cd7fe5SConrad Meyer
1350f7cd7fe5SConrad Meyer size_t HUF_compress1X (void* dst, size_t dstSize,
1351f7cd7fe5SConrad Meyer const void* src, size_t srcSize,
1352f7cd7fe5SConrad Meyer unsigned maxSymbolValue, unsigned huffLog)
1353f7cd7fe5SConrad Meyer {
1354*5ff13fbcSAllan Jude U64 workSpace[HUF_WORKSPACE_SIZE_U64];
1355f7cd7fe5SConrad Meyer return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
1356f7cd7fe5SConrad Meyer }
1357f7cd7fe5SConrad Meyer
13580c16b537SWarner Losh size_t HUF_compress2 (void* dst, size_t dstSize,
13590c16b537SWarner Losh const void* src, size_t srcSize,
13600c16b537SWarner Losh unsigned maxSymbolValue, unsigned huffLog)
13610c16b537SWarner Losh {
1362*5ff13fbcSAllan Jude U64 workSpace[HUF_WORKSPACE_SIZE_U64];
13630c16b537SWarner Losh return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
13640c16b537SWarner Losh }
13650c16b537SWarner Losh
13660c16b537SWarner Losh size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
13670c16b537SWarner Losh {
136819fcbaf1SConrad Meyer return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT);
13690c16b537SWarner Losh }
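/* Illustrative usage sketch (added commentary, not part of the upstream sources):
 * the simplest entry point; a destination capacity of HUF_compressBound(srcSize)
 * bytes (from huf.h) is a safe choice for the table header plus worst-case payload.
 *
 *   size_t const cSize = HUF_compress(dst, dstCapacity, src, srcSize);
 *   if (HUF_isError(cSize)) { ... }   // compression failed
 *   else if (cSize == 0)    { ... }   // not compressible : store src as-is
 *   else                    { ... }   // cSize compressed bytes written to dst
 */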
1370f7cd7fe5SConrad Meyer #endif
1371