1 /* ****************************************************************** 2 Huffman encoder, part of New Generation Entropy library 3 Copyright (C) 2013-2016, Yann Collet. 4 5 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) 6 7 Redistribution and use in source and binary forms, with or without 8 modification, are permitted provided that the following conditions are 9 met: 10 11 * Redistributions of source code must retain the above copyright 12 notice, this list of conditions and the following disclaimer. 13 * Redistributions in binary form must reproduce the above 14 copyright notice, this list of conditions and the following disclaimer 15 in the documentation and/or other materials provided with the 16 distribution. 17 18 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
   You can contact the author at :
   - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */

/* **************************************************************
*  Compiler specifics
****************************************************************/
#ifdef _MSC_VER    /* Visual Studio */
#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
#endif


/* **************************************************************
*  Includes
****************************************************************/
#include <string.h>     /* memcpy, memset */
#include <stdio.h>      /* printf (debug) */
#include "bitstream.h"
#define FSE_STATIC_LINKING_ONLY   /* FSE_optimalTableLog_internal */
#include "fse.h"        /* header compression */
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include "error_private.h"


/* **************************************************************
*  Error Management
****************************************************************/
#define HUF_isError ERR_isError
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
/* CHECK_V_F(e, f) : evaluate f once into `size_t const e`;
 * propagate immediately if it is an error code. */
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
/* CHECK_F(f) : same as CHECK_V_F, discarding the value. */
#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }


/* **************************************************************
*  Utils
****************************************************************/
/*! HUF_optimalTableLog() :
 *  Suggest a table log adapted to (srcSize, maxSymbolValue),
 *  delegating to FSE_optimalTableLog_internal().
 *  NOTE(review): trailing argument `1` tweaks the FSE heuristic for
 *  Huffman usage — confirm exact semantics against fse.h. */
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
{
    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
}


/* *******************************************************
*  HUF : Huffman block compression
*********************************************************/
/* HUF_compressWeights() : 78 * Same as FSE_compress(), but dedicated to huff0's weights compression. 79 * The use case needs much less stack memory. 80 * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX. 81 */ 82 #define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6 83 size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize) 84 { 85 BYTE* const ostart = (BYTE*) dst; 86 BYTE* op = ostart; 87 BYTE* const oend = ostart + dstSize; 88 89 U32 maxSymbolValue = HUF_TABLELOG_MAX; 90 U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; 91 92 FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)]; 93 BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER]; 94 95 U32 count[HUF_TABLELOG_MAX+1]; 96 S16 norm[HUF_TABLELOG_MAX+1]; 97 98 /* init conditions */ 99 if (wtSize <= 1) return 0; /* Not compressible */ 100 101 /* Scan input and build symbol stats */ 102 { CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize) ); 103 if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */ 104 if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */ 105 } 106 107 tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); 108 CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) ); 109 110 /* Write table description header */ 111 { CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); 112 op += hSize; 113 } 114 115 /* Compress */ 116 CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) ); 117 { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) ); 118 if (cSize == 0) return 0; /* not enough space for compressed data */ 119 op += cSize; 120 } 121 122 return op-ostart; 123 } 124 125 126 struct HUF_CElt_s { 127 U16 val; 128 BYTE nbBits; 129 }; /* typedef'd to HUF_CElt within "huf.h" */ 130 
131 /*! HUF_writeCTable() : 132 `CTable` : Huffman tree to save, using huf representation. 133 @return : size of saved CTable */ 134 size_t HUF_writeCTable (void* dst, size_t maxDstSize, 135 const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog) 136 { 137 BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */ 138 BYTE huffWeight[HUF_SYMBOLVALUE_MAX]; 139 BYTE* op = (BYTE*)dst; 140 U32 n; 141 142 /* check conditions */ 143 if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); 144 145 /* convert to weight */ 146 bitsToWeight[0] = 0; 147 for (n=1; n<huffLog+1; n++) 148 bitsToWeight[n] = (BYTE)(huffLog + 1 - n); 149 for (n=0; n<maxSymbolValue; n++) 150 huffWeight[n] = bitsToWeight[CTable[n].nbBits]; 151 152 /* attempt weights compression by FSE */ 153 { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) ); 154 if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */ 155 op[0] = (BYTE)hSize; 156 return hSize+1; 157 } } 158 159 /* write raw values as 4-bits (max : 15) */ 160 if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */ 161 if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */ 162 op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1)); 163 huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */ 164 for (n=0; n<maxSymbolValue; n+=2) 165 op[(n/2)+1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n+1]); 166 return ((maxSymbolValue+1)/2) + 1; 167 } 168 169 170 size_t HUF_readCTable (HUF_CElt* CTable, U32* maxSymbolValuePtr, const void* src, size_t srcSize) 171 { 172 BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */ 173 U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */ 174 U32 tableLog = 0; 175 U32 nbSymbols 
= 0; 176 177 /* get symbol weights */ 178 CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize)); 179 180 /* check result */ 181 if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); 182 if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall); 183 184 /* Prepare base value per rank */ 185 { U32 n, nextRankStart = 0; 186 for (n=1; n<=tableLog; n++) { 187 U32 current = nextRankStart; 188 nextRankStart += (rankVal[n] << (n-1)); 189 rankVal[n] = current; 190 } } 191 192 /* fill nbBits */ 193 { U32 n; for (n=0; n<nbSymbols; n++) { 194 const U32 w = huffWeight[n]; 195 CTable[n].nbBits = (BYTE)(tableLog + 1 - w); 196 } } 197 198 /* fill val */ 199 { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */ 200 U16 valPerRank[HUF_TABLELOG_MAX+2] = {0}; 201 { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; } 202 /* determine stating value per rank */ 203 valPerRank[tableLog+1] = 0; /* for w==0 */ 204 { U16 min = 0; 205 U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */ 206 valPerRank[n] = min; /* get starting value within each rank */ 207 min += nbPerRank[n]; 208 min >>= 1; 209 } } 210 /* assign value within rank, symbol order */ 211 { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; } 212 } 213 214 *maxSymbolValuePtr = nbSymbols - 1; 215 return readSize; 216 } 217 218 219 typedef struct nodeElt_s { 220 U32 count; 221 U16 parent; 222 BYTE byte; 223 BYTE nbBits; 224 } nodeElt; 225 226 static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits) 227 { 228 const U32 largestBits = huffNode[lastNonNull].nbBits; 229 if (largestBits <= maxNbBits) return largestBits; /* early exit : no elt > maxNbBits */ 230 231 /* there are several too large elements (at least >= 2) */ 232 { int totalCost = 0; 233 const U32 baseCost = 1 << (largestBits - maxNbBits); 234 U32 n = lastNonNull; 235 236 while 
(huffNode[n].nbBits > maxNbBits) { 237 totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)); 238 huffNode[n].nbBits = (BYTE)maxNbBits; 239 n --; 240 } /* n stops at huffNode[n].nbBits <= maxNbBits */ 241 while (huffNode[n].nbBits == maxNbBits) n--; /* n end at index of smallest symbol using < maxNbBits */ 242 243 /* renorm totalCost */ 244 totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */ 245 246 /* repay normalized cost */ 247 { U32 const noSymbol = 0xF0F0F0F0; 248 U32 rankLast[HUF_TABLELOG_MAX+2]; 249 int pos; 250 251 /* Get pos of last (smallest) symbol per rank */ 252 memset(rankLast, 0xF0, sizeof(rankLast)); 253 { U32 currentNbBits = maxNbBits; 254 for (pos=n ; pos >= 0; pos--) { 255 if (huffNode[pos].nbBits >= currentNbBits) continue; 256 currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */ 257 rankLast[maxNbBits-currentNbBits] = pos; 258 } } 259 260 while (totalCost > 0) { 261 U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1; 262 for ( ; nBitsToDecrease > 1; nBitsToDecrease--) { 263 U32 highPos = rankLast[nBitsToDecrease]; 264 U32 lowPos = rankLast[nBitsToDecrease-1]; 265 if (highPos == noSymbol) continue; 266 if (lowPos == noSymbol) break; 267 { U32 const highTotal = huffNode[highPos].count; 268 U32 const lowTotal = 2 * huffNode[lowPos].count; 269 if (highTotal <= lowTotal) break; 270 } } 271 /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
*/ 272 /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */ 273 while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) 274 nBitsToDecrease ++; 275 totalCost -= 1 << (nBitsToDecrease-1); 276 if (rankLast[nBitsToDecrease-1] == noSymbol) 277 rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */ 278 huffNode[rankLast[nBitsToDecrease]].nbBits ++; 279 if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */ 280 rankLast[nBitsToDecrease] = noSymbol; 281 else { 282 rankLast[nBitsToDecrease]--; 283 if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease) 284 rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ 285 } } /* while (totalCost > 0) */ 286 287 while (totalCost < 0) { /* Sometimes, cost correction overshoot */ 288 if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ 289 while (huffNode[n].nbBits == maxNbBits) n--; 290 huffNode[n+1].nbBits--; 291 rankLast[1] = n+1; 292 totalCost++; 293 continue; 294 } 295 huffNode[ rankLast[1] + 1 ].nbBits--; 296 rankLast[1]++; 297 totalCost ++; 298 } } } /* there are several too large elements (at least >= 2) */ 299 300 return maxNbBits; 301 } 302 303 304 typedef struct { 305 U32 base; 306 U32 current; 307 } rankPos; 308 309 static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue) 310 { 311 rankPos rank[32]; 312 U32 n; 313 314 memset(rank, 0, sizeof(rank)); 315 for (n=0; n<=maxSymbolValue; n++) { 316 U32 r = BIT_highbit32(count[n] + 1); 317 rank[r].base ++; 318 } 319 for (n=30; n>0; n--) rank[n-1].base += rank[n].base; 320 for (n=0; n<32; n++) rank[n].current = rank[n].base; 321 for (n=0; n<=maxSymbolValue; n++) { 322 U32 const c = count[n]; 323 U32 const r = BIT_highbit32(c+1) + 1; 324 U32 pos = rank[r].current++; 325 while ((pos > rank[r].base) && (c > 
huffNode[pos-1].count)) huffNode[pos]=huffNode[pos-1], pos--; 326 huffNode[pos].count = c; 327 huffNode[pos].byte = (BYTE)n; 328 } 329 } 330 331 332 /** HUF_buildCTable_wksp() : 333 * Same as HUF_buildCTable(), but using externally allocated scratch buffer. 334 * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned. 335 */ 336 #define STARTNODE (HUF_SYMBOLVALUE_MAX+1) 337 typedef nodeElt huffNodeTable[2*HUF_SYMBOLVALUE_MAX+1 +1]; 338 size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize) 339 { 340 nodeElt* const huffNode0 = (nodeElt*)workSpace; 341 nodeElt* const huffNode = huffNode0+1; 342 U32 n, nonNullRank; 343 int lowS, lowN; 344 U16 nodeNb = STARTNODE; 345 U32 nodeRoot; 346 347 /* safety checks */ 348 if (wkspSize < sizeof(huffNodeTable)) return ERROR(GENERIC); /* workSpace is not large enough */ 349 if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; 350 if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(GENERIC); 351 memset(huffNode0, 0, sizeof(huffNodeTable)); 352 353 /* sort, decreasing order */ 354 HUF_sort(huffNode, count, maxSymbolValue); 355 356 /* init for parents */ 357 nonNullRank = maxSymbolValue; 358 while(huffNode[nonNullRank].count == 0) nonNullRank--; 359 lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb; 360 huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count; 361 huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb; 362 nodeNb++; lowS-=2; 363 for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30); 364 huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */ 365 366 /* create parents */ 367 while (nodeNb <= nodeRoot) { 368 U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; 369 U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? 
lowS-- : lowN++; 370 huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; 371 huffNode[n1].parent = huffNode[n2].parent = nodeNb; 372 nodeNb++; 373 } 374 375 /* distribute weights (unlimited tree height) */ 376 huffNode[nodeRoot].nbBits = 0; 377 for (n=nodeRoot-1; n>=STARTNODE; n--) 378 huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; 379 for (n=0; n<=nonNullRank; n++) 380 huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; 381 382 /* enforce maxTableLog */ 383 maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits); 384 385 /* fill result into tree (val, nbBits) */ 386 { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; 387 U16 valPerRank[HUF_TABLELOG_MAX+1] = {0}; 388 if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ 389 for (n=0; n<=nonNullRank; n++) 390 nbPerRank[huffNode[n].nbBits]++; 391 /* determine stating value per rank */ 392 { U16 min = 0; 393 for (n=maxNbBits; n>0; n--) { 394 valPerRank[n] = min; /* get starting value within each rank */ 395 min += nbPerRank[n]; 396 min >>= 1; 397 } } 398 for (n=0; n<=maxSymbolValue; n++) 399 tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */ 400 for (n=0; n<=maxSymbolValue; n++) 401 tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */ 402 } 403 404 return maxNbBits; 405 } 406 407 /** HUF_buildCTable() : 408 * Note : count is used before tree is written, so they can safely overlap 409 */ 410 size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits) 411 { 412 huffNodeTable nodeTable; 413 return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable)); 414 } 415 416 static size_t HUF_estimateCompressedSize(HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) 417 { 418 size_t nbBits = 0; 419 int s; 420 for (s = 0; s <= (int)maxSymbolValue; ++s) { 421 nbBits += CTable[s].nbBits * count[s]; 422 
} 423 return nbBits >> 3; 424 } 425 426 static int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { 427 int bad = 0; 428 int s; 429 for (s = 0; s <= (int)maxSymbolValue; ++s) { 430 bad |= (count[s] != 0) & (CTable[s].nbBits == 0); 431 } 432 return !bad; 433 } 434 435 static void HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable) 436 { 437 BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits); 438 } 439 440 size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } 441 442 #define HUF_FLUSHBITS(s) BIT_flushBits(s) 443 444 #define HUF_FLUSHBITS_1(stream) \ 445 if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream) 446 447 #define HUF_FLUSHBITS_2(stream) \ 448 if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream) 449 450 size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) 451 { 452 const BYTE* ip = (const BYTE*) src; 453 BYTE* const ostart = (BYTE*)dst; 454 BYTE* const oend = ostart + dstSize; 455 BYTE* op = ostart; 456 size_t n; 457 BIT_CStream_t bitC; 458 459 /* init */ 460 if (dstSize < 8) return 0; /* not enough space to compress */ 461 { size_t const initErr = BIT_initCStream(&bitC, op, oend-op); 462 if (HUF_isError(initErr)) return 0; } 463 464 n = srcSize & ~3; /* join to mod 4 */ 465 switch (srcSize & 3) 466 { 467 case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable); 468 HUF_FLUSHBITS_2(&bitC); 469 /* fall-through */ 470 case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable); 471 HUF_FLUSHBITS_1(&bitC); 472 /* fall-through */ 473 case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable); 474 HUF_FLUSHBITS(&bitC); 475 /* fall-through */ 476 case 0 : /* fall-through */ 477 default: break; 478 } 479 480 for (; n>0; n-=4) { /* note : n&3==0 at this stage */ 481 HUF_encodeSymbol(&bitC, ip[n- 1], CTable); 482 HUF_FLUSHBITS_1(&bitC); 483 HUF_encodeSymbol(&bitC, 
ip[n- 2], CTable); 484 HUF_FLUSHBITS_2(&bitC); 485 HUF_encodeSymbol(&bitC, ip[n- 3], CTable); 486 HUF_FLUSHBITS_1(&bitC); 487 HUF_encodeSymbol(&bitC, ip[n- 4], CTable); 488 HUF_FLUSHBITS(&bitC); 489 } 490 491 return BIT_closeCStream(&bitC); 492 } 493 494 495 size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) 496 { 497 size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */ 498 const BYTE* ip = (const BYTE*) src; 499 const BYTE* const iend = ip + srcSize; 500 BYTE* const ostart = (BYTE*) dst; 501 BYTE* const oend = ostart + dstSize; 502 BYTE* op = ostart; 503 504 if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */ 505 if (srcSize < 12) return 0; /* no saving possible : too small input */ 506 op += 6; /* jumpTable */ 507 508 { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); 509 if (cSize==0) return 0; 510 MEM_writeLE16(ostart, (U16)cSize); 511 op += cSize; 512 } 513 514 ip += segmentSize; 515 { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); 516 if (cSize==0) return 0; 517 MEM_writeLE16(ostart+2, (U16)cSize); 518 op += cSize; 519 } 520 521 ip += segmentSize; 522 { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); 523 if (cSize==0) return 0; 524 MEM_writeLE16(ostart+4, (U16)cSize); 525 op += cSize; 526 } 527 528 ip += segmentSize; 529 { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable) ); 530 if (cSize==0) return 0; 531 op += cSize; 532 } 533 534 return op-ostart; 535 } 536 537 538 static size_t HUF_compressCTable_internal( 539 BYTE* const ostart, BYTE* op, BYTE* const oend, 540 const void* src, size_t srcSize, 541 unsigned singleStream, const HUF_CElt* CTable) 542 { 543 size_t const cSize = singleStream ? 
544 HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) : 545 HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable); 546 if (HUF_isError(cSize)) { return cSize; } 547 if (cSize==0) { return 0; } /* uncompressible */ 548 op += cSize; 549 /* check compressibility */ 550 if ((size_t)(op-ostart) >= srcSize-1) { return 0; } 551 return op-ostart; 552 } 553 554 555 /* `workSpace` must a table of at least 1024 unsigned */ 556 static size_t HUF_compress_internal ( 557 void* dst, size_t dstSize, 558 const void* src, size_t srcSize, 559 unsigned maxSymbolValue, unsigned huffLog, 560 unsigned singleStream, 561 void* workSpace, size_t wkspSize, 562 HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat) 563 { 564 BYTE* const ostart = (BYTE*)dst; 565 BYTE* const oend = ostart + dstSize; 566 BYTE* op = ostart; 567 568 U32* count; 569 size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1); 570 HUF_CElt* CTable; 571 size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1); 572 573 /* checks & inits */ 574 if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize) return ERROR(GENERIC); 575 if (!srcSize) return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */ 576 if (!dstSize) return 0; /* cannot fit within dst budget */ 577 if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ 578 if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); 579 if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX; 580 if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; 581 582 count = (U32*)workSpace; 583 workSpace = (BYTE*)workSpace + countSize; 584 wkspSize -= countSize; 585 CTable = (HUF_CElt*)workSpace; 586 workSpace = (BYTE*)workSpace + CTableSize; 587 wkspSize -= CTableSize; 588 589 /* Heuristic : If we don't need to check the validity of the old table use the old table for small inputs */ 590 if (preferRepeat && repeat && *repeat == HUF_repeat_valid) { 591 return 
HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); 592 } 593 594 /* Scan input and build symbol stats */ 595 { CHECK_V_F(largest, FSE_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) ); 596 if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ 597 if (largest <= (srcSize >> 7)+1) return 0; /* Fast heuristic : not compressible enough */ 598 } 599 600 /* Check validity of previous table */ 601 if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) { 602 *repeat = HUF_repeat_none; 603 } 604 /* Heuristic : use existing table for small inputs */ 605 if (preferRepeat && repeat && *repeat != HUF_repeat_none) { 606 return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); 607 } 608 609 /* Build Huffman Tree */ 610 huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); 611 { CHECK_V_F(maxBits, HUF_buildCTable_wksp (CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize) ); 612 huffLog = (U32)maxBits; 613 /* Zero the unused symbols so we can check it for validity */ 614 memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt)); 615 } 616 617 /* Write table description header */ 618 { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, CTable, maxSymbolValue, huffLog) ); 619 /* Check if using the previous table will be beneficial */ 620 if (repeat && *repeat != HUF_repeat_none) { 621 size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue); 622 size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue); 623 if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { 624 return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); 625 } 626 } 627 /* Use the new table */ 628 if (hSize + 12ul >= srcSize) { return 0; } 629 op += hSize; 630 if (repeat) { *repeat = 
HUF_repeat_none; } 631 if (oldHufTable) { memcpy(oldHufTable, CTable, CTableSize); } /* Save the new table */ 632 } 633 return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable); 634 } 635 636 637 size_t HUF_compress1X_wksp (void* dst, size_t dstSize, 638 const void* src, size_t srcSize, 639 unsigned maxSymbolValue, unsigned huffLog, 640 void* workSpace, size_t wkspSize) 641 { 642 return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0); 643 } 644 645 size_t HUF_compress1X_repeat (void* dst, size_t dstSize, 646 const void* src, size_t srcSize, 647 unsigned maxSymbolValue, unsigned huffLog, 648 void* workSpace, size_t wkspSize, 649 HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat) 650 { 651 return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat, preferRepeat); 652 } 653 654 size_t HUF_compress1X (void* dst, size_t dstSize, 655 const void* src, size_t srcSize, 656 unsigned maxSymbolValue, unsigned huffLog) 657 { 658 unsigned workSpace[1024]; 659 return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); 660 } 661 662 size_t HUF_compress4X_wksp (void* dst, size_t dstSize, 663 const void* src, size_t srcSize, 664 unsigned maxSymbolValue, unsigned huffLog, 665 void* workSpace, size_t wkspSize) 666 { 667 return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0); 668 } 669 670 size_t HUF_compress4X_repeat (void* dst, size_t dstSize, 671 const void* src, size_t srcSize, 672 unsigned maxSymbolValue, unsigned huffLog, 673 void* workSpace, size_t wkspSize, 674 HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat) 675 { 676 return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, 
wkspSize, hufTable, repeat, preferRepeat); 677 } 678 679 size_t HUF_compress2 (void* dst, size_t dstSize, 680 const void* src, size_t srcSize, 681 unsigned maxSymbolValue, unsigned huffLog) 682 { 683 unsigned workSpace[1024]; 684 return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); 685 } 686 687 size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize) 688 { 689 return HUF_compress2(dst, maxDstSize, src, (U32)srcSize, 255, HUF_TABLELOG_DEFAULT); 690 } 691