/* ******************************************************************
   Huffman decoder, part of New Generation Entropy library
   Copyright (C) 2013-2016, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */

/* **************************************************************
*  Dependencies
****************************************************************/
#include <string.h>     /* memcpy, memset */
#include "bitstream.h"  /* BIT_* */
#include "compiler.h"
#include "fse.h"        /* header compression */
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#include "error_private.h"


/* **************************************************************
*  Error Management
****************************************************************/
#define HUF_isError ERR_isError
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */


/* **************************************************************
*  Byte alignment for workSpace management
****************************************************************/
#define HUF_ALIGN(x, a)         HUF_ALIGN_MASK((x), (a) - 1)
#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

/*-***************************/
/*  generic DTableDesc       */
/*-***************************/

typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;

static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
{
    DTableDesc dtd;
    memcpy(&dtd, table, sizeof(dtd));
    return dtd;
}


/*-***************************/
/*  single-symbol decoding   */
/*-***************************/

typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */

size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
{
    U32 tableLog = 0;
    U32 nbSymbols = 0;
    size_t iSize;
    void* const dtPtr = DTable + 1;
    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;

    U32* rankVal;
    BYTE* huffWeight;
    size_t spaceUsed32 = 0;

    rankVal = (U32 *)workSpace + spaceUsed32;
    spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
    huffWeight = (BYTE *)((U32 *)workSpace + spaceUsed32);
    spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;

    if ((spaceUsed32 << 2) > wkspSize)
        return ERROR(tableLog_tooLarge);
    workSpace = (U32 *)workSpace + spaceUsed32;
    wkspSize -= (spaceUsed32 << 2);

    HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
    /* memset(huffWeight, 0, sizeof(huffWeight)); */   /* is not necessary, even though some analyzer complain ... */

    iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
    if (HUF_isError(iSize)) return iSize;

    /* Table header */
    {   DTableDesc dtd = HUF_getDTableDesc(DTable);
        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small, Huffman tree cannot fit in */
        dtd.tableType = 0;
        dtd.tableLog = (BYTE)tableLog;
        memcpy(DTable, &dtd, sizeof(dtd));
    }

    /* Calculate starting value for each rank */
    {   U32 n, nextRankStart = 0;
        for (n=1; n<tableLog+1; n++) {
            U32 const current = nextRankStart;
            nextRankStart += (rankVal[n] << (n-1));
            rankVal[n] = current;
    }   }

    /* fill DTable */
    {   U32 n;
        for (n=0; n<nbSymbols; n++) {
            U32 const w = huffWeight[n];
            U32 const length = (1 << w) >> 1;
            U32 u;
            HUF_DEltX2 D;
            D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
            for (u = rankVal[w]; u < rankVal[w] + length; u++)
                dt[u] = D;
            rankVal[w] += length;
    }   }

    return iSize;
}

size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize)
{
    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    return HUF_readDTableX2_wksp(DTable, src, srcSize,
                                 workSpace, sizeof(workSpace));
}


static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
{
    size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
    BYTE const c = dt[val].byte;
    BIT_skipBits(Dstream, dt[val].nbBits);
    return c;
}

#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
    *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)

#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)

#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
    if (MEM_64bits()) \
        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)

HINT_INLINE size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
{
    BYTE* const pStart = p;

    /* up to 4 symbols at a time */
    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4)) {
        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
    }

    /* closer to the end */
    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);

    /* no more data to retrieve from bitstream, hence no need to reload */
    while (p < pEnd)
        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);

    return pEnd-pStart;
}

static size_t
HUF_decompress1X2_usingDTable_internal(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + dstSize;
    const void* dtPtr = DTable + 1;
    const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
    BIT_DStream_t bitD;
    DTableDesc const dtd = HUF_getDTableDesc(DTable);
    U32 const dtLog = dtd.tableLog;

    { size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
      if (HUF_isError(errorCode)) return errorCode; }

    HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog);

    /* check */
    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);

    return dstSize;
}

size_t HUF_decompress1X2_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    if (dtd.tableType != 0) return ERROR(GENERIC);
    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}

size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
                                   const void* cSrc, size_t cSrcSize,
                                   void* workSpace, size_t wkspSize)
{
    const BYTE* ip = (const BYTE*) cSrc;

    size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
    if (HUF_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize; cSrcSize -= hSize;

    return HUF_decompress1X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
}


size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
                              const void* cSrc, size_t cSrcSize)
{
    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
                                       workSpace, sizeof(workSpace));
}

size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
    return HUF_decompress1X2_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
}


static size_t HUF_decompress4X2_usingDTable_internal(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    /* Check */
    if (cSrcSize < 10) return ERROR(corruption_detected);  /* strict minimum : jump table + 1 byte per stream */

    {   const BYTE* const istart = (const BYTE*) cSrc;
        BYTE* const ostart = (BYTE*) dst;
        BYTE* const oend = ostart + dstSize;
        const void* const dtPtr = DTable + 1;
        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;

        /* Init */
        BIT_DStream_t bitD1;
        BIT_DStream_t bitD2;
        BIT_DStream_t bitD3;
        BIT_DStream_t bitD4;
        size_t const length1 = MEM_readLE16(istart);
        size_t const length2 = MEM_readLE16(istart+2);
        size_t const length3 = MEM_readLE16(istart+4);
        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
        const BYTE* const istart1 = istart + 6;  /* jumpTable */
        const BYTE* const istart2 = istart1 + length1;
        const BYTE* const istart3 = istart2 + length2;
        const BYTE* const istart4 = istart3 + length3;
        const size_t segmentSize = (dstSize+3) / 4;
        BYTE* const opStart2 = ostart + segmentSize;
        BYTE* const opStart3 = opStart2 + segmentSize;
        BYTE* const opStart4 = opStart3 + segmentSize;
        BYTE* op1 = ostart;
        BYTE* op2 = opStart2;
        BYTE* op3 = opStart3;
        BYTE* op4 = opStart4;
        U32 endSignal;
        DTableDesc const dtd = HUF_getDTableDesc(DTable);
        U32 const dtLog = dtd.tableLog;

        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
        { size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
          if (HUF_isError(errorCode)) return errorCode; }
        { size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
          if (HUF_isError(errorCode)) return errorCode; }
        { size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
          if (HUF_isError(errorCode)) return errorCode; }
        { size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
          if (HUF_isError(errorCode)) return errorCode; }

        /* 16-32 symbols per loop (4-8 symbols per stream) */
        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) {
            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
        }

        /* check corruption */
        if (op1 > opStart2) return ERROR(corruption_detected);
        if (op2 > opStart3) return ERROR(corruption_detected);
        if (op3 > opStart4) return ERROR(corruption_detected);
        /* note : op4 supposed already verified within main loop */

        /* finish bitStreams one by one */
        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);

        /* check */
        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
        if (!endSignal) return ERROR(corruption_detected);

        /* decoded size */
        return dstSize;
    }
}


size_t HUF_decompress4X2_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    if (dtd.tableType != 0) return ERROR(GENERIC);
    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}


size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
                                   const void* cSrc, size_t cSrcSize,
                                   void* workSpace, size_t wkspSize)
{
    const BYTE* ip = (const BYTE*) cSrc;

    size_t const hSize = HUF_readDTableX2_wksp (dctx, cSrc, cSrcSize,
                                                workSpace, wkspSize);
    if (HUF_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize; cSrcSize -= hSize;

    return HUF_decompress4X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, dctx);
}


size_t HUF_decompress4X2_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
                                       workSpace, sizeof(workSpace));
}
size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
    return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
}


/* *************************/
/* double-symbols decoding */
/* *************************/
typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;  /* double-symbols decoding */

typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;

/* HUF_fillDTableX4Level2() :
 * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
                           const U32* rankValOrigin, const int minWeight,
                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
                           U32 nbBitsBaseline, U16 baseSeq)
{
    HUF_DEltX4 DElt;
    U32 rankVal[HUF_TABLELOG_MAX + 1];

    /* get pre-calculated rankVal */
    memcpy(rankVal, rankValOrigin, sizeof(rankVal));

    /* fill skipped values */
    if (minWeight>1) {
        U32 i, skipSize = rankVal[minWeight];
        MEM_writeLE16(&(DElt.sequence), baseSeq);
        DElt.nbBits   = (BYTE)(consumed);
        DElt.length = 1;
        for (i = 0; i < skipSize; i++)
            DTable[i] = DElt;
    }

    /* fill DTable */
    { U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
        const U32 symbol = sortedSymbols[s].symbol;
        const U32 weight = sortedSymbols[s].weight;
        const U32 nbBits = nbBitsBaseline - weight;
        const U32 length = 1 << (sizeLog-nbBits);
        const U32 start = rankVal[weight];
        U32 i = start;
        const U32 end = start + length;

        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
        DElt.nbBits = (BYTE)(nbBits + consumed);
        DElt.length = 2;
        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */

        rankVal[weight] += length;
    } }
}

typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];

static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
                           const U32 nbBitsBaseline)
{
    U32 rankVal[HUF_TABLELOG_MAX + 1];
    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
    const U32 minBits  = nbBitsBaseline - maxWeight;
    U32 s;

    memcpy(rankVal, rankValOrigin, sizeof(rankVal));

    /* fill DTable */
    for (s=0; s<sortedListSize; s++) {
        const U16 symbol = sortedList[s].symbol;
        const U32 weight = sortedList[s].weight;
        const U32 nbBits = nbBitsBaseline - weight;
        const U32 start = rankVal[weight];
        const U32 length = 1 << (targetLog-nbBits);

        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
            U32 sortedRank;
            int minWeight = nbBits + scaleLog;
            if (minWeight < 1) minWeight = 1;
            sortedRank = rankStart[minWeight];
            HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
                           rankValOrigin[nbBits], minWeight,
                           sortedList+sortedRank, sortedListSize-sortedRank,
                           nbBitsBaseline, symbol);
        } else {
            HUF_DEltX4 DElt;
            MEM_writeLE16(&(DElt.sequence), symbol);
            DElt.nbBits = (BYTE)(nbBits);
            DElt.length = 1;
            {   U32 const end = start + length;
                U32 u;
                for (u = start; u < end; u++) DTable[u] = DElt;
        }   }
        rankVal[weight] += length;
    }
}

size_t HUF_readDTableX4_wksp(HUF_DTable* DTable, const void* src,
                             size_t srcSize, void* workSpace,
                             size_t wkspSize)
{
    U32 tableLog, maxW, sizeOfSort, nbSymbols;
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    U32 const maxTableLog = dtd.maxTableLog;
    size_t iSize;
    void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */
    HUF_DEltX4* const dt = (HUF_DEltX4*)dtPtr;
    U32 *rankStart;

    rankValCol_t* rankVal;
    U32* rankStats;
    U32* rankStart0;
    sortedSymbol_t* sortedSymbol;
    BYTE* weightList;
    size_t spaceUsed32 = 0;

    rankVal = (rankValCol_t *)((U32 *)workSpace + spaceUsed32);
    spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;
    rankStats = (U32 *)workSpace + spaceUsed32;
    spaceUsed32 += HUF_TABLELOG_MAX + 1;
    rankStart0 = (U32 *)workSpace + spaceUsed32;
    spaceUsed32 += HUF_TABLELOG_MAX + 2;
    sortedSymbol = (sortedSymbol_t *)workSpace + (spaceUsed32 * sizeof(U32)) / sizeof(sortedSymbol_t);
    spaceUsed32 += HUF_ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;
    weightList = (BYTE *)((U32 *)workSpace + spaceUsed32);
    spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;

    if ((spaceUsed32 << 2) > wkspSize)
        return ERROR(tableLog_tooLarge);
    workSpace = (U32 *)workSpace + spaceUsed32;
    wkspSize -= (spaceUsed32 << 2);

    rankStart = rankStart0 + 1;
    memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));

    HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable));   /* if compiler fails here, assertion is wrong */
    if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
    /* memset(weightList, 0, sizeof(weightList)); */  /* is not necessary, even though some analyzer complain ... */

    iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
    if (HUF_isError(iSize)) return iSize;

    /* check result */
    if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */

    /* find maxWeight */
    for (maxW = tableLog; rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */

    /* Get start index of each weight */
    {   U32 w, nextRankStart = 0;
        for (w=1; w<maxW+1; w++) {
            U32 current = nextRankStart;
            nextRankStart += rankStats[w];
            rankStart[w] = current;
        }
        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/
        sizeOfSort = nextRankStart;
    }

    /* sort symbols by weight */
    {   U32 s;
        for (s=0; s<nbSymbols; s++) {
            U32 const w = weightList[s];
            U32 const r = rankStart[w]++;
            sortedSymbol[r].symbol = (BYTE)s;
            sortedSymbol[r].weight = (BYTE)w;
        }
        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
    }

    /* Build rankVal */
    {   U32* const rankVal0 = rankVal[0];
        {   int const rescale = (maxTableLog-tableLog) - 1;   /* tableLog <= maxTableLog */
            U32 nextRankVal = 0;
            U32 w;
            for (w=1; w<maxW+1; w++) {
                U32 current = nextRankVal;
                nextRankVal += rankStats[w] << (w+rescale);
                rankVal0[w] = current;
        }   }
        {   U32 const minBits = tableLog+1 - maxW;
            U32 consumed;
            for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
                U32* const rankValPtr = rankVal[consumed];
                U32 w;
                for (w = 1; w < maxW+1; w++) {
                    rankValPtr[w] = rankVal0[w] >> consumed;
    }   }   }   }

    HUF_fillDTableX4(dt, maxTableLog,
                   sortedSymbol, sizeOfSort,
                   rankStart0, rankVal, maxW,
                   tableLog+1);

    dtd.tableLog = (BYTE)maxTableLog;
    dtd.tableType = 1;
    memcpy(DTable, &dtd, sizeof(dtd));
    return iSize;
}

size_t HUF_readDTableX4(HUF_DTable* DTable, const void* src, size_t srcSize)
{
    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    return HUF_readDTableX4_wksp(DTable, src, srcSize,
                                 workSpace, sizeof(workSpace));
}

static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
{
    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
    memcpy(op, dt+val, 2);
    BIT_skipBits(DStream, dt[val].nbBits);
    return dt[val].length;
}

static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
{
    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
    memcpy(op, dt+val, 1);
    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
    else {
        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
            BIT_skipBits(DStream, dt[val].nbBits);
            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
                /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
    }   }
    return 1;
}


#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
    ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)

#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)

#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
    if (MEM_64bits()) \
        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)

HINT_INLINE size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
{
    BYTE* const pStart = p;

    /* up to 8 symbols at a time */
    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
        HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
    }

    /* closer to end : up to 2 symbols at a time */
    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);

    while (p <= pEnd-2)
        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */

    if (p < pEnd)
        p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);

    return p-pStart;
}


static size_t HUF_decompress1X4_usingDTable_internal(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    BIT_DStream_t bitD;

    /* Init */
    { size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
      if (HUF_isError(errorCode)) return errorCode;
    }

    /* decode */
    {   BYTE* const ostart = (BYTE*) dst;
        BYTE* const oend = ostart + dstSize;
        const void* const dtPtr = DTable+1;   /* force compiler to not use strict-aliasing */
        const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
        DTableDesc const dtd = HUF_getDTableDesc(DTable);
        HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
    }

    /* check */
    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);

    /* decoded size */
    return dstSize;
}

size_t HUF_decompress1X4_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    if (dtd.tableType != 1) return ERROR(GENERIC);
    return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}

size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
                                   const void* cSrc, size_t cSrcSize,
                                   void* workSpace, size_t wkspSize)
{
    const BYTE* ip = (const BYTE*) cSrc;

    size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize,
                                               workSpace, wkspSize);
    if (HUF_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize; cSrcSize -= hSize;

    return HUF_decompress1X4_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
}


size_t HUF_decompress1X4_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
                              const void* cSrc, size_t cSrcSize)
{
    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    return HUF_decompress1X4_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
                                       workSpace, sizeof(workSpace));
}

size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
    return HUF_decompress1X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
}

static size_t HUF_decompress4X4_usingDTable_internal(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */

    {   const BYTE* const istart = (const BYTE*) cSrc;
        BYTE* const ostart = (BYTE*) dst;
        BYTE* const oend = ostart + dstSize;
        const void* const dtPtr = DTable+1;
        const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;

        /* Init */
        BIT_DStream_t bitD1;
        BIT_DStream_t bitD2;
        BIT_DStream_t bitD3;
        BIT_DStream_t bitD4;
        size_t const length1 = MEM_readLE16(istart);
        size_t const length2 = MEM_readLE16(istart+2);
        size_t const length3 = MEM_readLE16(istart+4);
        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
        const BYTE* const istart1 = istart + 6;  /* jumpTable */
        const BYTE* const istart2 = istart1 + length1;
        const BYTE* const istart3 = istart2 + length2;
        const BYTE* const istart4 = istart3 + length3;
        size_t const segmentSize = (dstSize+3) / 4;
        BYTE* const opStart2 = ostart + segmentSize;
        BYTE* const opStart3 = opStart2 + segmentSize;
        BYTE* const opStart4 = opStart3 + segmentSize;
        BYTE* op1 = ostart;
        BYTE* op2 = opStart2;
        BYTE* op3 = opStart3;
        BYTE* op4 = opStart4;
        U32 endSignal;
        DTableDesc const dtd = HUF_getDTableDesc(DTable);
        U32 const dtLog = dtd.tableLog;

        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
        { size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
          if (HUF_isError(errorCode)) return errorCode; }
        { size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
          if (HUF_isError(errorCode)) return errorCode; }
        { size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
          if (HUF_isError(errorCode)) return errorCode; }
        { size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
          if (HUF_isError(errorCode)) return errorCode; }

        /* 16-32 symbols per loop (4-8 symbols per stream) */
        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
        for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) {
            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
            HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
            HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
            HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
            HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
            HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
            HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
            HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
            HUF_DECODE_SYMBOLX4_0(op4, &bitD4);

            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
        }

        /* check corruption */
        if (op1 > opStart2) return ERROR(corruption_detected);
        if (op2 > opStart3) return ERROR(corruption_detected);
        if (op3 > opStart4) return ERROR(corruption_detected);
        /* note : op4 already verified within main loop */

        /* finish bitStreams one by one */
        HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
        HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
        HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
        HUF_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);

        /* check */
        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
          if (!endCheck) return ERROR(corruption_detected); }

        /* decoded size */
        return dstSize;
    }
}


size_t HUF_decompress4X4_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    if (dtd.tableType != 1) return ERROR(GENERIC);
    return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}


size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
                                   const void* cSrc, size_t cSrcSize,
                                   void* workSpace, size_t wkspSize)
{
    const BYTE* ip = (const BYTE*) cSrc;

    size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize,
                                         workSpace, wkspSize);
    if (HUF_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize; cSrcSize -= hSize;

    return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
}


size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
                              const void* cSrc, size_t cSrcSize)
{
    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    return HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
                                       workSpace, sizeof(workSpace));
}

size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
    return HUF_decompress4X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
}


/* ********************************/
/* Generic decompression selector */
/* ********************************/

size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    const HUF_DTable* DTable)
{
    DTableDesc const dtd = HUF_getDTableDesc(DTable);
    return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
                           HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
}

size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    const HUF_DTable* DTable)
{
    DTableDesc const dtd = HUF_getDTableDesc(DTable);
    return dtd.tableType ?
           HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
           HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
}


typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
{
    /* single, double, quad */
    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
};

/** HUF_selectDecoder() :
*   Tells which decoder is likely to decode faster,
*   based on a set of pre-determined metrics.
*   @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
*   Assumption : 0 < cSrcSize, dstSize <= 128 KB */
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
{
    /* decoder timing evaluation */
    U32 const Q = cSrcSize >= dstSize ? 15 : (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 */
    U32 const D256 = (U32)(dstSize >> 8);
    U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
    U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
    DTime1 += DTime1 >> 3;  /* advantage to algorithm using less memory, for cache eviction */

    return DTime1 < DTime0;
}


typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);

size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    static const decompressionAlgo decompress[2] = { HUF_decompress4X2, HUF_decompress4X4 };

    /* validation checks */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */

    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
        return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
    }
}

size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    /* validation checks */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */

    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
        return algoNb ?
               HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
               HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
    }
}

size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
                                         workSpace, sizeof(workSpace));
}


size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
                                     size_t dstSize, const void* cSrc,
                                     size_t cSrcSize, void* workSpace,
                                     size_t wkspSize)
{
    /* validation checks */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if (cSrcSize == 0) return ERROR(corruption_detected);

    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
        return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize):
                        HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
    }
}

size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
                                  const void* cSrc, size_t cSrcSize,
                                  void* workSpace, size_t wkspSize)
{
    /* validation checks */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */

    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
        return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                cSrcSize, workSpace, wkspSize):
                        HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                cSrcSize, workSpace, wkspSize);
    }
}

size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
                             const void* cSrc, size_t cSrcSize)
{
    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
    return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
                                      workSpace, sizeof(workSpace));
}
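
#if 0
/* Illustrative usage sketch (not part of the library) :
 * shows how the one-shot HUF_decompress() entry point defined above is
 * typically driven. Assumptions : the caller already knows the regenerated
 * size `dstSize` (a raw Huffman block does not store it), dstSize does not
 * exceed 128 KB as required by HUF_selectDecoder(), and the helper name
 * `example_huf_decode` is hypothetical. */
#include <stdio.h>

static int example_huf_decode(void* dst, size_t dstSize,
                              const void* cSrc, size_t cSrcSize)
{
    size_t const dSize = HUF_decompress(dst, dstSize, cSrc, cSrcSize);
    if (HUF_isError(dSize)) {
        /* every HUF_* entry point reports errors the same way :
         * a size_t code to be tested with HUF_isError() */
        printf("HUF_decompress failed : %s \n", HUF_getErrorName(dSize));
        return 1;
    }
    return 0;   /* success : dSize == dstSize, dst is fully regenerated */
}
#endif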