/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


#include <stddef.h>    /* size_t, ptrdiff_t */
#include "zstd_v02.h"
#include "../common/error_private.h"


/******************************************
*  Compiler-specific
******************************************/
#if defined(_MSC_VER)   /* Visual Studio */
#   include <stdlib.h>  /* _byteswap_ulong */
#   include <intrin.h>  /* _byteswap_* */
#endif


/* ******************************************************************
   mem.h
   low-level memory access routines
   Copyright (C) 2013-2015, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   You can contact the author at :
   - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef MEM_H_MODULE
#define MEM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif

/******************************************
*  Includes
******************************************/
#include <stddef.h>    /* size_t, ptrdiff_t */
#include <string.h>    /* memcpy */


/******************************************
*  Compiler-specific
******************************************/
#if defined(__GNUC__)
#  define MEM_STATIC static __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define MEM_STATIC static inline
#elif defined(_MSC_VER)
#  define MEM_STATIC static __inline
#else
#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif


/****************************************************************
*  Basic Types
*****************************************************************/
#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# if defined(_AIX)
#  include <inttypes.h>
# else
#  include <stdint.h>   /* intptr_t */
# endif
  typedef  uint8_t BYTE;
  typedef uint16_t U16;
  typedef  int16_t S16;
  typedef uint32_t U32;
  typedef  int32_t S32;
  typedef uint64_t U64;
  typedef  int64_t S64;
#else
  typedef unsigned char       BYTE;
  typedef unsigned short      U16;
  typedef   signed short      S16;
  typedef unsigned int        U32;
  typedef   signed int        S32;
  typedef unsigned long long  U64;
  typedef   signed long long  S64;
#endif


/****************************************************************
*  Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C standard.
 *            It can generate buggy code on targets whose generated assembly depends on alignment.
 *            But in some circumstances, it's the only known way to get the most performance (e.g., GCC + ARMv6).
 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
#  if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
#    define MEM_FORCE_MEMORY_ACCESS 1
#  endif
#endif
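/* Illustrative note (not part of the original source) : the selection above can be
 * overridden externally at build time, e.g. with a hypothetical compile line
 *     cc -DMEM_FORCE_MEMORY_ACCESS=2 -c zstd_v02.c
 * which forces Method 2 (direct access) instead of the auto-detected default. */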
MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }

MEM_STATIC unsigned MEM_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
    return one.c[0];
}

#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)

/* violates C standard on structure alignment.
Only use if no other choice to achieve best performance on target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }

#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)

/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;

MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }

#else
/* default method, safe and standard.
   can sometimes prove slower */

MEM_STATIC U16 MEM_read16(const void* memPtr)
{
    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U32 MEM_read32(const void* memPtr)
{
    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U64 MEM_read64(const void* memPtr)
{
    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

#endif /* MEM_FORCE_MEMORY_ACCESS */


MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read16(memPtr);
    else
    {
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)(p[0] + (p[1]<<8));
    }
}

MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
    if (MEM_isLittleEndian())
    {
        MEM_write16(memPtr, val);
    }
    else
    {
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE)val;
        p[1] = (BYTE)(val>>8);
    }
}

MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
}

MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read32(memPtr);
    else
    {
        const BYTE* p = (const BYTE*)memPtr;
        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
    }
}


MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read64(memPtr);
    else
    {
        const BYTE* p = (const BYTE*)memPtr;
        return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
                     + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
    }
}


MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readLE32(memPtr);
    else
        return (size_t)MEM_readLE64(memPtr);
}

#if defined (__cplusplus)
}
#endif

#endif /* MEM_H_MODULE */


/* ******************************************************************
   bitstream
   Part of NewGen Entropy library
   header file (to include)
   Copyright (C) 2013-2015, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED.
   IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef BITSTREAM_H_MODULE
#define BITSTREAM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif


/*
*  This API consists of small unitary functions, which highly benefit from being inlined.
*  Since link-time-optimization is not available for all compilers,
*  these functions are defined into a .h to be included.
*/


/**********************************************
*  bitStream decompression API (read backward)
**********************************************/
typedef struct
{
    size_t   bitContainer;
    unsigned bitsConsumed;
    const char* ptr;
    const char* start;
} BIT_DStream_t;

typedef enum { BIT_DStream_unfinished = 0,
               BIT_DStream_endOfBuffer = 1,
               BIT_DStream_completed = 2,
               BIT_DStream_overflow = 3 } BIT_DStream_status;   /* result of BIT_reloadDStream() */
               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */

MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);


/******************************************
*  unsafe API
******************************************/
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
/* faster, but works only if nbBits >= 1 */
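
/* Illustrative sketch (not part of the original source) : a typical read-backward
 * decoding loop built on the API above. The function name, parameters, and the
 * assumption that each symbol uses `nbBits` (<= 8) bits are hypothetical; the real
 * decoders further below (FSE, Huff0) use table-driven variants of this pattern. */
MEM_STATIC size_t BIT_exampleDecodeRaw(BYTE* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize, unsigned nbBits)
{
    BIT_DStream_t bitD;
    size_t pos = 0;

    /* BIT_initDStream() returns srcSize on success, or an error code otherwise */
    {   size_t const initResult = BIT_initDStream(&bitD, src, srcSize);
        if (initResult != srcSize) return initResult;
    }

    /* read nbBits per symbol, refilling the bit container between reads */
    while (pos < dstCapacity)
    {
        dst[pos++] = (BYTE)BIT_readBits(&bitD, nbBits);
        if (BIT_reloadDStream(&bitD) > BIT_DStream_completed) break;   /* overflow : input exhausted or corrupted */
        if (BIT_endOfDStream(&bitD)) break;                            /* exact end of stream reached */
    }
    return pos;   /* number of symbols decoded */
}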


/****************************************************************
*  Helper functions
****************************************************************/
MEM_STATIC unsigned BIT_highbit32 (U32 val)
{
#   if defined(_MSC_VER)   /* Visual */
    unsigned long r;
    return _BitScanReverse(&r, val) ? (unsigned)r : 0;
#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
    return __builtin_clz (val) ^ 31;
#   else   /* Software version */
    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
    U32 v = val;
    unsigned r;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
    return r;
#   endif
}



/**********************************************************
*  bitStream decoding
**********************************************************/

/*!BIT_initDStream
*  Initialize a BIT_DStream_t.
*  @bitD : a pointer to an already allocated BIT_DStream_t structure
*  @srcBuffer must point at the beginning of a bitStream
*  @srcSize must be the exact size of the bitStream
*  @result : size of stream (== srcSize) or an errorCode if a problem is detected
*/
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }

    if (srcSize >= sizeof(size_t))   /* normal case */
    {
        U32 contain32;
        bitD->start = (const char*)srcBuffer;
        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(size_t);
        bitD->bitContainer = MEM_readLEST(bitD->ptr);
        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
    }
    else
    {
        U32 contain32;
        bitD->start = (const char*)srcBuffer;
        bitD->ptr   = bitD->start;
        bitD->bitContainer = *(const BYTE*)(bitD->start);
        switch(srcSize)
        {
            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);
                    /* fallthrough */
            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);
                    /* fallthrough */
            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);
                    /* fallthrough */
            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;
                    /* fallthrough */
            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;
                    /* fallthrough */
            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8;
                    /* fallthrough */
            default:;
        }
        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
        bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
    }

    return srcSize;
}

MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits)
{
    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
}

/*! BIT_lookBitsFast :
*   unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)
{
    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
}

MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
{
    bitD->bitsConsumed += nbBits;
}

MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
{
    size_t value = BIT_lookBits(bitD, nbBits);
    BIT_skipBits(bitD, nbBits);
    return value;
}

/*! BIT_readBitsFast :
*   unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
{
    size_t value = BIT_lookBitsFast(bitD, nbBits);
    BIT_skipBits(bitD, nbBits);
    return value;
}

MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))   /* should never happen */
        return BIT_DStream_overflow;

    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
    {
        bitD->ptr -= bitD->bitsConsumed >> 3;
        bitD->bitsConsumed &= 7;
        bitD->bitContainer = MEM_readLEST(bitD->ptr);
        return BIT_DStream_unfinished;
    }
    if (bitD->ptr == bitD->start)
    {
        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
        return BIT_DStream_completed;
    }
    {
        U32 nbBytes = bitD->bitsConsumed >> 3;
        BIT_DStream_status result = BIT_DStream_unfinished;
        if (bitD->ptr - nbBytes < bitD->start)
        {
            nbBytes = (U32)(bitD->ptr - bitD->start);   /* ptr > start */
            result = BIT_DStream_endOfBuffer;
        }
        bitD->ptr -= nbBytes;
        bitD->bitsConsumed -= nbBytes*8;
        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
        return result;
    }
}

/*! BIT_endOfDStream
*   @return Tells if DStream has reached its exact end
*/
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
{
    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
}

#if defined (__cplusplus)
}
#endif

#endif /* BITSTREAM_H_MODULE */
/* ******************************************************************
   Error codes and messages
   Copyright (C) 2013-2015, Yann Collet

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED.
   IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef ERROR_H_MODULE
#define ERROR_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif


/******************************************
*  Compiler-specific
******************************************/
#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define ERR_STATIC static inline
#elif defined(_MSC_VER)
#  define ERR_STATIC static __inline
#elif defined(__GNUC__)
#  define ERR_STATIC static __attribute__((unused))
#else
#  define ERR_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif


/******************************************
*  Error Management
******************************************/
#define PREFIX(name) ZSTD_error_##name

#define ERROR(name) (size_t)-PREFIX(name)

#define ERROR_LIST(ITEM) \
        ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \
        ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \
        ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \
        ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \
        ITEM(PREFIX(maxCode))

#define ERROR_GENERATE_ENUM(ENUM) ENUM,
typedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes;   /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */

#define ERROR_CONVERTTOSTRING(STRING) #STRING,
#define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR)
static const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) };

ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }

ERR_STATIC const char* ERR_getErrorName(size_t code)
{
    static const char* codeError = "Unspecified error code";
    if (ERR_isError(code)) return ERR_strings[-(int)(code)];
    return codeError;
}


#if defined (__cplusplus)
}
#endif

#endif /* ERROR_H_MODULE */
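
/* Illustrative sketch (not part of the original source) : how callers typically
 * check a size_t return value that may carry an error code. The function name
 * is hypothetical. */
MEM_STATIC const char* ERR_exampleGetMessage(size_t functionResult)
{
    if (ERR_isError(functionResult))
        return ERR_getErrorName(functionResult);   /* e.g. "ZSTD_error_srcSize_wrong" */
    return NULL;   /* success : functionResult is a valid size */
}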
/*
  Constructor and Destructor of type FSE_CTable
  Note that its size depends on 'tableLog' and 'maxSymbolValue' */
typedef unsigned FSE_CTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */


/* ******************************************************************
   FSE : Finite State Entropy coder
   header file for static linking (only)
   Copyright (C) 2013-2015, Yann Collet

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#if defined (__cplusplus)
extern "C" {
#endif


/******************************************
*  Static allocation
******************************************/
/* FSE buffer bounds */
#define FSE_NCOUNTBOUND 512
#define FSE_BLOCKBOUND(size) (size + (size>>7))
#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */

/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */
#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))


/******************************************
*  FSE advanced API
******************************************/
static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
/* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */

static size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
/* build a fake FSE_DTable, designed to always generate the same symbolValue */


/******************************************
*  FSE symbol decompression API
******************************************/
typedef struct
{
    size_t      state;
    const void* table;   /* precise table may vary, depending on U16 */
} FSE_DState_t;


static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);

static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);

static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);


/******************************************
*  FSE unsafe API
******************************************/
static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
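
/* Illustrative sketch (not part of the original source) : a minimal single-state
 * decoding loop over the FSE API above, assuming `dt` was already built (e.g. by
 * FSE_buildDTable further below) and `dst` can hold `dstCapacity` symbols.
 * The production decoder (FSE_decompress_usingDTable_generic) interleaves two
 * states for speed; this simplified, hypothetical helper only shows the pattern. */
MEM_STATIC size_t FSE_exampleDecodeSingleState(unsigned char* dst, size_t dstCapacity,
                                               const void* cSrc, size_t cSrcSize,
                                               const FSE_DTable* dt)
{
    BIT_DStream_t bitD;
    FSE_DState_t  state;
    size_t pos = 0;

    /* BIT_initDStream() returns cSrcSize on success, or an error code otherwise */
    {   size_t const initResult = BIT_initDStream(&bitD, cSrc, cSrcSize);
        if (initResult != cSrcSize) return initResult;
    }
    FSE_initDState(&state, &bitD, dt);

    while (pos < dstCapacity)
    {
        dst[pos++] = FSE_decodeSymbol(&state, &bitD);
        if (BIT_reloadDStream(&bitD) > BIT_DStream_completed) break;     /* overflow : corrupted input */
        if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state)) break;   /* clean end of stream */
    }
    return pos;   /* number of symbols decoded */
}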


/******************************************
*  Implementation of inline functions
******************************************/

/* decompression */

typedef struct {
    U16 tableLog;
    U16 fastMode;
} FSE_DTableHeader;   /* sizeof U32 */

typedef struct
{
    unsigned short newState;
    unsigned char  symbol;
    unsigned char  nbBits;
} FSE_decode_t;   /* size == U32 */

MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
{
    FSE_DTableHeader DTableH;
    memcpy(&DTableH, dt, sizeof(DTableH));
    DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);
    BIT_reloadDStream(bitD);
    DStatePtr->table = dt + 1;
}

MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    const U32 nbBits = DInfo.nbBits;
    BYTE symbol = DInfo.symbol;
    size_t lowBits = BIT_readBits(bitD, nbBits);

    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}

MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
    const U32 nbBits = DInfo.nbBits;
    BYTE symbol = DInfo.symbol;
    size_t lowBits = BIT_readBitsFast(bitD, nbBits);

    DStatePtr->state = DInfo.newState + lowBits;
    return symbol;
}

MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
{
    return DStatePtr->state == 0;
}


#if defined (__cplusplus)
}
#endif
/* ******************************************************************
   Huff0 : Huffman coder, part of New Generation Entropy library
   header file for static linking (only)
   Copyright (C) 2013-2015, Yann Collet

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED.
   IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */

#if defined (__cplusplus)
extern "C" {
#endif

/******************************************
*  Static allocation macros
******************************************/
/* Huff0 buffer bounds */
#define HUF_CTABLEBOUND 129
#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true if incompressible pre-filtered with fast heuristic */
#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */

/* static allocation of Huff0's DTable */
#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<maxTableLog))   /* nb Cells; use unsigned short for X2, unsigned int for X4 */
#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
        unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
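/* Illustrative usage sketch (not part of the original source; the table name is arbitrary) :
 *     HUF_CREATE_STATIC_DTABLEX2(hufTable, HUF_MAX_TABLELOG);
 * declares an unsigned short table sized for tableLog == HUF_MAX_TABLELOG,
 * with its first cell initialized to that maximum tableLog. */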


/******************************************
*  Advanced functions
******************************************/
static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbols decoder */
static size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* quad-symbols decoder */


#if defined (__cplusplus)
}
#endif

/*
    zstd - standard compression library
    Header File
    Copyright (C) 2014-2015, Yann Collet.

    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are
    met:
    * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
    copyright notice, this list of conditions and the following disclaimer
    in the documentation and/or other materials provided with the
    distribution.
    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

    You can contact the author at :
    - zstd source repository : https://github.com/Cyan4973/zstd
    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
*/

#if defined (__cplusplus)
extern "C" {
#endif

/* *************************************
*  Includes
***************************************/
#include <stddef.h>   /* size_t */


/* *************************************
*  Version
***************************************/
#define ZSTD_VERSION_MAJOR    0    /* for breaking interface changes  */
#define ZSTD_VERSION_MINOR    2    /* for new (non-breaking) interface capabilities */
#define ZSTD_VERSION_RELEASE  2    /* for tweaks, bug-fixes, or development */
#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)


/* *************************************
*  Advanced functions
***************************************/
typedef struct ZSTD_CCtx_s ZSTD_CCtx;   /* incomplete type */

#if defined (__cplusplus)
}
#endif
/*
    zstd - standard compression library
    Header File for static linking only
    Copyright (C) 2014-2015, Yann Collet.

    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are
    met:
    * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
    copyright notice, this list of conditions and the following disclaimer
    in the documentation and/or other materials provided with the
    distribution.
    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

    You can contact the author at :
    - zstd source repository : https://github.com/Cyan4973/zstd
    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
*/

/* The objects defined in this file should be considered experimental.
 * They are not labelled stable, as their prototypes may change in the future.
 * You can use them for tests, provide feedback, or if you can endure the risk of future changes.
 */

#if defined (__cplusplus)
extern "C" {
#endif

/* *************************************
*  Streaming functions
***************************************/

typedef struct ZSTD_DCtx_s ZSTD_DCtx;

/*
  Use the above functions alternately.
  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
  ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to the current block.
  Result is the number of bytes regenerated within 'dst'.
  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
*/

/* *************************************
*  Prefix - version detection
***************************************/
#define ZSTD_magicNumber 0xFD2FB522   /* v0.2 (current)*/


#if defined (__cplusplus)
}
#endif
/* ******************************************************************
   FSE : Finite State Entropy coder
   Copyright (C) 2013-2015, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */

#ifndef FSE_COMMONDEFS_ONLY

/****************************************************************
*  Tuning parameters
****************************************************************/
/* MEMORY_USAGE :
*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
*  Increasing memory usage improves compression ratio
*  Reduced memory usage can improve speed, due to cache effect
*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
#define FSE_MAX_MEMORY_USAGE 14
#define FSE_DEFAULT_MEMORY_USAGE 13

/* FSE_MAX_SYMBOL_VALUE :
*  Maximum symbol value authorized.
*  Required for proper stack allocation */
#define FSE_MAX_SYMBOL_VALUE 255


/****************************************************************
*  template functions type & suffix
****************************************************************/
#define FSE_FUNCTION_TYPE BYTE
#define FSE_FUNCTION_EXTENSION


/****************************************************************
*  Byte symbol type
****************************************************************/
#endif   /* !FSE_COMMONDEFS_ONLY */


/****************************************************************
*  Compiler specifics
****************************************************************/
#ifdef _MSC_VER    /* Visual Studio */
#  define FORCE_INLINE static __forceinline
#  include <intrin.h>                    /* For Visual 2005 */
#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
#else
#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#    ifdef __GNUC__
#      define FORCE_INLINE static inline __attribute__((always_inline))
#    else
#      define FORCE_INLINE static inline
#    endif
#  else
#    define FORCE_INLINE static
#  endif /* __STDC_VERSION__ */
#endif


/****************************************************************
*  Includes
****************************************************************/
#include <stdlib.h>     /* malloc, free, qsort */
#include <string.h>     /* memcpy, memset */
#include <stdio.h>      /* printf (debug) */

/****************************************************************
*  Constants
*****************************************************************/
#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
#define FSE_MIN_TABLELOG 5

#define FSE_TABLELOG_ABSOLUTE_MAX 15
#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
#endif


/****************************************************************
*  Error Management
****************************************************************/
#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */


/****************************************************************
*  Complex types
****************************************************************/
typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];


/****************************************************************
*  Templates
****************************************************************/
/*
  designed to be included
  for type-specific functions (template emulation in C)
  Objective is to write these functions only once, for improved maintenance
*/

/* safety checks */
#ifndef FSE_FUNCTION_EXTENSION
#  error "FSE_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSE_FUNCTION_TYPE
#  error "FSE_FUNCTION_TYPE must be defined"
#endif

/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)


/* Function templates */

#define FSE_DECODE_TYPE FSE_decode_t

static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }

static size_t FSE_buildDTable
(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
    void* ptr = dt+1;
    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr;
    FSE_DTableHeader DTableH;
    const U32 tableSize = 1 << tableLog;
    const U32 tableMask = tableSize-1;
    const U32 step = FSE_tableStep(tableSize);
    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
    U32 position = 0;
    U32 highThreshold = tableSize-1;
    const S16 largeLimit= (S16)(1 << (tableLog-1));
    U32 noLarge = 1;
    U32 s;

    /* Sanity Checks */
    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);

    /* Init, lay down lowprob symbols */
    DTableH.tableLog = (U16)tableLog;
    for (s=0; s<=maxSymbolValue; s++)
    {
        if (normalizedCounter[s]==-1)
        {
            tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
            symbolNext[s] = 1;
        }
        else
        {
            if (normalizedCounter[s] >= largeLimit) noLarge=0;
            symbolNext[s] = normalizedCounter[s];
        }
    }

    /* Spread symbols */
    for (s=0; s<=maxSymbolValue; s++)
    {
        int i;
        for (i=0; i<normalizedCounter[s]; i++)
        {
            tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
            position = (position + step) & tableMask;
            while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
        }
    }

    if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */

    /* Build Decoding table */
    {
        U32 i;
        for (i=0; i<tableSize; i++)
        {
            FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
            U16 nextState = symbolNext[symbol]++;
            tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
            tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
        }
    }

    DTableH.fastMode = (U16)noLarge;
    memcpy(dt, &DTableH, sizeof(DTableH));   /* memcpy(), to avoid strict aliasing warnings */
    return 0;
}


#ifndef FSE_COMMONDEFS_ONLY
/******************************************
*  FSE helper functions
******************************************/
static unsigned FSE_isError(size_t code) { return ERR_isError(code); }


/****************************************************************
*  FSE NCount encoding-decoding
****************************************************************/
static short FSE_abs(short a)
{
    return (short)(a<0 ? -a : a);
}

static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
                              const void* headerBuffer, size_t hbSize)
{
    const BYTE* const istart = (const BYTE*) headerBuffer;
    const BYTE* const iend = istart + hbSize;
    const BYTE* ip = istart;
    int nbBits;
    int remaining;
    int threshold;
    U32 bitStream;
    int bitCount;
    unsigned charnum = 0;
    int previous0 = 0;

    if (hbSize < 4) return ERROR(srcSize_wrong);
    bitStream = MEM_readLE32(ip);
    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
    bitStream >>= 4;
    bitCount = 4;
    *tableLogPtr = nbBits;
    remaining = (1<<nbBits)+1;
    threshold = 1<<nbBits;
    nbBits++;

    while ((remaining>1) && (charnum<=*maxSVPtr))
    {
        if (previous0)
        {
            unsigned n0 = charnum;
            while ((bitStream & 0xFFFF) == 0xFFFF)
            {
                n0+=24;
                if (ip < iend-5)
                {
                    ip+=2;
                    bitStream = MEM_readLE32(ip) >> bitCount;
                }
                else
                {
                    bitStream >>= 16;
                    bitCount+=16;
                }
            }
            while ((bitStream & 3) == 3)
            {
                n0+=3;
                bitStream>>=2;
                bitCount+=2;
            }
            n0 += bitStream & 3;
            bitCount += 2;
            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
            while (charnum < n0) normalizedCounter[charnum++] = 0;
            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
            {
                ip += bitCount>>3;
                bitCount &= 7;
                bitStream = MEM_readLE32(ip) >> bitCount;
            }
            else
                bitStream >>= 2;
        }
        {
            const short max = (short)((2*threshold-1)-remaining);
            short count;

            if ((bitStream & (threshold-1)) < (U32)max)
            {
                count = (short)(bitStream & (threshold-1));
                bitCount += nbBits-1;
            }
            else
            {
                count = (short)(bitStream & (2*threshold-1));
                if (count >= threshold) count -= max;
                bitCount += nbBits;
            }

            count--;   /* extra accuracy */
            remaining -= FSE_abs(count);
            normalizedCounter[charnum++] = count;
            previous0 = !count;
            while (remaining < threshold)
            {
                nbBits--;
                threshold >>= 1;
            }

            {
                if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
                {
                    ip += bitCount>>3;
                    bitCount &= 7;
                }
                else
                {
                    bitCount -= (int)(8 * (iend - 4 - ip));
                    ip = iend - 4;
                }
                bitStream = MEM_readLE32(ip) >> (bitCount & 31);
            }
        }
    }
    if (remaining != 1) return ERROR(GENERIC);
    *maxSVPtr = charnum-1;

    ip += (bitCount+7)>>3;
    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
    return ip-istart;
}


/*********************************************************
*  Decompression (Byte symbols)
*********************************************************/
static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
{
    void* ptr = dt;
    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
    FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1;   /* because dt is unsigned */

    DTableH->tableLog = 0;
    DTableH->fastMode = 0;

    cell->newState = 0;
    cell->symbol = symbolValue;
    cell->nbBits = 0;

    return 0;
}


static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
{
    void* ptr = dt;
    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
    FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1;   /* because dt is unsigned */
    const unsigned tableSize = 1 << nbBits;
    const unsigned tableMask = tableSize - 1;
    const unsigned maxSymbolValue = tableMask;
    unsigned s;

    /* Sanity checks */
    if (nbBits < 1) return ERROR(GENERIC);         /* min size */

    /* Build Decoding Table */
    DTableH->tableLog = (U16)nbBits;
    DTableH->fastMode = 1;
    for (s=0; s<=maxSymbolValue; s++)
    {
        dinfo[s].newState = 0;
        dinfo[s].symbol = (BYTE)s;
        dinfo[s].nbBits = (BYTE)nbBits;
    }

    return 0;
}

FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
          void* dst, size_t maxDstSize,
    const void* cSrc, size_t cSrcSize,
    const FSE_DTable* dt, const unsigned fast)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const omax = op + maxDstSize;
    BYTE* const olimit = omax-3;

    BIT_DStream_t bitD;
    FSE_DState_t state1;
    FSE_DState_t state2;
    size_t errorCode;

    /* Init */
    errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressed Size */
    if (FSE_isError(errorCode)) return errorCode;

    FSE_initDState(&state1, &bitD, dt);
    FSE_initDState(&state2, &bitD, dt);

#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)

    /* 4 symbols per loop */
    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)
    {
        op[0] = FSE_GETSYMBOL(&state1);

        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            BIT_reloadDStream(&bitD);

        op[1] = FSE_GETSYMBOL(&state2);

        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }

        op[2] = FSE_GETSYMBOL(&state1);

        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
            BIT_reloadDStream(&bitD);

        op[3] = FSE_GETSYMBOL(&state2);
    }

    /* tail */
    /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
    while (1)
    {
        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
            break;

        *op++ = FSE_GETSYMBOL(&state1);

        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
            break;

        *op++ = FSE_GETSYMBOL(&state2);
    }

    /* end ? */
    if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
        return op-ostart;

    if (op==omax) return ERROR(dstSize_tooSmall);   /* dst buffer is full, but cSrc unfinished */

    return ERROR(corruption_detected);
}


static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
                                         const void* cSrc, size_t cSrcSize,
                                         const FSE_DTable* dt)
{
    FSE_DTableHeader DTableH;
    memcpy(&DTableH, dt, sizeof(DTableH));

    /* select fast mode (static) */
    if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
}


static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
{
    const BYTE* const istart = (const BYTE*)cSrc;
    const BYTE* ip = istart;
    short counting[FSE_MAX_SYMBOL_VALUE+1];
    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
    unsigned tableLog;
    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
    size_t errorCode;

    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */

    /* normal FSE decoding mode */
    errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
    if (FSE_isError(errorCode)) return errorCode;
    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */
    ip += errorCode;
    cSrcSize -= errorCode;

    errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
    if (FSE_isError(errorCode)) return errorCode;

    /* always return, even if it is an error code */
    return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
}



#endif   /* FSE_COMMONDEFS_ONLY */
/* ******************************************************************
   Huff0 : Huffman coder, part of New Generation Entropy library
   Copyright (C) 2013-2015, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED.
   IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
   - Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */

/****************************************************************
*  Compiler specifics
****************************************************************/
#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
/* inline is defined */
#elif defined(_MSC_VER)
#  define inline __inline
#else
#  define inline /* disable inline */
#endif


#ifdef _MSC_VER    /* Visual Studio */
#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
#endif


/****************************************************************
*  Includes
****************************************************************/
#include <stdlib.h>     /* malloc, free, qsort */
#include <string.h>     /* memcpy, memset */
#include <stdio.h>      /* printf (debug) */

/****************************************************************
*  Error Management
****************************************************************/
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */


/******************************************
*  Helper functions
******************************************/
static unsigned HUF_isError(size_t code) { return ERR_isError(code); }

#define HUF_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#define HUF_MAX_TABLELOG  12           /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
#define HUF_DEFAULT_TABLELOG  HUF_MAX_TABLELOG   /* tableLog by default, when not specified */
#define HUF_MAX_SYMBOL_VALUE 255
#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
#  error "HUF_MAX_TABLELOG is too large !"
#endif



/*********************************************************
*  Huff0 : Huffman block decompression
*********************************************************/
typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */

typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;   /* double-symbols decoding */

typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;

HUF_readStats 1538 Read compact Huffman tree, saved by HUF_writeCTable 1539 @huffWeight : destination buffer 1540 @return : size read from `src` 1541 */ 1542 static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, 1543 U32* nbSymbolsPtr, U32* tableLogPtr, 1544 const void* src, size_t srcSize) 1545 { 1546 U32 weightTotal; 1547 U32 tableLog; 1548 const BYTE* ip = (const BYTE*) src; 1549 size_t iSize; 1550 size_t oSize; 1551 U32 n; 1552 1553 if (!srcSize) return ERROR(srcSize_wrong); 1554 iSize = ip[0]; 1555 //memset(huffWeight, 0, hwSize); /* is not necessary, even though some analyzer complain ... */ 1556 1557 if (iSize >= 128) /* special header */ 1558 { 1559 if (iSize >= (242)) /* RLE */ 1560 { 1561 static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 }; 1562 oSize = l[iSize-242]; 1563 memset(huffWeight, 1, hwSize); 1564 iSize = 0; 1565 } 1566 else /* Incompressible */ 1567 { 1568 oSize = iSize - 127; 1569 iSize = ((oSize+1)/2); 1570 if (iSize+1 > srcSize) return ERROR(srcSize_wrong); 1571 if (oSize >= hwSize) return ERROR(corruption_detected); 1572 ip += 1; 1573 for (n=0; n<oSize; n+=2) 1574 { 1575 huffWeight[n] = ip[n/2] >> 4; 1576 huffWeight[n+1] = ip[n/2] & 15; 1577 } 1578 } 1579 } 1580 else /* header compressed with FSE (normal case) */ 1581 { 1582 if (iSize+1 > srcSize) return ERROR(srcSize_wrong); 1583 oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */ 1584 if (FSE_isError(oSize)) return oSize; 1585 } 1586 1587 /* collect weight stats */ 1588 memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32)); 1589 weightTotal = 0; 1590 for (n=0; n<oSize; n++) 1591 { 1592 if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); 1593 rankStats[huffWeight[n]]++; 1594 weightTotal += (1 << huffWeight[n]) >> 1; 1595 } 1596 if (weightTotal == 0) return ERROR(corruption_detected); 1597 1598 /* get last non-null symbol weight (implied, total must be 2^n) */ 1599 tableLog = BIT_highbit32(weightTotal) + 1; 1600 if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); 1601 { 1602 U32 total = 1 << tableLog; 1603 U32 rest = total - weightTotal; 1604 U32 verif = 1 << BIT_highbit32(rest); 1605 U32 lastWeight = BIT_highbit32(rest) + 1; 1606 if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ 1607 huffWeight[oSize] = (BYTE)lastWeight; 1608 rankStats[lastWeight]++; 1609 } 1610 1611 /* check tree construction validity */ 1612 if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ 1613 1614 /* results */ 1615 *nbSymbolsPtr = (U32)(oSize+1); 1616 *tableLogPtr = tableLog; 1617 return iSize+1; 1618 } 1619 1620 1621 /**************************/ 1622 /* single-symbol decoding */ 1623 /**************************/ 1624 1625 static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize) 1626 { 1627 BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1]; 1628 U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */ 1629 U32 tableLog = 0; 1630 const BYTE* ip = (const BYTE*) src; 1631 size_t iSize = ip[0]; 1632 U32 nbSymbols = 0; 1633 U32 n; 1634 U32 nextRankStart; 1635 void* ptr = DTable+1; 1636 HUF_DEltX2* const dt = (HUF_DEltX2*)ptr; 1637 1638 HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */ 1639 //memset(huffWeight, 0, sizeof(huffWeight)); 
/* is not necessary, even though some analyzer complain ... */ 1640 1641 iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); 1642 if (HUF_isError(iSize)) return iSize; 1643 1644 /* check result */ 1645 if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */ 1646 DTable[0] = (U16)tableLog; /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */ 1647 1648 /* Prepare ranks */ 1649 nextRankStart = 0; 1650 for (n=1; n<=tableLog; n++) 1651 { 1652 U32 current = nextRankStart; 1653 nextRankStart += (rankVal[n] << (n-1)); 1654 rankVal[n] = current; 1655 } 1656 1657 /* fill DTable */ 1658 for (n=0; n<nbSymbols; n++) 1659 { 1660 const U32 w = huffWeight[n]; 1661 const U32 length = (1 << w) >> 1; 1662 U32 i; 1663 HUF_DEltX2 D; 1664 D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w); 1665 for (i = rankVal[w]; i < rankVal[w] + length; i++) 1666 dt[i] = D; 1667 rankVal[w] += length; 1668 } 1669 1670 return iSize; 1671 } 1672 1673 static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog) 1674 { 1675 const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ 1676 const BYTE c = dt[val].byte; 1677 BIT_skipBits(Dstream, dt[val].nbBits); 1678 return c; 1679 } 1680 1681 #define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ 1682 *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog) 1683 1684 #define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ 1685 if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \ 1686 HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) 1687 1688 #define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ 1689 if (MEM_64bits()) \ 1690 HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) 1691 1692 static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog) 1693 { 1694 BYTE* const pStart = p; 1695 1696 /* up to 4 symbols at a time */ 1697 while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4)) 1698 { 1699 HUF_DECODE_SYMBOLX2_2(p, bitDPtr); 1700 HUF_DECODE_SYMBOLX2_1(p, bitDPtr); 1701 HUF_DECODE_SYMBOLX2_2(p, bitDPtr); 1702 HUF_DECODE_SYMBOLX2_0(p, bitDPtr); 1703 } 1704 1705 /* closer to the end */ 1706 while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd)) 1707 HUF_DECODE_SYMBOLX2_0(p, bitDPtr); 1708 1709 /* no more data to retrieve from bitstream, hence no need to reload */ 1710 while (p < pEnd) 1711 HUF_DECODE_SYMBOLX2_0(p, bitDPtr); 1712 1713 return pEnd-pStart; 1714 } 1715 1716 1717 static size_t HUF_decompress4X2_usingDTable( 1718 void* dst, size_t dstSize, 1719 const void* cSrc, size_t cSrcSize, 1720 const U16* DTable) 1721 { 1722 if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ 1723 1724 { 1725 const BYTE* const istart = (const BYTE*) cSrc; 1726 BYTE* const ostart = (BYTE*) dst; 1727 BYTE* const oend = ostart + dstSize; 1728 1729 const void* ptr = DTable; 1730 const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1; 1731 const U32 dtLog = DTable[0]; 1732 size_t errorCode; 1733 1734 /* Init */ 1735 BIT_DStream_t bitD1; 1736 BIT_DStream_t bitD2; 1737 BIT_DStream_t bitD3; 1738 BIT_DStream_t bitD4; 1739 const size_t length1 = MEM_readLE16(istart); 1740 const size_t length2 = MEM_readLE16(istart+2); 1741 const size_t length3 = MEM_readLE16(istart+4); 1742 size_t length4; 1743 const BYTE* const istart1 = istart + 6; /* jumpTable */ 1744 const BYTE* const istart2 = istart1 + 
length1; 1745 const BYTE* const istart3 = istart2 + length2; 1746 const BYTE* const istart4 = istart3 + length3; 1747 const size_t segmentSize = (dstSize+3) / 4; 1748 BYTE* const opStart2 = ostart + segmentSize; 1749 BYTE* const opStart3 = opStart2 + segmentSize; 1750 BYTE* const opStart4 = opStart3 + segmentSize; 1751 BYTE* op1 = ostart; 1752 BYTE* op2 = opStart2; 1753 BYTE* op3 = opStart3; 1754 BYTE* op4 = opStart4; 1755 U32 endSignal; 1756 1757 length4 = cSrcSize - (length1 + length2 + length3 + 6); 1758 if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ 1759 errorCode = BIT_initDStream(&bitD1, istart1, length1); 1760 if (HUF_isError(errorCode)) return errorCode; 1761 errorCode = BIT_initDStream(&bitD2, istart2, length2); 1762 if (HUF_isError(errorCode)) return errorCode; 1763 errorCode = BIT_initDStream(&bitD3, istart3, length3); 1764 if (HUF_isError(errorCode)) return errorCode; 1765 errorCode = BIT_initDStream(&bitD4, istart4, length4); 1766 if (HUF_isError(errorCode)) return errorCode; 1767 1768 /* 16-32 symbols per loop (4-8 symbols per stream) */ 1769 endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); 1770 for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) 1771 { 1772 HUF_DECODE_SYMBOLX2_2(op1, &bitD1); 1773 HUF_DECODE_SYMBOLX2_2(op2, &bitD2); 1774 HUF_DECODE_SYMBOLX2_2(op3, &bitD3); 1775 HUF_DECODE_SYMBOLX2_2(op4, &bitD4); 1776 HUF_DECODE_SYMBOLX2_1(op1, &bitD1); 1777 HUF_DECODE_SYMBOLX2_1(op2, &bitD2); 1778 HUF_DECODE_SYMBOLX2_1(op3, &bitD3); 1779 HUF_DECODE_SYMBOLX2_1(op4, &bitD4); 1780 HUF_DECODE_SYMBOLX2_2(op1, &bitD1); 1781 HUF_DECODE_SYMBOLX2_2(op2, &bitD2); 1782 HUF_DECODE_SYMBOLX2_2(op3, &bitD3); 1783 HUF_DECODE_SYMBOLX2_2(op4, &bitD4); 1784 HUF_DECODE_SYMBOLX2_0(op1, &bitD1); 1785 HUF_DECODE_SYMBOLX2_0(op2, &bitD2); 1786 HUF_DECODE_SYMBOLX2_0(op3, &bitD3); 1787 HUF_DECODE_SYMBOLX2_0(op4, &bitD4); 1788 1789 endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); 1790 } 1791 1792 /* check corruption */ 1793 if (op1 > opStart2) return ERROR(corruption_detected); 1794 if (op2 > opStart3) return ERROR(corruption_detected); 1795 if (op3 > opStart4) return ERROR(corruption_detected); 1796 /* note : op4 supposed already verified within main loop */ 1797 1798 /* finish bitStreams one by one */ 1799 HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); 1800 HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); 1801 HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); 1802 HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); 1803 1804 /* check */ 1805 endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); 1806 if (!endSignal) return ERROR(corruption_detected); 1807 1808 /* decoded size */ 1809 return dstSize; 1810 } 1811 } 1812 1813 1814 static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) 1815 { 1816 HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG); 1817 const BYTE* ip = (const BYTE*) cSrc; 1818 size_t errorCode; 1819 1820 errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize); 1821 if (HUF_isError(errorCode)) return errorCode; 1822 if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); 1823 ip += errorCode; 1824 cSrcSize -= errorCode; 1825 1826 return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable); 1827 } 1828 1829 1830 /***************************/ 1831 /* double-symbols decoding 
*/ 1832 /***************************/ 1833 1834 static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed, 1835 const U32* rankValOrigin, const int minWeight, 1836 const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, 1837 U32 nbBitsBaseline, U16 baseSeq) 1838 { 1839 HUF_DEltX4 DElt; 1840 U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; 1841 U32 s; 1842 1843 /* get pre-calculated rankVal */ 1844 memcpy(rankVal, rankValOrigin, sizeof(rankVal)); 1845 1846 /* fill skipped values */ 1847 if (minWeight>1) 1848 { 1849 U32 i, skipSize = rankVal[minWeight]; 1850 MEM_writeLE16(&(DElt.sequence), baseSeq); 1851 DElt.nbBits = (BYTE)(consumed); 1852 DElt.length = 1; 1853 for (i = 0; i < skipSize; i++) 1854 DTable[i] = DElt; 1855 } 1856 1857 /* fill DTable */ 1858 for (s=0; s<sortedListSize; s++) /* note : sortedSymbols already skipped */ 1859 { 1860 const U32 symbol = sortedSymbols[s].symbol; 1861 const U32 weight = sortedSymbols[s].weight; 1862 const U32 nbBits = nbBitsBaseline - weight; 1863 const U32 length = 1 << (sizeLog-nbBits); 1864 const U32 start = rankVal[weight]; 1865 U32 i = start; 1866 const U32 end = start + length; 1867 1868 MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8))); 1869 DElt.nbBits = (BYTE)(nbBits + consumed); 1870 DElt.length = 2; 1871 do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */ 1872 1873 rankVal[weight] += length; 1874 } 1875 } 1876 1877 typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1]; 1878 1879 static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog, 1880 const sortedSymbol_t* sortedList, const U32 sortedListSize, 1881 const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, 1882 const U32 nbBitsBaseline) 1883 { 1884 U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; 1885 const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ 1886 const U32 minBits = nbBitsBaseline - maxWeight; 1887 U32 s; 1888 1889 memcpy(rankVal, rankValOrigin, sizeof(rankVal)); 1890 1891 /* fill DTable */ 1892 for (s=0; s<sortedListSize; s++) 1893 { 1894 const U16 symbol = sortedList[s].symbol; 1895 const U32 weight = sortedList[s].weight; 1896 const U32 nbBits = nbBitsBaseline - weight; 1897 const U32 start = rankVal[weight]; 1898 const U32 length = 1 << (targetLog-nbBits); 1899 1900 if (targetLog-nbBits >= minBits) /* enough room for a second symbol */ 1901 { 1902 U32 sortedRank; 1903 int minWeight = nbBits + scaleLog; 1904 if (minWeight < 1) minWeight = 1; 1905 sortedRank = rankStart[minWeight]; 1906 HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits, 1907 rankValOrigin[nbBits], minWeight, 1908 sortedList+sortedRank, sortedListSize-sortedRank, 1909 nbBitsBaseline, symbol); 1910 } 1911 else 1912 { 1913 U32 i; 1914 const U32 end = start + length; 1915 HUF_DEltX4 DElt; 1916 1917 MEM_writeLE16(&(DElt.sequence), symbol); 1918 DElt.nbBits = (BYTE)(nbBits); 1919 DElt.length = 1; 1920 for (i = start; i < end; i++) 1921 DTable[i] = DElt; 1922 } 1923 rankVal[weight] += length; 1924 } 1925 } 1926 1927 static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize) 1928 { 1929 BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1]; 1930 sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1]; 1931 U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 }; 1932 U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 }; 1933 U32* const rankStart = rankStart0+1; 1934 rankVal_t rankVal; 1935 U32 tableLog, maxW, sizeOfSort, nbSymbols; 1936 const U32 memLog = 
DTable[0]; 1937 const BYTE* ip = (const BYTE*) src; 1938 size_t iSize = ip[0]; 1939 void* ptr = DTable; 1940 HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1; 1941 1942 HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */ 1943 if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge); 1944 //memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzer complain ... */ 1945 1946 iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); 1947 if (HUF_isError(iSize)) return iSize; 1948 1949 /* check result */ 1950 if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ 1951 1952 /* find maxWeight */ 1953 for (maxW = tableLog; rankStats[maxW]==0; maxW--) 1954 {if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */ 1955 1956 /* Get start index of each weight */ 1957 { 1958 U32 w, nextRankStart = 0; 1959 for (w=1; w<=maxW; w++) 1960 { 1961 U32 current = nextRankStart; 1962 nextRankStart += rankStats[w]; 1963 rankStart[w] = current; 1964 } 1965 rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/ 1966 sizeOfSort = nextRankStart; 1967 } 1968 1969 /* sort symbols by weight */ 1970 { 1971 U32 s; 1972 for (s=0; s<nbSymbols; s++) 1973 { 1974 U32 w = weightList[s]; 1975 U32 r = rankStart[w]++; 1976 sortedSymbol[r].symbol = (BYTE)s; 1977 sortedSymbol[r].weight = (BYTE)w; 1978 } 1979 rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */ 1980 } 1981 1982 /* Build rankVal */ 1983 { 1984 const U32 minBits = tableLog+1 - maxW; 1985 U32 nextRankVal = 0; 1986 U32 w, consumed; 1987 const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */ 1988 U32* rankVal0 = rankVal[0]; 1989 for (w=1; w<=maxW; w++) 1990 { 1991 U32 current = nextRankVal; 1992 nextRankVal += rankStats[w] << (w+rescale); 1993 rankVal0[w] = current; 1994 } 1995 for (consumed = minBits; consumed <= memLog - minBits; consumed++) 1996 { 1997 U32* rankValPtr = rankVal[consumed]; 1998 for (w = 1; w <= maxW; w++) 1999 { 2000 rankValPtr[w] = rankVal0[w] >> consumed; 2001 } 2002 } 2003 } 2004 2005 HUF_fillDTableX4(dt, memLog, 2006 sortedSymbol, sizeOfSort, 2007 rankStart0, rankVal, maxW, 2008 tableLog+1); 2009 2010 return iSize; 2011 } 2012 2013 2014 static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) 2015 { 2016 const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ 2017 memcpy(op, dt+val, 2); 2018 BIT_skipBits(DStream, dt[val].nbBits); 2019 return dt[val].length; 2020 } 2021 2022 static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) 2023 { 2024 const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ 2025 memcpy(op, dt+val, 1); 2026 if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits); 2027 else 2028 { 2029 if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) 2030 { 2031 BIT_skipBits(DStream, dt[val].nbBits); 2032 if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) 2033 DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. 
Note : can't easily extract nbBits from just this symbol */ 2034 } 2035 } 2036 return 1; 2037 } 2038 2039 2040 #define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \ 2041 ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) 2042 2043 #define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \ 2044 if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \ 2045 ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) 2046 2047 #define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \ 2048 if (MEM_64bits()) \ 2049 ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) 2050 2051 static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog) 2052 { 2053 BYTE* const pStart = p; 2054 2055 /* up to 8 symbols at a time */ 2056 while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7)) 2057 { 2058 HUF_DECODE_SYMBOLX4_2(p, bitDPtr); 2059 HUF_DECODE_SYMBOLX4_1(p, bitDPtr); 2060 HUF_DECODE_SYMBOLX4_2(p, bitDPtr); 2061 HUF_DECODE_SYMBOLX4_0(p, bitDPtr); 2062 } 2063 2064 /* closer to the end */ 2065 while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2)) 2066 HUF_DECODE_SYMBOLX4_0(p, bitDPtr); 2067 2068 while (p <= pEnd-2) 2069 HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ 2070 2071 if (p < pEnd) 2072 p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog); 2073 2074 return p-pStart; 2075 } 2076 2077 2078 2079 static size_t HUF_decompress4X4_usingDTable( 2080 void* dst, size_t dstSize, 2081 const void* cSrc, size_t cSrcSize, 2082 const U32* DTable) 2083 { 2084 if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ 2085 2086 { 2087 const BYTE* const istart = (const BYTE*) cSrc; 2088 BYTE* const ostart = (BYTE*) dst; 2089 BYTE* const oend = ostart + dstSize; 2090 2091 const void* ptr = DTable; 2092 const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1; 2093 const U32 dtLog = DTable[0]; 2094 size_t errorCode; 2095 2096 /* Init */ 2097 BIT_DStream_t bitD1; 2098 BIT_DStream_t bitD2; 2099 BIT_DStream_t bitD3; 2100 BIT_DStream_t bitD4; 2101 const size_t length1 = MEM_readLE16(istart); 2102 const size_t length2 = MEM_readLE16(istart+2); 2103 const size_t length3 = MEM_readLE16(istart+4); 2104 size_t length4; 2105 const BYTE* const istart1 = istart + 6; /* jumpTable */ 2106 const BYTE* const istart2 = istart1 + length1; 2107 const BYTE* const istart3 = istart2 + length2; 2108 const BYTE* const istart4 = istart3 + length3; 2109 const size_t segmentSize = (dstSize+3) / 4; 2110 BYTE* const opStart2 = ostart + segmentSize; 2111 BYTE* const opStart3 = opStart2 + segmentSize; 2112 BYTE* const opStart4 = opStart3 + segmentSize; 2113 BYTE* op1 = ostart; 2114 BYTE* op2 = opStart2; 2115 BYTE* op3 = opStart3; 2116 BYTE* op4 = opStart4; 2117 U32 endSignal; 2118 2119 length4 = cSrcSize - (length1 + length2 + length3 + 6); 2120 if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ 2121 errorCode = BIT_initDStream(&bitD1, istart1, length1); 2122 if (HUF_isError(errorCode)) return errorCode; 2123 errorCode = BIT_initDStream(&bitD2, istart2, length2); 2124 if (HUF_isError(errorCode)) return errorCode; 2125 errorCode = BIT_initDStream(&bitD3, istart3, length3); 2126 if (HUF_isError(errorCode)) return errorCode; 2127 errorCode = BIT_initDStream(&bitD4, istart4, length4); 2128 if (HUF_isError(errorCode)) return errorCode; 2129 2130 /* 16-32 symbols per loop (4-8 symbols per stream) */ 2131 endSignal = BIT_reloadDStream(&bitD1) | 
BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); 2132 for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) 2133 { 2134 HUF_DECODE_SYMBOLX4_2(op1, &bitD1); 2135 HUF_DECODE_SYMBOLX4_2(op2, &bitD2); 2136 HUF_DECODE_SYMBOLX4_2(op3, &bitD3); 2137 HUF_DECODE_SYMBOLX4_2(op4, &bitD4); 2138 HUF_DECODE_SYMBOLX4_1(op1, &bitD1); 2139 HUF_DECODE_SYMBOLX4_1(op2, &bitD2); 2140 HUF_DECODE_SYMBOLX4_1(op3, &bitD3); 2141 HUF_DECODE_SYMBOLX4_1(op4, &bitD4); 2142 HUF_DECODE_SYMBOLX4_2(op1, &bitD1); 2143 HUF_DECODE_SYMBOLX4_2(op2, &bitD2); 2144 HUF_DECODE_SYMBOLX4_2(op3, &bitD3); 2145 HUF_DECODE_SYMBOLX4_2(op4, &bitD4); 2146 HUF_DECODE_SYMBOLX4_0(op1, &bitD1); 2147 HUF_DECODE_SYMBOLX4_0(op2, &bitD2); 2148 HUF_DECODE_SYMBOLX4_0(op3, &bitD3); 2149 HUF_DECODE_SYMBOLX4_0(op4, &bitD4); 2150 2151 endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); 2152 } 2153 2154 /* check corruption */ 2155 if (op1 > opStart2) return ERROR(corruption_detected); 2156 if (op2 > opStart3) return ERROR(corruption_detected); 2157 if (op3 > opStart4) return ERROR(corruption_detected); 2158 /* note : op4 supposed already verified within main loop */ 2159 2160 /* finish bitStreams one by one */ 2161 HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog); 2162 HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog); 2163 HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog); 2164 HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog); 2165 2166 /* check */ 2167 endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); 2168 if (!endSignal) return ERROR(corruption_detected); 2169 2170 /* decoded size */ 2171 return dstSize; 2172 } 2173 } 2174 2175 2176 static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) 2177 { 2178 HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG); 2179 const BYTE* ip = (const BYTE*) cSrc; 2180 2181 size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize); 2182 if (HUF_isError(hSize)) return hSize; 2183 if (hSize >= cSrcSize) return ERROR(srcSize_wrong); 2184 ip += hSize; 2185 cSrcSize -= hSize; 2186 2187 return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable); 2188 } 2189 2190 2191 /**********************************/ 2192 /* quad-symbol decoding */ 2193 /**********************************/ 2194 typedef struct { BYTE nbBits; BYTE nbBytes; } HUF_DDescX6; 2195 typedef union { BYTE byte[4]; U32 sequence; } HUF_DSeqX6; 2196 2197 /* recursive, up to level 3; may benefit from <template>-like strategy to nest each level inline */ 2198 static void HUF_fillDTableX6LevelN(HUF_DDescX6* DDescription, HUF_DSeqX6* DSequence, int sizeLog, 2199 const rankVal_t rankValOrigin, const U32 consumed, const int minWeight, const U32 maxWeight, 2200 const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, const U32* rankStart, 2201 const U32 nbBitsBaseline, HUF_DSeqX6 baseSeq, HUF_DDescX6 DDesc) 2202 { 2203 const int scaleLog = nbBitsBaseline - sizeLog; /* note : targetLog >= (nbBitsBaseline-1), hence scaleLog <= 1 */ 2204 const int minBits = nbBitsBaseline - maxWeight; 2205 const U32 level = DDesc.nbBytes; 2206 U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; 2207 U32 symbolStartPos, s; 2208 2209 /* local rankVal, will be modified */ 2210 memcpy(rankVal, rankValOrigin[consumed], sizeof(rankVal)); 2211 2212 /* fill skipped values */ 2213 if (minWeight>1) 2214 { 2215 U32 i; 2216 const U32 skipSize = rankVal[minWeight]; 2217 
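/* note : positions below rankVal[minWeight] cannot take an extra symbol at this depth
   (weights below minWeight would need more bits than remain), so the loop below simply
   pre-fills them with the prefix decoded so far (baseSeq / DDesc) ; a lookup landing
   there re-emits the parent sequence unchanged */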
for (i = 0; i < skipSize; i++) 2218 { 2219 DSequence[i] = baseSeq; 2220 DDescription[i] = DDesc; 2221 } 2222 } 2223 2224 /* fill DTable */ 2225 DDesc.nbBytes++; 2226 symbolStartPos = rankStart[minWeight]; 2227 for (s=symbolStartPos; s<sortedListSize; s++) 2228 { 2229 const BYTE symbol = sortedSymbols[s].symbol; 2230 const U32 weight = sortedSymbols[s].weight; /* >= 1 (sorted) */ 2231 const int nbBits = nbBitsBaseline - weight; /* >= 1 (by construction) */ 2232 const int totalBits = consumed+nbBits; 2233 const U32 start = rankVal[weight]; 2234 const U32 length = 1 << (sizeLog-nbBits); 2235 baseSeq.byte[level] = symbol; 2236 DDesc.nbBits = (BYTE)totalBits; 2237 2238 if ((level<3) && (sizeLog-totalBits >= minBits)) /* enough room for another symbol */ 2239 { 2240 int nextMinWeight = totalBits + scaleLog; 2241 if (nextMinWeight < 1) nextMinWeight = 1; 2242 HUF_fillDTableX6LevelN(DDescription+start, DSequence+start, sizeLog-nbBits, 2243 rankValOrigin, totalBits, nextMinWeight, maxWeight, 2244 sortedSymbols, sortedListSize, rankStart, 2245 nbBitsBaseline, baseSeq, DDesc); /* recursive (max : level 3) */ 2246 } 2247 else 2248 { 2249 U32 i; 2250 const U32 end = start + length; 2251 for (i = start; i < end; i++) 2252 { 2253 DDescription[i] = DDesc; 2254 DSequence[i] = baseSeq; 2255 } 2256 } 2257 rankVal[weight] += length; 2258 } 2259 } 2260 2261 2262 /* note : same preparation as X4 */ 2263 static size_t HUF_readDTableX6 (U32* DTable, const void* src, size_t srcSize) 2264 { 2265 BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1]; 2266 sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1]; 2267 U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 }; 2268 U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 }; 2269 U32* const rankStart = rankStart0+1; 2270 U32 tableLog, maxW, sizeOfSort, nbSymbols; 2271 rankVal_t rankVal; 2272 const U32 memLog = DTable[0]; 2273 const BYTE* ip = (const BYTE*) src; 2274 size_t iSize = ip[0]; 2275 2276 if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge); 2277 //memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzer complain ... 
*/ 2278 2279 iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); 2280 if (HUF_isError(iSize)) return iSize; 2281 2282 /* check result */ 2283 if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable is too small */ 2284 2285 /* find maxWeight */ 2286 for (maxW = tableLog; rankStats[maxW]==0; maxW--) 2287 { if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */ 2288 2289 2290 /* Get start index of each weight */ 2291 { 2292 U32 w, nextRankStart = 0; 2293 for (w=1; w<=maxW; w++) 2294 { 2295 U32 current = nextRankStart; 2296 nextRankStart += rankStats[w]; 2297 rankStart[w] = current; 2298 } 2299 rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/ 2300 sizeOfSort = nextRankStart; 2301 } 2302 2303 /* sort symbols by weight */ 2304 { 2305 U32 s; 2306 for (s=0; s<nbSymbols; s++) 2307 { 2308 U32 w = weightList[s]; 2309 U32 r = rankStart[w]++; 2310 sortedSymbol[r].symbol = (BYTE)s; 2311 sortedSymbol[r].weight = (BYTE)w; 2312 } 2313 rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */ 2314 } 2315 2316 /* Build rankVal */ 2317 { 2318 const U32 minBits = tableLog+1 - maxW; 2319 U32 nextRankVal = 0; 2320 U32 w, consumed; 2321 const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */ 2322 U32* rankVal0 = rankVal[0]; 2323 for (w=1; w<=maxW; w++) 2324 { 2325 U32 current = nextRankVal; 2326 nextRankVal += rankStats[w] << (w+rescale); 2327 rankVal0[w] = current; 2328 } 2329 for (consumed = minBits; consumed <= memLog - minBits; consumed++) 2330 { 2331 U32* rankValPtr = rankVal[consumed]; 2332 for (w = 1; w <= maxW; w++) 2333 { 2334 rankValPtr[w] = rankVal0[w] >> consumed; 2335 } 2336 } 2337 } 2338 2339 2340 /* fill tables */ 2341 { 2342 void* ptr = DTable+1; 2343 HUF_DDescX6* DDescription = (HUF_DDescX6*)(ptr); 2344 void* dSeqStart = DTable + 1 + ((size_t)1<<(memLog-1)); 2345 HUF_DSeqX6* DSequence = (HUF_DSeqX6*)(dSeqStart); 2346 HUF_DSeqX6 DSeq; 2347 HUF_DDescX6 DDesc; 2348 DSeq.sequence = 0; 2349 DDesc.nbBits = 0; 2350 DDesc.nbBytes = 0; 2351 HUF_fillDTableX6LevelN(DDescription, DSequence, memLog, 2352 (const U32 (*)[HUF_ABSOLUTEMAX_TABLELOG + 1])rankVal, 0, 1, maxW, 2353 sortedSymbol, sizeOfSort, rankStart0, 2354 tableLog+1, DSeq, DDesc); 2355 } 2356 2357 return iSize; 2358 } 2359 2360 2361 static U32 HUF_decodeSymbolX6(void* op, BIT_DStream_t* DStream, const HUF_DDescX6* dd, const HUF_DSeqX6* ds, const U32 dtLog) 2362 { 2363 const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ 2364 memcpy(op, ds+val, sizeof(HUF_DSeqX6)); 2365 BIT_skipBits(DStream, dd[val].nbBits); 2366 return dd[val].nbBytes; 2367 } 2368 2369 static U32 HUF_decodeLastSymbolsX6(void* op, const U32 maxL, BIT_DStream_t* DStream, 2370 const HUF_DDescX6* dd, const HUF_DSeqX6* ds, const U32 dtLog) 2371 { 2372 const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ 2373 U32 length = dd[val].nbBytes; 2374 if (length <= maxL) 2375 { 2376 memcpy(op, ds+val, length); 2377 BIT_skipBits(DStream, dd[val].nbBits); 2378 return length; 2379 } 2380 memcpy(op, ds+val, maxL); 2381 if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) 2382 { 2383 BIT_skipBits(DStream, dd[val].nbBits); 2384 if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) 2385 DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. 
Note : can't easily extract nbBits from just this symbol */ 2386 } 2387 return maxL; 2388 } 2389 2390 2391 #define HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr) \ 2392 ptr += HUF_decodeSymbolX6(ptr, DStreamPtr, dd, ds, dtLog) 2393 2394 #define HUF_DECODE_SYMBOLX6_1(ptr, DStreamPtr) \ 2395 if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \ 2396 HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr) 2397 2398 #define HUF_DECODE_SYMBOLX6_2(ptr, DStreamPtr) \ 2399 if (MEM_64bits()) \ 2400 HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr) 2401 2402 static inline size_t HUF_decodeStreamX6(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const U32* DTable, const U32 dtLog) 2403 { 2404 const void* ddPtr = DTable+1; 2405 const HUF_DDescX6* dd = (const HUF_DDescX6*)(ddPtr); 2406 const void* dsPtr = DTable + 1 + ((size_t)1<<(dtLog-1)); 2407 const HUF_DSeqX6* ds = (const HUF_DSeqX6*)(dsPtr); 2408 BYTE* const pStart = p; 2409 2410 /* up to 16 symbols at a time */ 2411 while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-16)) 2412 { 2413 HUF_DECODE_SYMBOLX6_2(p, bitDPtr); 2414 HUF_DECODE_SYMBOLX6_1(p, bitDPtr); 2415 HUF_DECODE_SYMBOLX6_2(p, bitDPtr); 2416 HUF_DECODE_SYMBOLX6_0(p, bitDPtr); 2417 } 2418 2419 /* closer to the end, up to 4 symbols at a time */ 2420 while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4)) 2421 HUF_DECODE_SYMBOLX6_0(p, bitDPtr); 2422 2423 while (p <= pEnd-4) 2424 HUF_DECODE_SYMBOLX6_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ 2425 2426 while (p < pEnd) 2427 p += HUF_decodeLastSymbolsX6(p, (U32)(pEnd-p), bitDPtr, dd, ds, dtLog); 2428 2429 return p-pStart; 2430 } 2431 2432 2433 2434 static size_t HUF_decompress4X6_usingDTable( 2435 void* dst, size_t dstSize, 2436 const void* cSrc, size_t cSrcSize, 2437 const U32* DTable) 2438 { 2439 if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ 2440 2441 { 2442 const BYTE* const istart = (const BYTE*) cSrc; 2443 BYTE* const ostart = (BYTE*) dst; 2444 BYTE* const oend = ostart + dstSize; 2445 2446 const U32 dtLog = DTable[0]; 2447 const void* ddPtr = DTable+1; 2448 const HUF_DDescX6* dd = (const HUF_DDescX6*)(ddPtr); 2449 const void* dsPtr = DTable + 1 + ((size_t)1<<(dtLog-1)); 2450 const HUF_DSeqX6* ds = (const HUF_DSeqX6*)(dsPtr); 2451 size_t errorCode; 2452 2453 /* Init */ 2454 BIT_DStream_t bitD1; 2455 BIT_DStream_t bitD2; 2456 BIT_DStream_t bitD3; 2457 BIT_DStream_t bitD4; 2458 const size_t length1 = MEM_readLE16(istart); 2459 const size_t length2 = MEM_readLE16(istart+2); 2460 const size_t length3 = MEM_readLE16(istart+4); 2461 size_t length4; 2462 const BYTE* const istart1 = istart + 6; /* jumpTable */ 2463 const BYTE* const istart2 = istart1 + length1; 2464 const BYTE* const istart3 = istart2 + length2; 2465 const BYTE* const istart4 = istart3 + length3; 2466 const size_t segmentSize = (dstSize+3) / 4; 2467 BYTE* const opStart2 = ostart + segmentSize; 2468 BYTE* const opStart3 = opStart2 + segmentSize; 2469 BYTE* const opStart4 = opStart3 + segmentSize; 2470 BYTE* op1 = ostart; 2471 BYTE* op2 = opStart2; 2472 BYTE* op3 = opStart3; 2473 BYTE* op4 = opStart4; 2474 U32 endSignal; 2475 2476 length4 = cSrcSize - (length1 + length2 + length3 + 6); 2477 if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ 2478 errorCode = BIT_initDStream(&bitD1, istart1, length1); 2479 if (HUF_isError(errorCode)) return errorCode; 2480 errorCode = BIT_initDStream(&bitD2, istart2, length2); 2481 if (HUF_isError(errorCode)) return errorCode; 2482 
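/* note : stream sizes 1-3 come from the 6-byte jump table read above (three little-endian
   16-bit lengths) ; stream 4 takes whatever compressed input remains, and each stream
   regenerates its own segment of (dstSize+3)/4 output bytes, the last one possibly shorter */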
errorCode = BIT_initDStream(&bitD3, istart3, length3); 2483 if (HUF_isError(errorCode)) return errorCode; 2484 errorCode = BIT_initDStream(&bitD4, istart4, length4); 2485 if (HUF_isError(errorCode)) return errorCode; 2486 2487 /* 16-64 symbols per loop (4-16 symbols per stream) */ 2488 endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); 2489 for ( ; (op3 <= opStart4) && (endSignal==BIT_DStream_unfinished) && (op4<=(oend-16)) ; ) 2490 { 2491 HUF_DECODE_SYMBOLX6_2(op1, &bitD1); 2492 HUF_DECODE_SYMBOLX6_2(op2, &bitD2); 2493 HUF_DECODE_SYMBOLX6_2(op3, &bitD3); 2494 HUF_DECODE_SYMBOLX6_2(op4, &bitD4); 2495 HUF_DECODE_SYMBOLX6_1(op1, &bitD1); 2496 HUF_DECODE_SYMBOLX6_1(op2, &bitD2); 2497 HUF_DECODE_SYMBOLX6_1(op3, &bitD3); 2498 HUF_DECODE_SYMBOLX6_1(op4, &bitD4); 2499 HUF_DECODE_SYMBOLX6_2(op1, &bitD1); 2500 HUF_DECODE_SYMBOLX6_2(op2, &bitD2); 2501 HUF_DECODE_SYMBOLX6_2(op3, &bitD3); 2502 HUF_DECODE_SYMBOLX6_2(op4, &bitD4); 2503 HUF_DECODE_SYMBOLX6_0(op1, &bitD1); 2504 HUF_DECODE_SYMBOLX6_0(op2, &bitD2); 2505 HUF_DECODE_SYMBOLX6_0(op3, &bitD3); 2506 HUF_DECODE_SYMBOLX6_0(op4, &bitD4); 2507 2508 endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); 2509 } 2510 2511 /* check corruption */ 2512 if (op1 > opStart2) return ERROR(corruption_detected); 2513 if (op2 > opStart3) return ERROR(corruption_detected); 2514 if (op3 > opStart4) return ERROR(corruption_detected); 2515 /* note : op4 supposed already verified within main loop */ 2516 2517 /* finish bitStreams one by one */ 2518 HUF_decodeStreamX6(op1, &bitD1, opStart2, DTable, dtLog); 2519 HUF_decodeStreamX6(op2, &bitD2, opStart3, DTable, dtLog); 2520 HUF_decodeStreamX6(op3, &bitD3, opStart4, DTable, dtLog); 2521 HUF_decodeStreamX6(op4, &bitD4, oend, DTable, dtLog); 2522 2523 /* check */ 2524 endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); 2525 if (!endSignal) return ERROR(corruption_detected); 2526 2527 /* decoded size */ 2528 return dstSize; 2529 } 2530 } 2531 2532 2533 static size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) 2534 { 2535 HUF_CREATE_STATIC_DTABLEX6(DTable, HUF_MAX_TABLELOG); 2536 const BYTE* ip = (const BYTE*) cSrc; 2537 2538 size_t hSize = HUF_readDTableX6 (DTable, cSrc, cSrcSize); 2539 if (HUF_isError(hSize)) return hSize; 2540 if (hSize >= cSrcSize) return ERROR(srcSize_wrong); 2541 ip += hSize; 2542 cSrcSize -= hSize; 2543 2544 return HUF_decompress4X6_usingDTable (dst, dstSize, ip, cSrcSize, DTable); 2545 } 2546 2547 2548 /**********************************/ 2549 /* Generic decompression selector */ 2550 /**********************************/ 2551 2552 typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; 2553 static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = 2554 { 2555 /* single, double, quad */ 2556 {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */ 2557 {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */ 2558 {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ 2559 {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ 2560 {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ 2561 {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ 2562 {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ 2563 {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */ 2564 {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 
: 50-56% */ 2565 {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ 2566 {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ 2567 {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ 2568 {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ 2569 {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */ 2570 {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */ 2571 {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */ 2572 }; 2573 2574 typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); 2575 2576 static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) 2577 { 2578 static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, HUF_decompress4X6 }; 2579 /* estimate decompression time */ 2580 U32 Q; 2581 const U32 D256 = (U32)(dstSize >> 8); 2582 U32 Dtime[3]; 2583 U32 algoNb = 0; 2584 int n; 2585 2586 /* validation checks */ 2587 if (dstSize == 0) return ERROR(dstSize_tooSmall); 2588 if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ 2589 if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ 2590 if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ 2591 2592 /* decoder timing evaluation */ 2593 Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */ 2594 for (n=0; n<3; n++) 2595 Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256); 2596 2597 Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */ 2598 2599 if (Dtime[1] < Dtime[0]) algoNb = 1; 2600 if (Dtime[2] < Dtime[algoNb]) algoNb = 2; 2601 2602 return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); 2603 2604 //return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); /* multi-streams single-symbol decoding */ 2605 //return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize); /* multi-streams double-symbols decoding */ 2606 //return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize); /* multi-streams quad-symbols decoding */ 2607 } 2608 /* 2609 zstd - standard compression library 2610 Copyright (C) 2014-2015, Yann Collet. 2611 2612 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) 2613 2614 Redistribution and use in source and binary forms, with or without 2615 modification, are permitted provided that the following conditions are 2616 met: 2617 * Redistributions of source code must retain the above copyright 2618 notice, this list of conditions and the following disclaimer. 2619 * Redistributions in binary form must reproduce the above 2620 copyright notice, this list of conditions and the following disclaimer 2621 in the documentation and/or other materials provided with the 2622 distribution. 2623 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 2624 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 2625 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 2626 A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 2627 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 2628 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 2629 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 2630 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 2631 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 2632 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 2633 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 2634 2635 You can contact the author at : 2636 - zstd source repository : https://github.com/Cyan4973/zstd 2637 - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c 2638 */ 2639 2640 /* *************************************************************** 2641 * Tuning parameters 2642 *****************************************************************/ 2643 /*! 2644 * MEMORY_USAGE : 2645 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) 2646 * Increasing memory usage improves compression ratio 2647 * Reduced memory usage can improve speed, due to cache effect 2648 */ 2649 #define ZSTD_MEMORY_USAGE 17 2650 2651 /*! 2652 * HEAPMODE : 2653 * Select how default compression functions will allocate memory for their hash table, 2654 * in memory stack (0, fastest), or in memory heap (1, requires malloc()) 2655 * Note that compression context is fairly large, as a consequence heap memory is recommended. 2656 */ 2657 #ifndef ZSTD_HEAPMODE 2658 # define ZSTD_HEAPMODE 1 2659 #endif /* ZSTD_HEAPMODE */ 2660 2661 /*! 2662 * LEGACY_SUPPORT : 2663 * decompressor can decode older formats (starting from Zstd 0.1+) 2664 */ 2665 #ifndef ZSTD_LEGACY_SUPPORT 2666 # define ZSTD_LEGACY_SUPPORT 1 2667 #endif 2668 2669 2670 /* ******************************************************* 2671 * Includes 2672 *********************************************************/ 2673 #include <stdlib.h> /* calloc */ 2674 #include <string.h> /* memcpy, memmove */ 2675 #include <stdio.h> /* debug : printf */ 2676 2677 2678 /* ******************************************************* 2679 * Compiler specifics 2680 *********************************************************/ 2681 #ifdef __AVX2__ 2682 # include <immintrin.h> /* AVX2 intrinsics */ 2683 #endif 2684 2685 #ifdef _MSC_VER /* Visual Studio */ 2686 # include <intrin.h> /* For Visual 2005 */ 2687 # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ 2688 # pragma warning(disable : 4324) /* disable: C4324: padded structure */ 2689 #endif 2690 2691 2692 /* ******************************************************* 2693 * Constants 2694 *********************************************************/ 2695 #define HASH_LOG (ZSTD_MEMORY_USAGE - 2) 2696 #define HASH_TABLESIZE (1 << HASH_LOG) 2697 #define HASH_MASK (HASH_TABLESIZE - 1) 2698 2699 #define KNUTH 2654435761 2700 2701 #define BIT7 128 2702 #define BIT6 64 2703 #define BIT5 32 2704 #define BIT4 16 2705 #define BIT1 2 2706 #define BIT0 1 2707 2708 #define KB *(1 <<10) 2709 #define MB *(1 <<20) 2710 #define GB *(1U<<30) 2711 2712 #define BLOCKSIZE (128 KB) /* define, for static allocation */ 2713 #define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/) 2714 #define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE) 2715 #define IS_RAW BIT0 2716 #define IS_RLE BIT1 2717 2718 #define WORKPLACESIZE (BLOCKSIZE*3) 2719 #define MINMATCH 4 2720 #define MLbits 7 2721 #define LLbits 
6 2722 #define Offbits 5 2723 #define MaxML ((1<<MLbits )-1) 2724 #define MaxLL ((1<<LLbits )-1) 2725 #define MaxOff 31 2726 #define LitFSELog 11 2727 #define MLFSELog 10 2728 #define LLFSELog 10 2729 #define OffFSELog 9 2730 #define MAX(a,b) ((a)<(b)?(b):(a)) 2731 #define MaxSeq MAX(MaxLL, MaxML) 2732 2733 #define LITERAL_NOENTROPY 63 2734 #define COMMAND_NOENTROPY 7 /* to remove */ 2735 2736 #define ZSTD_CONTENTSIZE_ERROR (0ULL - 2) 2737 2738 static const size_t ZSTD_blockHeaderSize = 3; 2739 static const size_t ZSTD_frameHeaderSize = 4; 2740 2741 2742 /* ******************************************************* 2743 * Memory operations 2744 **********************************************************/ 2745 static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); } 2746 2747 static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); } 2748 2749 #define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; } 2750 2751 /*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */ 2752 static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length) 2753 { 2754 const BYTE* ip = (const BYTE*)src; 2755 BYTE* op = (BYTE*)dst; 2756 BYTE* const oend = op + length; 2757 do COPY8(op, ip) while (op < oend); 2758 } 2759 2760 2761 /* ************************************** 2762 * Local structures 2763 ****************************************/ 2764 typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t; 2765 2766 typedef struct 2767 { 2768 blockType_t blockType; 2769 U32 origSize; 2770 } blockProperties_t; 2771 2772 typedef struct { 2773 void* buffer; 2774 U32* offsetStart; 2775 U32* offset; 2776 BYTE* offCodeStart; 2777 BYTE* offCode; 2778 BYTE* litStart; 2779 BYTE* lit; 2780 BYTE* litLengthStart; 2781 BYTE* litLength; 2782 BYTE* matchLengthStart; 2783 BYTE* matchLength; 2784 BYTE* dumpsStart; 2785 BYTE* dumps; 2786 } seqStore_t; 2787 2788 2789 /* ************************************* 2790 * Error Management 2791 ***************************************/ 2792 /*! ZSTD_isError 2793 * tells if a return value is an error code */ 2794 static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); } 2795 2796 2797 2798 /* ************************************************************* 2799 * Decompression section 2800 ***************************************************************/ 2801 struct ZSTD_DCtx_s 2802 { 2803 U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)]; 2804 U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)]; 2805 U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)]; 2806 void* previousDstEnd; 2807 void* base; 2808 size_t expected; 2809 blockType_t bType; 2810 U32 phase; 2811 const BYTE* litPtr; 2812 size_t litSize; 2813 BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */]; 2814 }; /* typedef'd to ZSTD_Dctx within "zstd_static.h" */ 2815 2816 2817 static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) 2818 { 2819 const BYTE* const in = (const BYTE* const)src; 2820 BYTE headerFlags; 2821 U32 cSize; 2822 2823 if (srcSize < 3) return ERROR(srcSize_wrong); 2824 2825 headerFlags = *in; 2826 cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16); 2827 2828 bpPtr->blockType = (blockType_t)(headerFlags >> 6); 2829 bpPtr->origSize = (bpPtr->blockType == bt_rle) ? 
cSize : 0; 2830 2831 if (bpPtr->blockType == bt_end) return 0; 2832 if (bpPtr->blockType == bt_rle) return 1; 2833 return cSize; 2834 } 2835 2836 static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize) 2837 { 2838 if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall); 2839 if (srcSize > 0) { 2840 memcpy(dst, src, srcSize); 2841 } 2842 return srcSize; 2843 } 2844 2845 2846 /** ZSTD_decompressLiterals 2847 @return : nb of bytes read from src, or an error code*/ 2848 static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr, 2849 const void* src, size_t srcSize) 2850 { 2851 const BYTE* ip = (const BYTE*)src; 2852 2853 const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */ 2854 const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */ 2855 2856 if (litSize > *maxDstSizePtr) return ERROR(corruption_detected); 2857 if (litCSize + 5 > srcSize) return ERROR(corruption_detected); 2858 2859 if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected); 2860 2861 *maxDstSizePtr = litSize; 2862 return litCSize + 5; 2863 } 2864 2865 2866 /** ZSTD_decodeLiteralsBlock 2867 @return : nb of bytes read from src (< srcSize )*/ 2868 static size_t ZSTD_decodeLiteralsBlock(void* ctx, 2869 const void* src, size_t srcSize) 2870 { 2871 ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx; 2872 const BYTE* const istart = (const BYTE* const)src; 2873 2874 /* any compressed block with literals segment must be at least this size */ 2875 if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected); 2876 2877 switch(*istart & 3) 2878 { 2879 default: 2880 case 0: 2881 { 2882 size_t litSize = BLOCKSIZE; 2883 const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize); 2884 dctx->litPtr = dctx->litBuffer; 2885 dctx->litSize = litSize; 2886 memset(dctx->litBuffer + dctx->litSize, 0, 8); 2887 return readSize; /* works if it's an error too */ 2888 } 2889 case IS_RAW: 2890 { 2891 const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */ 2892 if (litSize > srcSize-11) /* risk of reading too far with wildcopy */ 2893 { 2894 if (litSize > BLOCKSIZE) return ERROR(corruption_detected); 2895 if (litSize > srcSize-3) return ERROR(corruption_detected); 2896 memcpy(dctx->litBuffer, istart, litSize); 2897 dctx->litPtr = dctx->litBuffer; 2898 dctx->litSize = litSize; 2899 memset(dctx->litBuffer + dctx->litSize, 0, 8); 2900 return litSize+3; 2901 } 2902 /* direct reference into compressed stream */ 2903 dctx->litPtr = istart+3; 2904 dctx->litSize = litSize; 2905 return litSize+3; 2906 } 2907 case IS_RLE: 2908 { 2909 const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */ 2910 if (litSize > BLOCKSIZE) return ERROR(corruption_detected); 2911 memset(dctx->litBuffer, istart[3], litSize + 8); 2912 dctx->litPtr = dctx->litBuffer; 2913 dctx->litSize = litSize; 2914 return 4; 2915 } 2916 } 2917 } 2918 2919 2920 static size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr, 2921 FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb, 2922 const void* src, size_t srcSize) 2923 { 2924 const BYTE* const istart = (const BYTE* const)src; 2925 const BYTE* ip = istart; 2926 const BYTE* const iend = istart + srcSize; 2927 U32 LLtype, Offtype, MLtype; 2928 U32 LLlog, Offlog, MLlog; 
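/* note : sequence section layout, as parsed below : a 2-byte little-endian sequence count,
   then one byte whose bits 7-6 / 5-4 / 3-2 give the encoding type of the literal-length,
   offset and match-length tables, then the "dumps" length (1 or 2 extra bytes, selected by
   bit 1 of that same byte), the dumps themselves, and finally the three table descriptions */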
2929 size_t dumpsLength; 2930 2931 /* check */ 2932 if (srcSize < 5) return ERROR(srcSize_wrong); 2933 2934 /* SeqHead */ 2935 *nbSeq = MEM_readLE16(ip); ip+=2; 2936 LLtype = *ip >> 6; 2937 Offtype = (*ip >> 4) & 3; 2938 MLtype = (*ip >> 2) & 3; 2939 if (*ip & 2) 2940 { 2941 dumpsLength = ip[2]; 2942 dumpsLength += ip[1] << 8; 2943 ip += 3; 2944 } 2945 else 2946 { 2947 dumpsLength = ip[1]; 2948 dumpsLength += (ip[0] & 1) << 8; 2949 ip += 2; 2950 } 2951 *dumpsPtr = ip; 2952 ip += dumpsLength; 2953 *dumpsLengthPtr = dumpsLength; 2954 2955 /* check */ 2956 if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */ 2957 2958 /* sequences */ 2959 { 2960 S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL and MaxOff */ 2961 size_t headerSize; 2962 2963 /* Build DTables */ 2964 switch(LLtype) 2965 { 2966 case bt_rle : 2967 LLlog = 0; 2968 FSE_buildDTable_rle(DTableLL, *ip++); break; 2969 case bt_raw : 2970 LLlog = LLbits; 2971 FSE_buildDTable_raw(DTableLL, LLbits); break; 2972 default : 2973 { U32 max = MaxLL; 2974 headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip); 2975 if (FSE_isError(headerSize)) return ERROR(GENERIC); 2976 if (LLlog > LLFSELog) return ERROR(corruption_detected); 2977 ip += headerSize; 2978 FSE_buildDTable(DTableLL, norm, max, LLlog); 2979 } } 2980 2981 switch(Offtype) 2982 { 2983 case bt_rle : 2984 Offlog = 0; 2985 if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */ 2986 FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */ 2987 break; 2988 case bt_raw : 2989 Offlog = Offbits; 2990 FSE_buildDTable_raw(DTableOffb, Offbits); break; 2991 default : 2992 { U32 max = MaxOff; 2993 headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip); 2994 if (FSE_isError(headerSize)) return ERROR(GENERIC); 2995 if (Offlog > OffFSELog) return ERROR(corruption_detected); 2996 ip += headerSize; 2997 FSE_buildDTable(DTableOffb, norm, max, Offlog); 2998 } } 2999 3000 switch(MLtype) 3001 { 3002 case bt_rle : 3003 MLlog = 0; 3004 if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */ 3005 FSE_buildDTable_rle(DTableML, *ip++); break; 3006 case bt_raw : 3007 MLlog = MLbits; 3008 FSE_buildDTable_raw(DTableML, MLbits); break; 3009 default : 3010 { U32 max = MaxML; 3011 headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip); 3012 if (FSE_isError(headerSize)) return ERROR(GENERIC); 3013 if (MLlog > MLFSELog) return ERROR(corruption_detected); 3014 ip += headerSize; 3015 FSE_buildDTable(DTableML, norm, max, MLlog); 3016 } } } 3017 3018 return ip-istart; 3019 } 3020 3021 3022 typedef struct { 3023 size_t litLength; 3024 size_t offset; 3025 size_t matchLength; 3026 } seq_t; 3027 3028 typedef struct { 3029 BIT_DStream_t DStream; 3030 FSE_DState_t stateLL; 3031 FSE_DState_t stateOffb; 3032 FSE_DState_t stateML; 3033 size_t prevOffset; 3034 const BYTE* dumps; 3035 const BYTE* dumpsEnd; 3036 } seqState_t; 3037 3038 3039 static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState) 3040 { 3041 size_t litLength; 3042 size_t prevOffset; 3043 size_t offset; 3044 size_t matchLength; 3045 const BYTE* dumps = seqState->dumps; 3046 const BYTE* const de = seqState->dumpsEnd; 3047 3048 /* Literal length */ 3049 litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream)); 3050 prevOffset = litLength ? 
seq->offset : seqState->prevOffset; 3051 seqState->prevOffset = seq->offset; 3052 if (litLength == MaxLL) 3053 { 3054 const U32 add = dumps<de ? *dumps++ : 0; 3055 if (add < 255) litLength += add; 3056 else if (dumps + 3 <= de) 3057 { 3058 litLength = MEM_readLE24(dumps); 3059 dumps += 3; 3060 } 3061 if (dumps >= de) dumps = de-1; /* late correction, to avoid read overflow (data is now corrupted anyway) */ 3062 } 3063 3064 /* Offset */ 3065 { 3066 static const size_t offsetPrefix[MaxOff+1] = { /* note : size_t faster than U32 */ 3067 1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256, 3068 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 3069 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 }; 3070 U32 offsetCode, nbBits; 3071 offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream)); /* <= maxOff, by table construction */ 3072 if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream)); 3073 nbBits = offsetCode - 1; 3074 if (offsetCode==0) nbBits = 0; /* cmove */ 3075 offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits); 3076 if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream)); 3077 if (offsetCode==0) offset = prevOffset; /* cmove */ 3078 } 3079 3080 /* MatchLength */ 3081 matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream)); 3082 if (matchLength == MaxML) 3083 { 3084 const U32 add = dumps<de ? *dumps++ : 0; 3085 if (add < 255) matchLength += add; 3086 else if (dumps + 3 <= de) 3087 { 3088 matchLength = MEM_readLE24(dumps); 3089 dumps += 3; 3090 } 3091 if (dumps >= de) dumps = de-1; /* late correction, to avoid read overflow (data is now corrupted anyway) */ 3092 } 3093 matchLength += MINMATCH; 3094 3095 /* save result */ 3096 seq->litLength = litLength; 3097 seq->offset = offset; 3098 seq->matchLength = matchLength; 3099 seqState->dumps = dumps; 3100 } 3101 3102 3103 static size_t ZSTD_execSequence(BYTE* op, 3104 seq_t sequence, 3105 const BYTE** litPtr, const BYTE* const litLimit, 3106 BYTE* const base, BYTE* const oend) 3107 { 3108 static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */ 3109 static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* subtracted */ 3110 const BYTE* const ostart = op; 3111 BYTE* const oLitEnd = op + sequence.litLength; 3112 BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */ 3113 BYTE* const oend_8 = oend-8; 3114 const BYTE* const litEnd = *litPtr + sequence.litLength; 3115 3116 /* checks */ 3117 if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */ 3118 if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ 3119 if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */ 3120 3121 /* copy Literals */ 3122 ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */ 3123 op = oLitEnd; 3124 *litPtr = litEnd; /* update for next sequence */ 3125 3126 /* copy Match */ 3127 { 3128 const BYTE* match = op - sequence.offset; 3129 3130 /* check */ 3131 if (sequence.offset > (size_t)op) return ERROR(corruption_detected); /* address space overflow test (this test seems kept by clang optimizer) */ 3132 //if (match > op) return ERROR(corruption_detected); /* address space overflow test (is clang optimizer removing this test ?) 
static size_t ZSTD_execSequence(BYTE* op,
                                seq_t sequence,
                                const BYTE** litPtr, const BYTE* const litLimit,
                                BYTE* const base, BYTE* const oend)
{
    static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
    static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11};   /* subtracted */
    const BYTE* const ostart = op;
    BYTE* const oLitEnd = op + sequence.litLength;
    BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength;   /* risk : address space overflow (32-bits) */
    BYTE* const oend_8 = oend-8;
    const BYTE* const litEnd = *litPtr + sequence.litLength;

    /* checks */
    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */
    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
    if (litEnd > litLimit) return ERROR(corruption_detected);   /* overRead beyond lit buffer */

    /* copy Literals */
    ZSTD_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
    op = oLitEnd;
    *litPtr = litEnd;   /* update for next sequence */

    /* copy Match */
    {
        const BYTE* match = op - sequence.offset;

        /* check */
        if (sequence.offset > (size_t)op) return ERROR(corruption_detected);   /* address space overflow test (this test seems kept by clang optimizer) */
        //if (match > op) return ERROR(corruption_detected);   /* address space overflow test (is clang optimizer removing this test ?) */
        if (match < base) return ERROR(corruption_detected);

        /* close range match, overlap */
        if (sequence.offset < 8)
        {
            const int dec64 = dec64table[sequence.offset];
            op[0] = match[0];
            op[1] = match[1];
            op[2] = match[2];
            op[3] = match[3];
            match += dec32table[sequence.offset];
            ZSTD_copy4(op+4, match);
            match -= dec64;
        }
        else
        {
            ZSTD_copy8(op, match);
        }
        op += 8; match += 8;

        if (oMatchEnd > oend-(16-MINMATCH))
        {
            if (op < oend_8)
            {
                ZSTD_wildcopy(op, match, oend_8 - op);
                match += oend_8 - op;
                op = oend_8;
            }
            while (op < oMatchEnd) *op++ = *match++;
        }
        else
        {
            ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
        }
    }

    return oMatchEnd - ostart;
}

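/* ZSTD_decompressSequences() :
 * Decodes the sequences section of a compressed block : builds the three FSE
 * decoding tables via ZSTD_decodeSeqHeaders(), then regenerates content into
 * `dst` by alternating ZSTD_decodeSequence() and ZSTD_execSequence(), and
 * finally flushes whatever literals remain as a last segment. */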
static size_t ZSTD_decompressSequences(
                               void* ctx,
                               void* dst, size_t maxDstSize,
                         const void* seqStart, size_t seqSize)
{
    ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
    const BYTE* ip = (const BYTE*)seqStart;
    const BYTE* const iend = ip + seqSize;
    BYTE* const ostart = (BYTE* const)dst;
    BYTE* op = ostart;
    BYTE* const oend = ostart + maxDstSize;
    size_t errorCode, dumpsLength;
    const BYTE* litPtr = dctx->litPtr;
    const BYTE* const litEnd = litPtr + dctx->litSize;
    int nbSeq;
    const BYTE* dumps;
    U32* DTableLL = dctx->LLTable;
    U32* DTableML = dctx->MLTable;
    U32* DTableOffb = dctx->OffTable;
    BYTE* const base = (BYTE*) (dctx->base);

    /* Build Decoding Tables */
    errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
                                      DTableLL, DTableML, DTableOffb,
                                      ip, iend-ip);
    if (ZSTD_isError(errorCode)) return errorCode;
    ip += errorCode;

    /* Regen sequences */
    {
        seq_t sequence;
        seqState_t seqState;

        memset(&sequence, 0, sizeof(sequence));
        seqState.dumps = dumps;
        seqState.dumpsEnd = dumps + dumpsLength;
        seqState.prevOffset = 1;
        errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);
        if (ERR_isError(errorCode)) return ERROR(corruption_detected);
        FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
        FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
        FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);

        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (nbSeq>0) ; )
        {
            size_t oneSeqSize;
            nbSeq--;
            ZSTD_decodeSequence(&sequence, &seqState);
            oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);
            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
            op += oneSeqSize;
        }

        /* check if reached exact end */
        if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected);   /* requested too much : data is corrupted */
        if (nbSeq<0) return ERROR(corruption_detected);   /* requested too many sequences : data is corrupted */

        /* last literal segment */
        {
            size_t lastLLSize = litEnd - litPtr;
            if (litPtr > litEnd) return ERROR(corruption_detected);
            if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
            if (lastLLSize > 0) {
                if (op != litPtr) memmove(op, litPtr, lastLLSize);
                op += lastLLSize;
            }
        }
    }

    return op-ostart;
}


static size_t ZSTD_decompressBlock(
                            void* ctx,
                            void* dst, size_t maxDstSize,
                      const void* src, size_t srcSize)
{
    /* blockType == blockCompressed */
    const BYTE* ip = (const BYTE*)src;

    /* Decode literals sub-block */
    size_t litCSize = ZSTD_decodeLiteralsBlock(ctx, src, srcSize);
    if (ZSTD_isError(litCSize)) return litCSize;
    ip += litCSize;
    srcSize -= litCSize;

    return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize);
}


static size_t ZSTD_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    const BYTE* ip = (const BYTE*)src;
    const BYTE* iend = ip + srcSize;
    BYTE* const ostart = (BYTE* const)dst;
    BYTE* op = ostart;
    BYTE* const oend = ostart + maxDstSize;
    size_t remainingSize = srcSize;
    U32 magicNumber;
    blockProperties_t blockProperties;

    /* Frame Header */
    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
    magicNumber = MEM_readLE32(src);
    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;

    /* Loop on each block */
    while (1)
    {
        size_t decodedSize=0;
        size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);
        if (ZSTD_isError(cBlockSize)) return cBlockSize;

        ip += ZSTD_blockHeaderSize;
        remainingSize -= ZSTD_blockHeaderSize;
        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);

        switch(blockProperties.blockType)
        {
        case bt_compressed:
            decodedSize = ZSTD_decompressBlock(ctx, op, oend-op, ip, cBlockSize);
            break;
        case bt_raw :
            decodedSize = ZSTD_copyUncompressedBlock(op, oend-op, ip, cBlockSize);
            break;
        case bt_rle :
            return ERROR(GENERIC);   /* not yet supported */
            break;
        case bt_end :
            /* end of frame */
            if (remainingSize) return ERROR(srcSize_wrong);
            break;
        default:
            return ERROR(GENERIC);   /* impossible */
        }
        if (cBlockSize == 0) break;   /* bt_end */

        if (ZSTD_isError(decodedSize)) return decodedSize;
        op += decodedSize;
        ip += cBlockSize;
        remainingSize -= cBlockSize;
    }

    return op-ostart;
}

static size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    ZSTD_DCtx ctx;
    ctx.base = dst;
    return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
}

/* ZSTD_errorFrameSizeInfoLegacy() :
   assumes `cSize` and `dBound` are _not_ NULL */
static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
{
    *cSize = ret;
    *dBound = ZSTD_CONTENTSIZE_ERROR;
}

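/* ZSTDv02_findFrameSizeInfoLegacy() :
 * Walks the block headers of a v0.2 frame without decompressing it, so as to
 * report the exact compressed frame size (*cSize) and an upper bound of the
 * regenerated size (*dBound), computed as nbBlocks * BLOCKSIZE.
 * On malformed input, *dBound is set to ZSTD_CONTENTSIZE_ERROR. */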
void ZSTDv02_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
{
    const BYTE* ip = (const BYTE*)src;
    size_t remainingSize = srcSize;
    size_t nbBlocks = 0;
    U32 magicNumber;
    blockProperties_t blockProperties;

    /* Frame Header */
    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {
        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
        return;
    }
    magicNumber = MEM_readLE32(src);
    if (magicNumber != ZSTD_magicNumber) {
        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
        return;
    }
    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;

    /* Loop on each block */
    while (1)
    {
        size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
        if (ZSTD_isError(cBlockSize)) {
            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
            return;
        }

        ip += ZSTD_blockHeaderSize;
        remainingSize -= ZSTD_blockHeaderSize;
        if (cBlockSize > remainingSize) {
            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
            return;
        }

        if (cBlockSize == 0) break;   /* bt_end */

        ip += cBlockSize;
        remainingSize -= cBlockSize;
        nbBlocks++;
    }

    *cSize = ip - (const BYTE*)src;
    *dBound = nbBlocks * BLOCKSIZE;
}

/*******************************
*  Streaming Decompression API
*******************************/

static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)
{
    dctx->expected = ZSTD_frameHeaderSize;
    dctx->phase = 0;
    dctx->previousDstEnd = NULL;
    dctx->base = NULL;
    return 0;
}

static ZSTD_DCtx* ZSTD_createDCtx(void)
{
    ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));
    if (dctx==NULL) return NULL;
    ZSTD_resetDCtx(dctx);
    return dctx;
}

static size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
{
    free(dctx);
    return 0;
}

static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)
{
    return dctx->expected;
}

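/* ZSTD_decompressContinue() :
 * Streaming core : the caller must feed exactly ZSTD_nextSrcSizeToDecompress(dctx)
 * bytes per call. Internally, phase 0 consumes the frame header, phase 1 a block
 * header (which announces the size of the next block), and phase 2 the block
 * content itself, which is regenerated into `dst`. Returns the number of bytes
 * written to `dst` (0 while parsing headers), or an error code. */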
static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    /* Sanity check */
    if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
    if (dst != ctx->previousDstEnd)   /* not contiguous */
        ctx->base = dst;

    /* Decompress : frame header */
    if (ctx->phase == 0)
    {
        /* Check frame magic header */
        U32 magicNumber = MEM_readLE32(src);
        if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
        ctx->phase = 1;
        ctx->expected = ZSTD_blockHeaderSize;
        return 0;
    }

    /* Decompress : block header */
    if (ctx->phase == 1)
    {
        blockProperties_t bp;
        size_t blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
        if (ZSTD_isError(blockSize)) return blockSize;
        if (bp.blockType == bt_end)
        {
            ctx->expected = 0;
            ctx->phase = 0;
        }
        else
        {
            ctx->expected = blockSize;
            ctx->bType = bp.blockType;
            ctx->phase = 2;
        }

        return 0;
    }

    /* Decompress : block content */
    {
        size_t rSize;
        switch(ctx->bType)
        {
        case bt_compressed:
            rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);
            break;
        case bt_raw :
            rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);
            break;
        case bt_rle :
            return ERROR(GENERIC);   /* not yet handled */
            break;
        case bt_end :   /* should never happen (filtered at phase 1) */
            rSize = 0;
            break;
        default:
            return ERROR(GENERIC);
        }
        ctx->phase = 1;
        ctx->expected = ZSTD_blockHeaderSize;
        ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
        return rSize;
    }
}


/* wrapper layer */

unsigned ZSTDv02_isError(size_t code)
{
    return ZSTD_isError(code);
}

size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
                     const void* src, size_t compressedSize)
{
    return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
}

ZSTDv02_Dctx* ZSTDv02_createDCtx(void)
{
    return (ZSTDv02_Dctx*)ZSTD_createDCtx();
}

size_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx)
{
    return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
}

size_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx)
{
    return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
}

size_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx)
{
    return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
}

size_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
}
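
/* Usage note (illustrative sketch only, not compiled as part of this file) :
 * a minimal streaming-decompression loop over the ZSTDv02_* wrappers above.
 * `inBuff`, `outBuff`, `outCapacity` and the input-refill step are hypothetical,
 * caller-provided pieces; outCapacity must be large enough for the whole frame,
 * and the output must be written into one contiguous buffer, because matches
 * may reference data regenerated by earlier blocks.
 *
 *     ZSTDv02_Dctx* const dctx = ZSTDv02_createDCtx();
 *     size_t pos = 0;
 *     size_t toRead = ZSTDv02_nextSrcSizeToDecompress(dctx);
 *     while (toRead != 0) {
 *         size_t decoded;
 *         ... fill inBuff with exactly toRead bytes of compressed input ...
 *         decoded = ZSTDv02_decompressContinue(dctx, outBuff + pos, outCapacity - pos, inBuff, toRead);
 *         if (ZSTDv02_isError(decoded)) break;      header steps regenerate 0 bytes,
 *         pos += decoded;                           content steps regenerate up to one block
 *         toRead = ZSTDv02_nextSrcSizeToDecompress(dctx);
 *     }
 *     ZSTDv02_freeDCtx(dctx);
 */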