/*
 * xxHash - Fast Hash algorithm
 * Copyright (C) 2012-2016, Yann Collet
 *
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 * - xxHash homepage: http://www.xxhash.com
 * - xxHash source repository : https://github.com/Cyan4973/xxHash
 */


/* *************************************
 * Tuning parameters
 ***************************************/
/*!XXH_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is done through `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It relies on a compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method doesn't depend on a compiler extension, but violates the C standard.
 *            It can generate buggy code on targets which do not support unaligned memory accesses.
 *            But in some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
 *            See http://stackoverflow.com/a/32095106/646947 for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define XXH_FORCE_MEMORY_ACCESS 2
#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif
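
/* Example (illustrative) : since XXH_FORCE_MEMORY_ACCESS can be defined externally,
 * the access method can also be forced from the compiler command line, which takes
 * precedence over the auto-detection above :
 *
 *     cc -DXXH_FORCE_MEMORY_ACCESS=1 -c xxhash.c
 */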

/*!XXH_ACCEPT_NULL_INPUT_POINTER :
 * If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
 * When this option is enabled, xxHash's output for a null input pointer will be the same as for a zero-length input.
 * By default, this option is disabled. To enable it, uncomment the define below :
 */
/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */

/*!XXH_FORCE_NATIVE_FORMAT :
 * By default, the xxHash library provides endian-independent hash values, based on the little-endian convention.
 * Results are therefore identical for little-endian and big-endian CPUs.
 * This comes at a performance cost for big-endian CPUs, since some byte swapping is required to emulate the little-endian format.
 * Should endian-independence be of no importance to your application, you may set the #define below to 1,
 * to improve speed on big-endian CPUs.
 * This option has no impact on little-endian CPUs.
 */
#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
#  define XXH_FORCE_NATIVE_FORMAT 0
#endif

/*!XXH_FORCE_ALIGN_CHECK :
 * This is a minor performance trick, only useful with lots of very small keys.
 * It means : check for aligned/unaligned input.
 * The check costs one initial branch per hash; set it to 0 when the input data
 * is guaranteed to be aligned.
 */
#ifndef XXH_FORCE_ALIGN_CHECK   /* can be defined externally */
#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif
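
/* Example (illustrative) : with XXH_ACCEPT_NULL_INPUT_POINTER enabled
 * (uncomment the define above, or pass -DXXH_ACCEPT_NULL_INPUT_POINTER=1),
 * a null input pointer behaves like a zero-length input :
 *
 *     XXH32(NULL, 0, seed) == XXH32("", 0, seed)
 */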


/* *************************************
 * Includes & Memory related functions
 ***************************************/
/* Modify the local functions below should you wish to use some other memory routines */
/* for malloc(), free() */
#include <stdlib.h>
static void* XXH_malloc(size_t s) { return malloc(s); }
static void  XXH_free  (void* p)  { free(p); }
/* for memcpy() */
#include <string.h>
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }

#ifndef XXH_STATIC_LINKING_ONLY
#  define XXH_STATIC_LINKING_ONLY
#endif
#include "xxhash.h"
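
/* Example (illustrative sketch) : to route state allocations through a custom
 * allocator, replace the wrappers above; `my_pool_alloc` / `my_pool_free` below
 * are hypothetical application-side routines :
 *
 *     static void* XXH_malloc(size_t s) { return my_pool_alloc(s); }
 *     static void  XXH_free  (void* p)  { my_pool_free(p); }
 */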


/* *************************************
 * Compiler Specific Options
 ***************************************/
#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#  define INLINE_KEYWORD inline
#else
#  define INLINE_KEYWORD
#endif

#if defined(__GNUC__)
#  define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
#  define FORCE_INLINE_ATTR __forceinline
#else
#  define FORCE_INLINE_ATTR
#endif

#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR


#ifdef _MSC_VER
#  pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
#endif


/* *************************************
 * Basic Types
 ***************************************/
#ifndef MEM_MODULE
# define MEM_MODULE
# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
    typedef uint8_t  BYTE;
    typedef uint16_t U16;
    typedef uint32_t U32;
    typedef  int32_t S32;
    typedef uint64_t U64;
# else
    typedef unsigned char      BYTE;
    typedef unsigned short     U16;
    typedef unsigned int       U32;
    typedef   signed int       S32;
    typedef unsigned long long U64;   /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
# endif
#endif


#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/* __packed instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;

static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }

#else

/* portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947
 */

static U32 XXH_read32(const void* memPtr)
{
    U32 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

static U64 XXH_read64(const void* memPtr)
{
    U64 val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_MEMORY_ACCESS */


/* ****************************************
 * Compiler-specific Functions and Macros
 ******************************************/
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

/* Note : although _rotl exists for MinGW (GCC under Windows), performance seems poor */
#if defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif

#if defined(_MSC_VER)   /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#  define XXH_swap64 _byteswap_uint64
#elif (GCC_VERSION >= 403 && !defined(__riscv))
#  define XXH_swap32 __builtin_bswap32
#  define XXH_swap64 __builtin_bswap64
#else
static U32 XXH_swap32 (U32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
static U64 XXH_swap64 (U64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x <<  8) & 0x000000ff00000000ULL) |
            ((x >>  8) & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif


/* *************************************
 * Architecture Macros
 ***************************************/
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;

/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
#ifndef XXH_CPU_LITTLE_ENDIAN
    static const int g_one = 1;
#   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&g_one))
#endif
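
/* Example (illustrative) : when the target endianness is known at build time,
 * the runtime detection above can be skipped by defining the macro externally :
 *
 *     cc -DXXH_CPU_LITTLE_ENDIAN=1 -c xxhash.c      (little-endian target)
 *     cc -DXXH_CPU_LITTLE_ENDIAN=0 -c xxhash.c      (big-endian target)
 */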


/* ***************************
 * Memory reads
 *****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;

FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
    else
        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
}

FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}

static U32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}

FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
    else
        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}

FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
{
    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}

static U64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}


/* *************************************
 * Macros
 ***************************************/
#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */


/* *************************************
 * Constants
 ***************************************/
static const U32 PRIME32_1 = 2654435761U;
static const U32 PRIME32_2 = 2246822519U;
static const U32 PRIME32_3 = 3266489917U;
static const U32 PRIME32_4 =  668265263U;
static const U32 PRIME32_5 =  374761393U;

static const U64 PRIME64_1 = 11400714785074694791ULL;
static const U64 PRIME64_2 = 14029467366897019727ULL;
static const U64 PRIME64_3 =  1609587929392839161ULL;
static const U64 PRIME64_4 =  9650029242287828579ULL;
static const U64 PRIME64_5 =  2870177450012600261ULL;

XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }


/* **************************
 * Utils
 ****************************/
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}

XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
{
    memcpy(dstState, srcState, sizeof(*dstState));
}


/* ***************************
 * Simple Hash Functions
 *****************************/

static U32 XXH32_round(U32 seed, U32 input)
{
    seed += input * PRIME32_2;
    seed  = XXH_rotl32(seed, 13);
    seed *= PRIME32_1;
    return seed;
}

FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)16;
    }
#endif

    if (len>=16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
        } while (p<=limit);

        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32 = seed + PRIME32_5;
    }

    h32 += (U32) len;

    while (p+4<=bEnd) {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
        p+=4;
    }

    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}


XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_CREATESTATE_STATIC(state);
    XXH32_reset(state, seed);
    XXH32_update(state, input, len);
    return XXH32_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
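
/* Example (illustrative sketch) : one-shot hashing of an in-memory buffer with
 * XXH32. The buffer content and seed value are arbitrary :
 *
 *     #include <stdio.h>
 *     #include "xxhash.h"
 *
 *     int main(void)
 *     {
 *         const char data[] = "sample input";
 *         unsigned int const seed = 0;   // same data + same seed => same hash
 *         unsigned int const h = XXH32(data, sizeof(data)-1, seed);
 *         printf("XXH32 = %08x\n", h);
 *         return 0;
 *     }
 */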


static U64 XXH64_round(U64 acc, U64 input)
{
    acc += input * PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
    acc *= PRIME64_1;
    return acc;
}

static U64 XXH64_mergeRound(U64 acc, U64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
    acc  = acc * PRIME64_1 + PRIME64_4;
    return acc;
}

FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;   /* non-const : re-assigned in the null-input branch below */
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL) {
        len=0;
        bEnd=p=(const BYTE*)(size_t)32;
    }
#endif

    if (len>=32) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do {
            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
        } while (p<=limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

    } else {
        h64 = seed + PRIME64_5;
    }

    h64 += (U64) len;

    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}


XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_CREATESTATE_STATIC(state);
    XXH64_reset(state, seed);
    XXH64_update(state, input, len);
    return XXH64_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {   /* Input is aligned, let's leverage the speed advantage */
            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
            else
                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }   }

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
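
/* Example (illustrative sketch) : XXH64 has the same one-shot interface, with a
 * 64-bit result and seed; it is generally the faster variant on 64-bit CPUs :
 *
 *     unsigned long long const h64 = XXH64(data, dataSize, 0);
 *
 * where `data` / `dataSize` describe any in-memory buffer.
 */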


/* **************************************************
 * Advanced Hash Functions
 ****************************************************/

XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
{
    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}


/*** Hash feed ***/

XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
{
    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
    state.v1 = seed + PRIME32_1 + PRIME32_2;
    state.v2 = seed + PRIME32_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME32_1;
    memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}


XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
{
    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
    state.v1 = seed + PRIME64_1 + PRIME64_2;
    state.v2 = seed + PRIME64_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME64_1;
    memcpy(statePtr, &state, sizeof(state));
    return XXH_OK;
}
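
/* Example (illustrative sketch) : streaming usage, hashing data that arrives in
 * chunks; the result matches a one-shot XXH64() over the whole input. Error
 * handling is omitted for brevity, and `readChunk` is a hypothetical
 * application-side function returning the number of bytes produced :
 *
 *     XXH64_state_t* const state = XXH64_createState();
 *     XXH64_reset(state, 0);                        // seed = 0
 *     {   char buffer[4096];
 *         size_t n;
 *         while ((n = readChunk(buffer, sizeof(buffer))) != 0)
 *             XXH64_update(state, buffer, n);       // feed each chunk in order
 *     }
 *     {   unsigned long long const h64 = XXH64_digest(state);
 *         XXH64_freeState(state);
 *     }
 */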


FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len_32 += (unsigned)len;
    state->large_len |= (len>=16) | (state->total_len_32>=16);

    if (state->memsize + len < 16) {   /* fill in tmp buffer */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
        state->memsize += (unsigned)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* some data left from previous update */
        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
        {   const U32* p32 = state->mem32;
            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
        }
        p += 16-state->memsize;
        state->memsize = 0;
    }

    if (p <= bEnd-16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = state->v1;
        U32 v2 = state->v2;
        U32 v3 = state->v3;
        U32 v4 = state->v4;

        do {
            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
        } while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
    }

    return XXH_OK;
}

XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
}



FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem32;
    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
    U32 h32;

    if (state->large_len) {
        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
    } else {
        h32 = state->v3 /* == seed */ + PRIME32_5;
    }

    h32 += state->total_len_32;

    while (p+4<=bEnd) {
        h32 += XXH_readLE32(p, endian) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
        p+=4;
    }

    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
        p++;
    }

    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}


XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH32_digest_endian(state_in, XXH_bigEndian);
}
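
/* Note (illustrative) : XXH32_digest() takes a const state, so it can be called
 * at any point of a streaming session to obtain the hash of all data fed so far,
 * and updates can continue afterwards :
 *
 *     XXH32_state_t* const s = XXH32_createState();
 *     XXH32_reset(s, 0);
 *     XXH32_update(s, "abc", 3);
 *     {   unsigned int const h1 = XXH32_digest(s);   // hash of "abc"
 *         XXH32_update(s, "de", 2);
 *         {   unsigned int const h2 = XXH32_digest(s);   // same as XXH32("abcde", 5, 0)
 *             (void)h1; (void)h2;
 *     }   }
 *     XXH32_freeState(s);
 */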


/* **** XXH64 **** */

FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 32) {   /* fill in tmp buffer */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
        state->memsize += (U32)len;
        return XXH_OK;
    }

    if (state->memsize) {   /* tmp buffer is full */
        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
        p += 32-state->memsize;
        state->memsize = 0;
    }

    if (p+32 <= bEnd) {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = state->v1;
        U64 v2 = state->v2;
        U64 v3 = state->v3;
        U64 v4 = state->v4;

        do {
            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
        } while (p<=limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
        state->memsize = (unsigned)(bEnd-p);
    }

    return XXH_OK;
}

XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
}



FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
{
    const BYTE * p = (const BYTE*)state->mem64;
    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
    U64 h64;

    if (state->total_len >= 32) {
        U64 const v1 = state->v1;
        U64 const v2 = state->v2;
        U64 const v3 = state->v3;
        U64 const v4 = state->v4;

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);
    } else {
        h64 = state->v3 + PRIME64_5;
    }

    h64 += (U64) state->total_len;

    while (p+8<=bEnd) {
        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    if (p+4<=bEnd) {
        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    while (p<bEnd) {
        h64 ^= (*p) * PRIME64_5;
        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}


XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_digest_endian(state_in, XXH_littleEndian);
    else
        return XXH64_digest_endian(state_in, XXH_bigEndian);
}


/* **************************
 * Canonical representation
 ****************************/

/*! Default XXH result types are basic unsigned 32 and 64 bits.
 * The canonical representation uses the human-readable write convention, aka big-endian (most significant digits first).
 * These functions allow transformation of a hash result into and from its canonical format.
 * This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
 */

XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
    memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}

XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}
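
/* Example (illustrative sketch) : storing a hash in canonical (big-endian) form
 * so it can be read back identically on any platform; `f` is an open FILE* :
 *
 *     XXH64_canonical_t canon;
 *     XXH64_canonicalFromHash(&canon, h64);     // h64 : previously computed hash
 *     fwrite(&canon, sizeof(canon), 1, f);      // portable on-disk representation
 *
 *     // ... later, possibly on a machine with different endianness :
 *     fread(&canon, sizeof(canon), 1, f);
 *     h64 = XXH64_hashFromCanonical(&canon);    // recovers the original value
 */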