/*
 *  xxHash - Fast Hash algorithm
 *  Copyright (c) Yann Collet, Facebook, Inc.
 *
 *  You can contact the author at :
 *  - xxHash homepage: http://www.xxhash.com
 *  - xxHash source repository : https://github.com/Cyan4973/xxHash
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
*/


#ifndef XXH_NO_XXH3
# define XXH_NO_XXH3
#endif

#ifndef XXH_NAMESPACE
# define XXH_NAMESPACE ZSTD_
#endif

/*!
 * @mainpage xxHash
 *
 * @file xxhash.h
 * xxHash prototypes and implementation
 */
/* TODO: update */
/* Notice extracted from xxHash homepage:

xxHash is an extremely fast hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.

Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)

Name            Speed       Q.Score   Author
xxHash          5.4 GB/s     10
CrapWow         3.2 GB/s      2       Andrew
MurmurHash 3a   2.7 GB/s     10       Austin Appleby
SpookyHash      2.0 GB/s     10       Bob Jenkins
SBox            1.4 GB/s      9       Bret Mulvey
Lookup3         1.2 GB/s      9       Bob Jenkins
SuperFastHash   1.2 GB/s      1       Paul Hsieh
CityHash64      1.05 GB/s    10       Pike & Alakuijala
FNV             0.55 GB/s     5       Fowler, Noll, Vo
CRC32           0.43 GB/s     9
MD5-32          0.33 GB/s    10       Ronald L. Rivest
SHA1-32         0.28 GB/s    10

Q.Score is a measure of the quality of the hash function.
It depends on successfully passing the SMHasher test set.
10 is a perfect score.

Note: SMHasher's CRC32 implementation is not the fastest one.
Other speed-oriented implementations can be faster,
especially in combination with PCLMUL instruction:
https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735

A 64-bit version, named XXH64, is available since r35.
It offers much better speed, but for 64-bit applications only.
Name     Speed on 64 bits    Speed on 32 bits
XXH64       13.8 GB/s            1.9 GB/s
XXH32        6.8 GB/s            6.0 GB/s
*/

#if defined (__cplusplus)
extern "C" {
#endif

/* ****************************
 *  INLINE mode
 ******************************/
/*!
 * XXH_INLINE_ALL (and XXH_PRIVATE_API)
 * Use these build macros to inline xxhash into the target unit.
 * Inlining improves performance on small inputs, especially when the length is
 * expressed as a compile-time constant:
 *
 *      https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
 *
 * It also keeps xxHash symbols private to the unit, so they are not exported.
 *
 * Usage:
 *     #define XXH_INLINE_ALL
 *     #include "xxhash.h"
 *
 * Do not compile and link xxhash.o as a separate object, as it is not useful.
 */
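/*
 * For illustration, a minimal sketch of a translation unit that inlines
 * xxHash (the file and function names are hypothetical):
 *
 * @code{.c}
 *    // hash_unit.c
 *    #define XXH_INLINE_ALL
 *    #include "xxhash.h"
 *
 *    // With XXH_INLINE_ALL, XXH32() resolves to a static inline function,
 *    // so no separate xxhash.o is needed at link time.
 *    unsigned hash_bytes(const void* p, size_t n)
 *    {
 *        return (unsigned)XXH32(p, n, 0);
 *    }
 * @endcode
 */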
#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
    && !defined(XXH_INLINE_ALL_31684351384)
   /* this section should be traversed only once */
#  define XXH_INLINE_ALL_31684351384
   /* give access to the advanced API, required to compile implementations */
#  undef XXH_STATIC_LINKING_ONLY   /* avoid macro redef */
#  define XXH_STATIC_LINKING_ONLY
   /* make all functions private */
#  undef XXH_PUBLIC_API
#  if defined(__GNUC__)
#    define XXH_PUBLIC_API static __inline __attribute__((unused))
#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#    define XXH_PUBLIC_API static inline
#  elif defined(_MSC_VER)
#    define XXH_PUBLIC_API static __inline
#  else
     /* note: this version may generate warnings for unused static functions */
#    define XXH_PUBLIC_API static
#  endif

   /*
    * This part deals with the special case where a unit wants to inline xxHash,
    * but "xxhash.h" has previously been included without XXH_INLINE_ALL,
    * such as part of some previously included *.h header file.
    * Without further action, the new include would just be ignored,
    * and functions would effectively _not_ be inlined (silent failure).
    * The following macros solve this situation by prefixing all inlined names,
    * avoiding naming collision with previous inclusions.
    */
   /* Before that, we unconditionally #undef all symbols,
    * in case they were already defined with XXH_NAMESPACE.
    * They will then be redefined for XXH_INLINE_ALL
    */
#  undef XXH_versionNumber
    /* XXH32 */
#  undef XXH32
#  undef XXH32_createState
#  undef XXH32_freeState
#  undef XXH32_reset
#  undef XXH32_update
#  undef XXH32_digest
#  undef XXH32_copyState
#  undef XXH32_canonicalFromHash
#  undef XXH32_hashFromCanonical
    /* XXH64 */
#  undef XXH64
#  undef XXH64_createState
#  undef XXH64_freeState
#  undef XXH64_reset
#  undef XXH64_update
#  undef XXH64_digest
#  undef XXH64_copyState
#  undef XXH64_canonicalFromHash
#  undef XXH64_hashFromCanonical
    /* XXH3_64bits */
#  undef XXH3_64bits
#  undef XXH3_64bits_withSecret
#  undef XXH3_64bits_withSeed
#  undef XXH3_64bits_withSecretandSeed
#  undef XXH3_createState
#  undef XXH3_freeState
#  undef XXH3_copyState
#  undef XXH3_64bits_reset
#  undef XXH3_64bits_reset_withSeed
#  undef XXH3_64bits_reset_withSecret
#  undef XXH3_64bits_update
#  undef XXH3_64bits_digest
#  undef XXH3_generateSecret
    /* XXH3_128bits */
#  undef XXH128
#  undef XXH3_128bits
#  undef XXH3_128bits_withSeed
#  undef XXH3_128bits_withSecret
#  undef XXH3_128bits_reset
#  undef XXH3_128bits_reset_withSeed
#  undef XXH3_128bits_reset_withSecret
#  undef XXH3_128bits_reset_withSecretandSeed
#  undef XXH3_128bits_update
#  undef XXH3_128bits_digest
#  undef XXH128_isEqual
#  undef XXH128_cmp
#  undef XXH128_canonicalFromHash
#  undef XXH128_hashFromCanonical
    /* Finally, free the namespace itself */
#  undef XXH_NAMESPACE

    /* employ the namespace for XXH_INLINE_ALL */
#  define XXH_NAMESPACE XXH_INLINE_
   /*
    * Some identifiers (enums, type names) are not symbols,
    * but they must nonetheless be renamed to avoid redeclaration.
    * Alternative solution: do not redeclare them.
    * However, this requires some #ifdefs, and has a more dispersed impact.
    * Meanwhile, renaming can be achieved in a single place.
    */
#  define XXH_IPREF(Id)   XXH_NAMESPACE ## Id
#  define XXH_OK XXH_IPREF(XXH_OK)
#  define XXH_ERROR XXH_IPREF(XXH_ERROR)
#  define XXH_errorcode XXH_IPREF(XXH_errorcode)
#  define XXH32_canonical_t  XXH_IPREF(XXH32_canonical_t)
#  define XXH64_canonical_t  XXH_IPREF(XXH64_canonical_t)
#  define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
#  define XXH32_state_s XXH_IPREF(XXH32_state_s)
#  define XXH32_state_t XXH_IPREF(XXH32_state_t)
#  define XXH64_state_s XXH_IPREF(XXH64_state_s)
#  define XXH64_state_t XXH_IPREF(XXH64_state_t)
#  define XXH3_state_s  XXH_IPREF(XXH3_state_s)
#  define XXH3_state_t  XXH_IPREF(XXH3_state_t)
#  define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
   /* Ensure the header is parsed again, even if it was previously included */
#  undef XXHASH_H_5627135585666179
#  undef XXHASH_H_STATIC_13879238742
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */



/* ****************************************************************
 *  Stable API
 *****************************************************************/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1


/*!
 * @defgroup public Public API
 * Contains details on the public xxHash functions.
 * @{
 */
/* specific declaration modes for Windows */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#    ifdef XXH_EXPORT
#      define XXH_PUBLIC_API __declspec(dllexport)
#    elif XXH_IMPORT
#      define XXH_PUBLIC_API __declspec(dllimport)
#    endif
#  else
#    define XXH_PUBLIC_API   /* do nothing */
#  endif
#endif

#ifdef XXH_DOXYGEN
/*!
 * @brief Emulate a namespace by transparently prefixing all symbols.
 *
 * If you want to include _and expose_ xxHash functions from within your own
 * library, but also want to avoid symbol collisions with other libraries which
 * may also include xxHash, you can use XXH_NAMESPACE to automatically prefix
 * any public symbol from xxhash library with the value of XXH_NAMESPACE
 * (therefore, avoid empty or numeric values).
 *
 * Note that no change is required within the calling program as long as it
 * includes `xxhash.h`: Regular symbol names will be automatically translated
 * by this header.
 */
#  define XXH_NAMESPACE /* YOUR NAME HERE */
#  undef XXH_NAMESPACE
#endif
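/*
 * For illustration, a minimal sketch of namespacing in action
 * (the MYLIB_ prefix is a hypothetical example value):
 *
 * @code{.c}
 *    #define XXH_NAMESPACE MYLIB_
 *    #include "xxhash.h"
 *
 *    // The call below compiles against the symbol MYLIB_XXH32,
 *    // so it cannot collide with another library's copy of xxHash.
 *    XXH32_hash_t h = XXH32("abc", 3, 0);
 * @endcode
 */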

#ifdef XXH_NAMESPACE
#  define XXH_CAT(A,B) A##B
#  define XXH_NAME2(A,B) XXH_CAT(A,B)
#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
/* XXH32 */
#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
/* XXH64 */
#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
/* XXH3_64bits */
#  define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
#  define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
#  define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
#  define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
#  define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
#  define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
#  define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
#  define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
#  define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
#  define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
#  define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
#  define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
#  define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
#  define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
#  define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
/* XXH3_128bits */
#  define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
#  define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
#  define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
#  define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
#  define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
#  define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
#  define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
#  define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
#  define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
#  define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
#  define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
#  define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
#  define XXH128_cmp     XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
#  define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
#  define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#endif


/* *************************************
*  Version
***************************************/
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    8
#define XXH_VERSION_RELEASE  1
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)

/*!
 * @brief Obtains the xxHash version.
 *
 * This is mostly useful when xxHash is compiled as a shared library,
 * since the returned value comes from the library, as opposed to the header file.
 *
 * @return `XXH_VERSION_NUMBER` of the invoked library.
 */
XXH_PUBLIC_API unsigned XXH_versionNumber (void);
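/*
 * For illustration, a minimal runtime sanity check, useful when the shared
 * library may have been built from a different header version:
 *
 * @code{.c}
 *    if (XXH_versionNumber() != XXH_VERSION_NUMBER) {
 *        // header and library disagree; proceed with caution
 *    }
 * @endcode
 */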


/* ****************************
*  Common basic types
******************************/
#include <stddef.h>   /* size_t */
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;


/*-**********************************************************************
*  32-bit hash
************************************************************************/
#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
/*!
 * @brief An unsigned 32-bit integer.
 *
 * Not necessarily defined to `uint32_t` but functionally equivalent.
 */
typedef uint32_t XXH32_hash_t;

#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   include <stdint.h>
    typedef uint32_t XXH32_hash_t;

#else
#   include <limits.h>
#   if UINT_MAX == 0xFFFFFFFFUL
      typedef unsigned int XXH32_hash_t;
#   else
#     if ULONG_MAX == 0xFFFFFFFFUL
        typedef unsigned long XXH32_hash_t;
#     else
#       error "unsupported platform: need a 32-bit type"
#     endif
#   endif
#endif

/*!
 * @}
 *
 * @defgroup xxh32_family XXH32 family
 * @ingroup public
 * Contains functions used in the classic 32-bit xxHash algorithm.
 *
 * @note
 *   XXH32 is useful for older platforms, with no or poor 64-bit performance.
 *   Note that @ref xxh3_family provides competitive speed
 *   for both 32-bit and 64-bit systems, and offers true 64/128 bit hash results.
 *
 * @see @ref xxh64_family, @ref xxh3_family : Other xxHash families
 * @see @ref xxh32_impl for implementation details
 * @{
 */

/*!
 * @brief Calculates the 32-bit hash of @p input using xxHash32.
 *
 * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 32-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 32-bit hash value.
 *
 * @see
 *    XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *    Direct equivalents for the other variants of xxHash.
 * @see
 *    XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
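/*
 * For illustration, a minimal one-shot usage sketch:
 *
 * @code{.c}
 *    #include <string.h>
 *
 *    const char msg[] = "hello";
 *    // Hash the bytes of msg (excluding the terminating NUL), seed = 0.
 *    XXH32_hash_t h = XXH32(msg, strlen(msg), 0);
 * @endcode
 */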

/*!
 * Streaming functions generate the xxHash value from an incremental input.
 * This method is slower than single-call functions, due to state management.
 * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
 *
 * An XXH state must first be allocated using `XXH*_createState()`.
 *
 * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
 *
 * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
 *
 * The function returns an error code, with 0 meaning OK, and any other value
 * meaning there is an error.
 *
 * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
 * This function returns the nn-bit hash as an int or long long.
 *
 * It's still possible to continue inserting input into the hash state after a
 * digest, and generate new hash values later on by invoking `XXH*_digest()`.
 *
 * When done, release the state using `XXH*_freeState()`.
 *
 * Example code for incrementally hashing a file:
 * @code{.c}
 *    #include <assert.h>
 *    #include <stdio.h>
 *    #include <xxhash.h>
 *    #define BUFFER_SIZE 256
 *
 *    // Note: XXH64 and XXH3 use the same interface.
 *    XXH32_hash_t
 *    hashFile(FILE* stream)
 *    {
 *        XXH32_state_t* state;
 *        unsigned char buf[BUFFER_SIZE];
 *        size_t amt;
 *        XXH32_hash_t hash;
 *
 *        state = XXH32_createState();       // Create a state
 *        assert(state != NULL);             // Error check here
 *        XXH32_reset(state, 0xbaad5eed);    // Reset state with our seed
 *        while ((amt = fread(buf, 1, sizeof(buf), stream)) != 0) {
 *            XXH32_update(state, buf, amt); // Hash the file in chunks
 *        }
 *        hash = XXH32_digest(state);        // Finalize the hash
 *        XXH32_freeState(state);            // Clean up
 *        return hash;
 *    }
 * @endcode
 */

/*!
 * @typedef struct XXH32_state_s XXH32_state_t
 * @brief The opaque state struct for the XXH32 streaming API.
 *
 * @see XXH32_state_s for details.
 */
typedef struct XXH32_state_s XXH32_state_t;

/*!
 * @brief Allocates an @ref XXH32_state_t.
 *
 * Must be freed with XXH32_freeState().
 * @return An allocated XXH32_state_t on success, `NULL` on failure.
 */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
/*!
 * @brief Frees an @ref XXH32_state_t.
 *
 * Must be allocated with XXH32_createState().
 * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
 * @return XXH_OK.
 */
XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
/*!
 * @brief Copies one @ref XXH32_state_t to another.
 *
 * @param dst_state The state to copy to.
 * @param src_state The state to copy from.
 * @pre
 *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
 */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);

/*!
 * @brief Resets an @ref XXH32_state_t to begin a new hash.
 *
 * This function resets and seeds a state. Call it before @ref XXH32_update().
 *
 * @param statePtr The state struct to reset.
 * @param seed The 32-bit seed to alter the hash result predictably.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, XXH32_hash_t seed);

/*!
 * @brief Consumes a block of @p input to an @ref XXH32_state_t.
 *
 * Call this to incrementally consume blocks of data.
 *
 * @param statePtr The state struct to update.
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);

/*!
 * @brief Returns the calculated hash value from an @ref XXH32_state_t.
 *
 * @note
 *   Calling XXH32_digest() will not affect @p statePtr, so you can update,
 *   digest, and update again.
 *
 * @param statePtr The state struct to calculate the hash from.
 *
 * @pre
 *  @p statePtr must not be `NULL`.
 *
 * @return The calculated xxHash32 value from that state.
 */
XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);

/*******   Canonical representation   *******/

/*
 * The default return values from XXH functions are unsigned 32 and 64 bit
 * integers.
 * This is the simplest and fastest format for further post-processing.
 *
 * However, this leaves open the question of byte-level ordering,
 * since little and big endian conventions will store the same number differently.
 *
 * The canonical representation settles this issue by mandating big-endian
 * convention, the same convention as human-readable numbers (large digits first).
 *
 * When writing hash values to storage, sending them over a network, or printing
 * them, it's highly recommended to use the canonical representation to ensure
 * portability across a wider range of systems, present and future.
 *
 * The following functions allow transformation of hash values to and from
 * canonical format.
 */

/*!
 * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
 */
typedef struct {
    unsigned char digest[4]; /*!< Hash bytes, big endian */
} XXH32_canonical_t;

/*!
 * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
 *
 * @param dst The @ref XXH32_canonical_t pointer to be stored to.
 * @param hash The @ref XXH32_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);

/*!
 * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
 *
 * @param src The @ref XXH32_canonical_t to convert.
 *
 * @pre
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
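/*
 * For illustration, a minimal round trip through the canonical form,
 * e.g. before writing a hash to disk or the network:
 *
 * @code{.c}
 *    XXH32_hash_t h = XXH32("abc", 3, 0);
 *    XXH32_canonical_t c;
 *    XXH32_canonicalFromHash(&c, h);   // c.digest[] is big endian, portable
 *    // fwrite(c.digest, 1, sizeof(c.digest), f); ...
 *    XXH32_hash_t back = XXH32_hashFromCanonical(&c);
 *    // back == h on any platform, regardless of native endianness
 * @endcode
 */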


#ifdef __has_attribute
# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
#else
# define XXH_HAS_ATTRIBUTE(x) 0
#endif

/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
# define XXH_HAS_C_ATTRIBUTE(x) 0
#endif

#if defined(__cplusplus) && defined(__has_cpp_attribute)
# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
# define XXH_HAS_CPP_ATTRIBUTE(x) 0
#endif

/*
Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute
introduced in C++17 and C23.
C++17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
C23   : https://en.cppreference.com/w/c/language/attributes/fallthrough
*/
#if XXH_HAS_C_ATTRIBUTE(fallthrough)
# define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_CPP_ATTRIBUTE(fallthrough)
# define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
# define XXH_FALLTHROUGH __attribute__ ((fallthrough))
#else
# define XXH_FALLTHROUGH
#endif
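/*
 * For illustration, a hypothetical finalizer-style switch using
 * XXH_FALLTHROUGH (`len`, `acc`, and `p` are placeholder variables):
 *
 * @code{.c}
 *    switch (len) {
 *    case 2: acc += p[1];
 *            XXH_FALLTHROUGH;  // deliberate fall-through, no warning
 *    case 1: acc += p[0];
 *            break;
 *    default: break;
 *    }
 * @endcode
 */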

/*!
 * @}
 * @ingroup public
 * @{
 */

#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
*  64-bit hash
************************************************************************/
#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
/*!
 * @brief An unsigned 64-bit integer.
 *
 * Not necessarily defined to `uint64_t` but functionally equivalent.
 */
typedef uint64_t XXH64_hash_t;
#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
   typedef uint64_t XXH64_hash_t;
#else
#  include <limits.h>
#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
     /* LP64 ABI says uint64_t is unsigned long */
     typedef unsigned long XXH64_hash_t;
#  else
     /* the following type must have a width of 64 bits */
     typedef unsigned long long XXH64_hash_t;
#  endif
#endif

/*!
 * @}
 *
 * @defgroup xxh64_family XXH64 family
 * @ingroup public
 * @{
 * Contains functions used in the classic 64-bit xxHash algorithm.
 *
 * @note
 *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
 *   and offers true 64/128 bit hash results.
 *   It provides better speed for systems with vector processing capabilities.
 */


/*!
 * @brief Calculates the 64-bit hash of @p input using xxHash64.
 *
 * This function usually runs faster on 64-bit systems, but slower on 32-bit
 * systems (see benchmark).
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 64-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 64-bit hash.
 *
 * @see
 *    XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *    Direct equivalents for the other variants of xxHash.
 * @see
 *    XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version.
 */
/* Begin FreeBSD - This symbol is needed by dll-linked CLI zstd(1). */
__attribute__((visibility ("default")))
/* End FreeBSD */
XXH_PUBLIC_API XXH64_hash_t XXH64(const void* input, size_t length, XXH64_hash_t seed);
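/*
 * For illustration, a minimal one-shot sketch with an arbitrary example seed:
 *
 * @code{.c}
 *    const unsigned char data[16] = {0};
 *    XXH64_hash_t h = XXH64(data, sizeof(data), 0x9E3779B185EBCA87ULL);
 * @endcode
 */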

/*******   Streaming   *******/
/*!
 * @brief The opaque state struct for the XXH64 streaming API.
 *
 * @see XXH64_state_s for details.
 */
typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);

XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);

/*******   Canonical representation   *******/
typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);

#ifndef XXH_NO_XXH3
/*!
 * @}
 * ************************************************************************
 * @defgroup xxh3_family XXH3 family
 * @ingroup public
 * @{
 *
 * XXH3 is a more recent hash algorithm featuring:
 *  - Improved speed for both small and large inputs
 *  - True 64-bit and 128-bit outputs
 *  - SIMD acceleration
 *  - Improved 32-bit viability
 *
 * Speed analysis methodology is explained here:
 *
 *    https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
 *
 * Compared to XXH64, expect XXH3 to run approximately
 * ~2x faster on large inputs and >3x faster on small ones;
 * exact differences vary depending on platform.
 *
 * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
 * but does not require it.
 * Any 32-bit and 64-bit targets that can run XXH32 smoothly
 * can run XXH3 at competitive speeds, even without vector support.
 * Further details are explained in the implementation.
 *
 * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8,
 * ZVector and scalar targets. This can be controlled via the XXH_VECTOR macro.
 *
 * XXH3's implementation is portable:
 * it has a generic C90 formulation that can be compiled on any platform,
 * and all implementations generate exactly the same hash value on all platforms.
 * Starting from v0.8.0, it's also labelled "stable", meaning that
 * any future version will also generate the same hash value.
 *
 * XXH3 offers 2 variants, _64bits and _128bits.
 *
 * When only 64 bits are needed, prefer invoking the _64bits variant, as it
 * reduces the amount of mixing, resulting in faster speed on small inputs.
 * It's also generally simpler to manipulate a scalar return type than a struct.
 *
 * The API supports one-shot hashing, streaming mode, and custom secrets.
 */

/*-**********************************************************************
*  XXH3 64-bit variant
************************************************************************/

/* XXH3_64bits():
 * default 64-bit variant, using default secret and default seed of 0.
 * It's the fastest variant. */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len);

/*
 * XXH3_64bits_withSeed():
 * This variant generates a custom secret on the fly
 * based on default secret altered using the `seed` value.
 * While this operation is decently fast, note that it's not completely free.
 * Note: seed==0 produces the same results as XXH3_64bits().
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);

/*!
 * The bare minimum size for a custom secret.
 *
 * @see
 *  XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
 *  XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
 */
#define XXH3_SECRET_SIZE_MIN 136

/*
 * XXH3_64bits_withSecret():
 * It's possible to provide any blob of bytes as a "secret" to generate the hash.
 * This makes it more difficult for an external actor to prepare an intentional collision.
 * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
 * However, the quality of the secret impacts the dispersion of the hash algorithm.
 * Therefore, the secret _must_ look like a bunch of random bytes.
 * Avoid "trivial" or structured data such as repeated sequences or a text document.
 * Whenever in doubt about the "randomness" of the blob of bytes,
 * consider employing "XXH3_generateSecret()" instead (see below).
 * It will generate a proper high entropy secret derived from the blob of bytes.
 * Another advantage of using XXH3_generateSecret() is that
 * it guarantees that all bits within the initial blob of bytes
 * will impact every bit of the output.
 * This is not necessarily the case when using the blob of bytes directly
 * because, when hashing _small_ inputs, only a portion of the secret is employed.
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);


/*******   Streaming   *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 */

/*!
 * @brief The state struct for the XXH3 streaming API.
 *
 * @see XXH3_state_s for details.
 */
typedef struct XXH3_state_s XXH3_state_t;
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state);

/*
 * XXH3_64bits_reset():
 * Initialize with default parameters.
 * digest will be equivalent to `XXH3_64bits()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr);
/*
 * XXH3_64bits_reset_withSeed():
 * Generate a custom secret from `seed`, and store it into `statePtr`.
 * digest will be equivalent to `XXH3_64bits_withSeed()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
/*
 * XXH3_64bits_reset_withSecret():
 * `secret` is referenced, it _must outlive_ the hash streaming session.
 * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
 * and the quality of produced hash values depends on secret's entropy
 * (secret's content should look like a bunch of random bytes).
 * When in doubt about the randomness of a candidate `secret`,
 * consider employing `XXH3_generateSecret()` instead (see below).
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH3_64bits_digest (const XXH3_state_t* statePtr);

/* note : canonical representation of XXH3 is the same as XXH64
 * since they both produce XXH64_hash_t values */


/*-**********************************************************************
*  XXH3 128-bit variant
************************************************************************/

/*!
 * @brief The return value from 128-bit hashes.
 *
 * Stored in little endian order, although the fields themselves are in native
 * endianness.
 */
typedef struct {
    XXH64_hash_t low64;   /*!< `value & 0xFFFFFFFFFFFFFFFF` */
    XXH64_hash_t high64;  /*!< `value >> 64` */
} XXH128_hash_t;

XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);

/*******   Streaming   *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 *
 * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
 * Use already declared XXH3_createState() and XXH3_freeState().
 *
 * All reset and streaming functions have the same meaning as their 64-bit counterparts.
 */

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr);

/* The following helper functions make it possible to compare XXH128_hash_t values.
 * Since XXH128_hash_t is a structure, this capability is not offered by the language.
 * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */

/*!
 * XXH128_isEqual():
 * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
 */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);

/*!
 * XXH128_cmp():
 *
 * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
 *
 * return: >0 if *h128_1  > *h128_2
 *         =0 if *h128_1 == *h128_2
 *         <0 if *h128_1  < *h128_2
 */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2);
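/*
 * For illustration, sorting an array of 128-bit hashes with the comparator
 * (a sketch; `hashes` and `count` are hypothetical):
 *
 * @code{.c}
 *    #include <stdlib.h>
 *
 *    XXH128_hash_t hashes[16];
 *    size_t count = 16;
 *    // ... fill hashes ...
 *    qsort(hashes, count, sizeof(hashes[0]), XXH128_cmp);
 * @endcode
 */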


/*******   Canonical representation   *******/
typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash);
XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src);


#endif  /* !XXH_NO_XXH3 */
#endif  /* XXH_NO_LONG_LONG */

/*!
 * @}
 */
#endif /* XXHASH_H_5627135585666179 */



#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
#define XXHASH_H_STATIC_13879238742
/* ****************************************************************************
 * This section contains declarations which are not guaranteed to remain stable.
 * They may change in future versions, becoming incompatible with a different
 * version of the library.
 * These declarations should only be used with static linking.
 * Never use them in association with dynamic linking!
 ***************************************************************************** */

/*
 * These definitions are only present to allow static allocation
 * of XXH states, on stack or in a struct, for example.
 * Never **ever** access their members directly.
 */

/*!
 * @internal
 * @brief Structure for XXH32 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH32_state_t.
 * Do not access the members of this struct directly.
 * @see XXH64_state_s, XXH3_state_s
 */
struct XXH32_state_s {
   XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
   XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
   XXH32_hash_t v[4];         /*!< Accumulator lanes */
   XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem32 */
   XXH32_hash_t reserved;     /*!< Reserved field. Do not read nor write to it. */
};   /* typedef'd to XXH32_state_t */


#ifndef XXH_NO_LONG_LONG  /* defined when there is no 64-bit support */

/*!
 * @internal
 * @brief Structure for XXH64 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH64_state_t.
 * Do not access the members of this struct directly.
 * @see XXH32_state_s, XXH3_state_s
 */
struct XXH64_state_s {
   XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
   XXH64_hash_t v[4];         /*!< Accumulator lanes */
   XXH64_hash_t mem64[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem64 */
   XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyway. */
   XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it. */
};   /* typedef'd to XXH64_state_t */


#ifndef XXH_NO_XXH3

#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
#  include <stdalign.h>
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
/* In C++ alignas() is a keyword */
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__GNUC__)
#  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
#elif defined(_MSC_VER)
#  define XXH_ALIGN(n)      __declspec(align(n))
#else
#  define XXH_ALIGN(n)   /* disabled */
#endif

/* Old GCC versions only accept the attribute after the type in structures. */
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
    && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
    && defined(__GNUC__)
#   define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
#else
#   define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
#endif

/*!
 * @brief The size of the internal XXH3 buffer.
 *
 * This is the optimal update size for incremental hashing.
 *
 * @see XXH3_64b_update(), XXH3_128b_update().
 */
#define XXH3_INTERNALBUFFER_SIZE 256

/*!
 * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
 *
 * This is the size used in @ref XXH3_kSecret and the seeded functions.
 *
 * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
 */
#define XXH3_SECRET_DEFAULT_SIZE 192

/*!
 * @internal
 * @brief Structure for XXH3 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
 * Otherwise it is an opaque type.
 * Never use this definition in combination with a dynamic library.
 * This allows fields to safely be changed in the future.
 *
 * @note ** This structure has a strict alignment requirement of 64 bytes!! **
 * Do not allocate this with `malloc()` or `new`,
 * it will not be sufficiently aligned.
 * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
 *
 * Typedef'd to @ref XXH3_state_t.
 * Never access the members of this struct directly.
 *
 * @see XXH3_INITSTATE() for stack initialization.
 * @see XXH3_createState(), XXH3_freeState().
 * @see XXH32_state_s, XXH64_state_s
 */
struct XXH3_state_s {
   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
       /*!< The 8 accumulators. Similar to `vN` in @ref XXH32_state_s::v and @ref XXH64_state_s::v */
   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
       /*!< Used to store a custom secret generated from a seed. */
   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
       /*!< The internal buffer. @see XXH32_state_s::mem32 */
   XXH32_hash_t bufferedSize;
       /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
   XXH32_hash_t useSeed;
       /*!< Reserved field. Needed for padding on 64-bit. */
   size_t nbStripesSoFar;
       /*!< Number of stripes processed. */
   XXH64_hash_t totalLen;
       /*!< Total length hashed. 64-bit even on 32-bit targets. */
   size_t nbStripesPerBlock;
       /*!< Number of stripes per block. */
   size_t secretLimit;
       /*!< Size of @ref customSecret or @ref extSecret */
   XXH64_hash_t seed;
       /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
   XXH64_hash_t reserved64;
       /*!< Reserved field. */
   const unsigned char* extSecret;
       /*!< Reference to an external secret for the _withSecret variants, NULL
        *   for other variants. */
   /* note: there may be some padding at the end due to alignment on 64 bytes */
}; /* typedef'd to XXH3_state_t */

#undef XXH_ALIGN_MEMBER

/*!
 * @brief Initializes a stack-allocated `XXH3_state_s`.
 *
 * When the @ref XXH3_state_t structure is simply declared on the stack,
 * it should be initialized with XXH3_INITSTATE() or a memset()
 * in case its first reset uses XXH3_NNbits_reset_withSeed().
 * This init can be omitted if the first reset uses default or _withSecret mode.
 * This operation isn't necessary when the state is created with XXH3_createState().
 * Note that this doesn't prepare the state for a streaming operation,
 * it's still necessary to use XXH3_NNbits_reset*() afterwards.
 */
#define XXH3_INITSTATE(XXH3_state_ptr)   { (XXH3_state_ptr)->seed = 0; }
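/*
 * For illustration, a minimal stack-allocation sketch (only valid with
 * static linking, where the full struct definition is visible):
 *
 * @code{.c}
 *    XXH3_state_t state;        // 64-byte alignment is carried by the type
 *    XXH3_INITSTATE(&state);    // required before a _withSeed reset
 *    XXH3_64bits_reset_withSeed(&state, 42);
 *    XXH3_64bits_update(&state, "abc", 3);
 *    XXH64_hash_t h = XXH3_64bits_digest(&state);
 * @endcode
 */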


/* XXH128() :
 * simple alias to pre-selected XXH3_128bits variant
 */
XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed);


/* ===   Experimental API   === */
/* Symbols defined below must be considered tied to a specific library version. */

/*
 * XXH3_generateSecret():
 *
 * Derive a high-entropy secret from any user-defined content, named customSeed.
 * The generated secret can be used in combination with `*_withSecret()` functions.
 * The `_withSecret()` variants are useful to provide a higher level of protection than a 64-bit seed,
 * as it becomes much more difficult for an external actor to guess how to impact the calculation logic.
 *
 * The function accepts as input a custom seed of any length and any content,
 * and derives from it a high-entropy secret of length @secretSize
 * into an already allocated buffer @secretBuffer.
 * @secretSize must be >= XXH3_SECRET_SIZE_MIN
 *
 * The generated secret can then be used with any `*_withSecret()` variant.
 * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`,
 * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()`
 * are part of this list. They all accept a `secret` parameter
 * which must be large enough for implementation reasons (>= XXH3_SECRET_SIZE_MIN)
 * _and_ feature very high entropy (consist of random-looking bytes).
 * These conditions can be a high bar to meet, so
 * XXH3_generateSecret() can be employed to ensure proper quality.
 *
 * customSeed can be anything. It can have any size, even small ones,
 * and its content can be anything, even "poor entropy" sources such as a bunch of zeroes.
 * The resulting `secret` will nonetheless provide all required qualities.
 *
 * When customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize);
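/*
 * For illustration, a minimal sketch deriving a secret from an application
 * key and using it for one-shot hashing (the key contents are hypothetical):
 *
 * @code{.c}
 *    unsigned char secret[XXH3_SECRET_SIZE_MIN];
 *    const char appKey[] = "my-application-key";  // any size, any content
 *    if (XXH3_generateSecret(secret, sizeof(secret), appKey, sizeof(appKey)) == XXH_OK) {
 *        XXH64_hash_t h = XXH3_64bits_withSecret("abc", 3, secret, sizeof(secret));
 *    }
 * @endcode
 */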


/*
 * XXH3_generateSecret_fromSeed():
 *
 * Generate the same secret as the _withSeed() variants.
 *
 * The resulting secret has a length of XXH3_SECRET_DEFAULT_SIZE (necessarily).
 * @secretBuffer must be already allocated, of size at least XXH3_SECRET_DEFAULT_SIZE bytes.
 *
 * The generated secret can be used in combination with
 * `*_withSecret()` and `_withSecretandSeed()` variants.
 * This generator is notably useful in combination with `_withSecretandSeed()`,
 * as a way to emulate a faster `_withSeed()` variant.
 */
XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed);

/*
 * *_withSecretandSeed() :
 * These variants generate hash values using either
 * @seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes)
 * or @secret for "large" keys (>= XXH3_MIDSIZE_MAX).
 *
 * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
 * `_withSeed()` has to generate the secret on the fly for "large" keys.
 * It's fast, but can be perceptible for "not so large" keys (< 1 KB).
 * `_withSecret()` has to generate the masks on the fly for "small" keys,
 * which requires more instructions than _withSeed() variants.
 * Therefore, the _withSecretandSeed() variant combines the best of both worlds.
 *
 * When @secret has been generated by XXH3_generateSecret_fromSeed(),
 * this variant produces *exactly* the same results as the `_withSeed()` variant,
 * hence offering only a pure speed benefit on "large" inputs,
 * by skipping the need to regenerate the secret for every large input.
 *
 * Another usage scenario is to hash the secret to a 64-bit hash value,
 * for example with XXH3_64bits(), which then becomes the seed,
 * and then employ both the seed and the secret in _withSecretandSeed().
 * On top of speed, an added benefit is that each bit in the secret
 * has a 50% chance to swap each bit in the output,
 * via its impact on the seed.
 * This is not guaranteed when using the secret directly in "small data" scenarios,
 * because only portions of the secret are employed for small data.
 */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecretandSeed(const void* data, size_t len,
                              const void* secret, size_t secretSize,
                              XXH64_hash_t seed);

XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(const void* data, size_t len,
                               const void* secret, size_t secretSize,
                               XXH64_hash_t seed64);

XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
                                    const void* secret, size_t secretSize,
                                    XXH64_hash_t seed64);

XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
                                     const void* secret, size_t secretSize,
                                     XXH64_hash_t seed64);
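/*
 * For illustration, a sketch of emulating a faster _withSeed() by generating
 * the seeded secret once and reusing it for many inputs:
 *
 * @code{.c}
 *    unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
 *    XXH64_hash_t const seed = 42;
 *    XXH3_generateSecret_fromSeed(secret, seed);  // pay the cost once
 *    // Produces the same result as XXH3_64bits_withSeed(data, len, seed),
 *    // but skips secret regeneration on every "large" input.
 *    XXH64_hash_t h = XXH3_64bits_withSecretandSeed("abc", 3,
 *                                                   secret, sizeof(secret),
 *                                                   seed);
 * @endcode
 */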


#endif  /* XXH_NO_XXH3 */
#endif  /* XXH_NO_LONG_LONG */
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
#  define XXH_IMPLEMENTATION
#endif

#endif  /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */


/* ======================================================================== */
/* ======================================================================== */
/* ======================================================================== */


/*-**********************************************************************
 * xxHash implementation
 *-**********************************************************************
 * xxHash's implementation used to be hosted inside xxhash.c,
 * which was then #included when inlining was activated.
 *
 * However, inlining requires the implementation to be visible to the
 * compiler, hence included alongside the header.
 * This construction created issues with a few build and install systems,
 * as it required xxhash.c to be stored in the /include directory.
 *
 * The xxHash implementation is now directly integrated within xxhash.h.
 * As a consequence, xxhash.c is no longer needed in /include.
 *
 * xxhash.c is still available and is still useful.
 * In a "normal" setup, when xxhash is not inlined,
 * xxhash.h only exposes the prototypes and public symbols,
 * while xxhash.c can be built into an object file xxhash.o
 * which can then be linked into the final binary.
 ************************************************************************/

#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
#  define XXH_IMPLEM_13a8737387

/* *************************************
*  Tuning parameters
***************************************/

/*!
 * @defgroup tuning Tuning parameters
 * @{
 *
 * Various macros to control xxHash's behavior.
 */
#ifdef XXH_DOXYGEN
/*!
 * @brief Define this to disable 64-bit code.
 *
 * Useful if only using the @ref xxh32_family and you have a strict C90 compiler.
 */
#  define XXH_NO_LONG_LONG
#  undef XXH_NO_LONG_LONG /* don't actually */
/*!
 * @brief Controls how unaligned memory is accessed.
 *
 * By default, access to unaligned memory is controlled by `memcpy()`, which is
 * safe and portable.
 *
 * Unfortunately, on some target/compiler combinations, the generated assembly
 * is sub-optimal.
 *
 * The switch below allows selecting a different access method
 * in the search for improved performance.
 *
 * @par Possible options:
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
 *   @par
 *     Use `memcpy()`. Safe and portable. Note that most modern compilers will
 *     eliminate the function call and treat it as an unaligned access.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((packed))`
 *   @par
 *     Depends on compiler extensions and is therefore not portable.
 *     This method is safe _if_ your compiler supports it,
 *     and *generally* as fast or faster than `memcpy`.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
 *  @par
 *     Casts directly and dereferences. This method doesn't depend on the
 *     compiler, but it violates the C standard as it directly dereferences an
 *     unaligned pointer. It can generate buggy code on targets which do not
 *     support unaligned memory accesses, but in some circumstances, it's the
 *     only known way to get the most performance.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
 *  @par
 *     Also portable. This can generate the best code on old compilers which don't
 *     inline small `memcpy()` calls, and it might also be faster on big-endian
 *     systems which lack a native byteswap instruction. However, some compilers
 *     will emit literal byteshifts even if the target supports unaligned access.
 *  .
 *
 * @warning
 *   Methods 1 and 2 rely on implementation-defined behavior. Use these with
 *   care, as what works on one compiler/platform/optimization level may cause
 *   another to read garbage data or even crash.
 *
 * See http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
 *
 * Prefer these methods in priority order (0 > 3 > 1 > 2)
 */
#  define XXH_FORCE_MEMORY_ACCESS 0
1325 
1326 /*!
1327  * @def XXH_FORCE_ALIGN_CHECK
1328  * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
1329  * and XXH64() only).
1330  *
1331  * This is an important performance trick for architectures without decent
1332  * unaligned memory access performance.
1333  *
1334  * It checks for input alignment, and when conditions are met, uses a "fast
1335  * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
1336  * faster_ read speed.
1337  *
1338  * The check costs one initial branch per hash, which is generally negligible,
1339  * but not zero.
1340  *
1341  * Moreover, it's not useful to generate an additional code path if memory
1342  * access uses the same instruction for both aligned and unaligned
1343  * addresses (e.g. x86 and aarch64).
1344  *
1345  * In these cases, the alignment check can be removed by setting this macro to 0.
1346  * Then the code will always use unaligned memory access.
1347  * The alignment check is automatically disabled on x86, x64 & arm64,
1348  * platforms known to offer good unaligned memory access performance.
1349  *
1350  * This option does not affect XXH3 (only XXH32 and XXH64).
1351  */
1352 #  define XXH_FORCE_ALIGN_CHECK 0
1353 
1354 /*!
1355  * @def XXH_NO_INLINE_HINTS
1356  * @brief When non-zero, sets all functions to `static`.
1357  *
1358  * By default, xxHash tries to force the compiler to inline almost all internal
1359  * functions.
1360  *
1361  * This can usually improve performance due to reduced jumping and improved
1362  * constant folding, but significantly increases the size of the binary which
1363  * might not be favorable.
1364  *
1365  * Additionally, sometimes the forced inlining can be detrimental to performance,
1366  * depending on the architecture.
1367  *
1368  * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
1369  * compiler full control on whether to inline or not.
1370  *
1371  * When not optimizing (-O0), optimizing for size (-Os, -Oz), or using
1372  * -fno-inline with GCC or Clang, this will automatically be defined.
1373  */
1374 #  define XXH_NO_INLINE_HINTS 0
1375 
1376 /*!
1377  * @def XXH32_ENDJMP
1378  * @brief Whether to use a jump for `XXH32_finalize`.
1379  *
1380  * For performance, `XXH32_finalize` uses multiple branches in the finalizer.
1381  * This is generally the faster option,
1382  * but depending on the exact architecture, a single jump may be preferable.
1383  *
1384  * This setting only makes a difference for very small inputs.
1385  */
1386 #  define XXH32_ENDJMP 0
1387 
1388 /*!
1389  * @internal
1390  * @brief Redefines old internal names.
1391  *
1392  * For compatibility with code that uses xxHash's internals before the names
1393  * were changed to improve namespacing. There is no other reason to use this.
1394  */
1395 #  define XXH_OLD_NAMES
1396 #  undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
1397 #endif /* XXH_DOXYGEN */
1398 /*!
1399  * @}
1400  */
1401 
1402 #ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
1403    /* prefer __packed__ structures (method 1) for gcc on armv7+ and mips */
1404 #  if !defined(__clang__) && \
1405 ( \
1406     (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
1407     ( \
1408         defined(__GNUC__) && ( \
1409             (defined(__ARM_ARCH) && __ARM_ARCH >= 7) || \
1410             ( \
1411                 defined(__mips__) && \
1412                 (__mips <= 5 || __mips_isa_rev < 6) && \
1413                 (!defined(__mips16) || defined(__mips_mips16e2)) \
1414             ) \
1415         ) \
1416     ) \
1417 )
1418 #    define XXH_FORCE_MEMORY_ACCESS 1
1419 #  endif
1420 #endif
1421 
1422 #ifndef XXH_FORCE_ALIGN_CHECK  /* can be defined externally */
1423 #  if defined(__i386)  || defined(__x86_64__) || defined(__aarch64__) \
1424    || defined(_M_IX86) || defined(_M_X64)     || defined(_M_ARM64) /* visual */
1425 #    define XXH_FORCE_ALIGN_CHECK 0
1426 #  else
1427 #    define XXH_FORCE_ALIGN_CHECK 1
1428 #  endif
1429 #endif
1430 
1431 #ifndef XXH_NO_INLINE_HINTS
1432 #  if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \
1433    || defined(__NO_INLINE__)     /* -O0, -fno-inline */
1434 #    define XXH_NO_INLINE_HINTS 1
1435 #  else
1436 #    define XXH_NO_INLINE_HINTS 0
1437 #  endif
1438 #endif
1439 
1440 #ifndef XXH32_ENDJMP
1441 /* generally preferable for performance */
1442 #  define XXH32_ENDJMP 0
1443 #endif
1444 
1445 /*!
1446  * @defgroup impl Implementation
1447  * @{
1448  */
1449 
1450 
1451 /* *************************************
1452 *  Includes & Memory related functions
1453 ***************************************/
1454 /* Modify the local functions below should you wish to use some other memory routines */
1455 /* for ZSTD_malloc(), ZSTD_free() */
1456 #define ZSTD_DEPS_NEED_MALLOC
1457 #include "zstd_deps.h"  /* size_t, ZSTD_malloc, ZSTD_free, ZSTD_memcpy */
1458 static void* XXH_malloc(size_t s) { return ZSTD_malloc(s); }
1459 static void  XXH_free  (void* p)  { ZSTD_free(p); }
1460 static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_memcpy(dest,src,size); }
1461 
1462 
1463 /* *************************************
1464 *  Compiler Specific Options
1465 ***************************************/
1466 #ifdef _MSC_VER /* Visual Studio warning fix */
1467 #  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
1468 #endif
1469 
1470 #if XXH_NO_INLINE_HINTS  /* disable inlining hints */
1471 #  if defined(__GNUC__) || defined(__clang__)
1472 #    define XXH_FORCE_INLINE static __attribute__((unused))
1473 #  else
1474 #    define XXH_FORCE_INLINE static
1475 #  endif
1476 #  define XXH_NO_INLINE static
1477 /* enable inlining hints */
1478 #elif defined(__GNUC__) || defined(__clang__)
1479 #  define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
1480 #  define XXH_NO_INLINE static __attribute__((noinline))
1481 #elif defined(_MSC_VER)  /* Visual Studio */
1482 #  define XXH_FORCE_INLINE static __forceinline
1483 #  define XXH_NO_INLINE static __declspec(noinline)
1484 #elif defined (__cplusplus) \
1485   || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* C99 */
1486 #  define XXH_FORCE_INLINE static inline
1487 #  define XXH_NO_INLINE static
1488 #else
1489 #  define XXH_FORCE_INLINE static
1490 #  define XXH_NO_INLINE static
1491 #endif
1492 
1493 
1494 
1495 /* *************************************
1496 *  Debug
1497 ***************************************/
1498 /*!
1499  * @ingroup tuning
1500  * @def XXH_DEBUGLEVEL
1501  * @brief Sets the debugging level.
1502  *
1503  * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
1504  * compiler's command line options. The value must be a number.
1505  */
1506 #ifndef XXH_DEBUGLEVEL
1507 #  ifdef DEBUGLEVEL /* backwards compat */
1508 #    define XXH_DEBUGLEVEL DEBUGLEVEL
1509 #  else
1510 #    define XXH_DEBUGLEVEL 0
1511 #  endif
1512 #endif
1513 
1514 #if (XXH_DEBUGLEVEL>=1)
1515 #  include <assert.h>   /* note: can still be disabled with NDEBUG */
1516 #  define XXH_ASSERT(c)   assert(c)
1517 #else
1518 #  define XXH_ASSERT(c)   ((void)0)
1519 #endif
1520 
1521 /* note: use after variable declarations */
1522 #ifndef XXH_STATIC_ASSERT
1523 #  if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)    /* C11 */
1524 #    include <assert.h>
1525 #    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
1526 #  elif defined(__cplusplus) && (__cplusplus >= 201103L)            /* C++11 */
1527 #    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
1528 #  else
1529 #    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
1530 #  endif
1531 #  define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
1532 #endif
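/*
 * Example usage of XXH_STATIC_ASSERT (a minimal sketch with a hypothetical
 * helper function; as noted above, in C90 mode the check must appear after
 * variable declarations):
 *
 *     void xxh_check_types(void)
 *     {
 *         XXH_STATIC_ASSERT(sizeof(XXH32_hash_t) == 4);
 *     }
 */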
1533 
1534 /*!
1535  * @internal
1536  * @def XXH_COMPILER_GUARD(var)
1537  * @brief Used to prevent unwanted optimizations for @p var.
1538  *
1539  * It uses an empty GCC inline assembly statement with a register constraint
1540  * which forces @p var into a general purpose register (e.g. eax, ebx, ecx
1541  * on x86) and marks it as modified.
1542  *
1543  * This is used in a few places to avoid unwanted autovectorization (e.g.
1544  * XXH32_round()). All the vectorization we want is explicit via intrinsics;
1545  * elsewhere it is _usually_ unwanted.
1546  *
1547  * We also use it to prevent unwanted constant folding for AArch64 in
1548  * XXH3_initCustomSecret_scalar().
1549  */
1550 #if defined(__GNUC__) || defined(__clang__)
1551 #  define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var))
1552 #else
1553 #  define XXH_COMPILER_GUARD(var) ((void)0)
1554 #endif
1555 
1556 /* *************************************
1557 *  Basic Types
1558 ***************************************/
1559 #if !defined (__VMS) \
1560  && (defined (__cplusplus) \
1561  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
1562 # include <stdint.h>
1563   typedef uint8_t xxh_u8;
1564 #else
1565   typedef unsigned char xxh_u8;
1566 #endif
1567 typedef XXH32_hash_t xxh_u32;
1568 
1569 #ifdef XXH_OLD_NAMES
1570 #  define BYTE xxh_u8
1571 #  define U8   xxh_u8
1572 #  define U32  xxh_u32
1573 #endif
1574 
1575 /* ***   Memory access   *** */
1576 
1577 /*!
1578  * @internal
1579  * @fn xxh_u32 XXH_read32(const void* ptr)
1580  * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
1581  *
1582  * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1583  *
1584  * @param ptr The pointer to read from.
1585  * @return The 32-bit native endian integer from the bytes at @p ptr.
1586  */
1587 
1588 /*!
1589  * @internal
1590  * @fn xxh_u32 XXH_readLE32(const void* ptr)
1591  * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
1592  *
1593  * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1594  *
1595  * @param ptr The pointer to read from.
1596  * @return The 32-bit little endian integer from the bytes at @p ptr.
1597  */
1598 
1599 /*!
1600  * @internal
1601  * @fn xxh_u32 XXH_readBE32(const void* ptr)
1602  * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
1603  *
1604  * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1605  *
1606  * @param ptr The pointer to read from.
1607  * @return The 32-bit big endian integer from the bytes at @p ptr.
1608  */
1609 
1610 /*!
1611  * @internal
1612  * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
1613  * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
1614  *
1615  * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
1616  * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
1617  * always @ref XXH_alignment::XXH_unaligned.
1618  *
1619  * @param ptr The pointer to read from.
1620  * @param align Whether @p ptr is aligned.
1621  * @pre
1622  *   If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
1623  *   aligned.
1624  * @return The 32-bit little endian integer from the bytes at @p ptr.
1625  */
1626 
1627 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
1628 /*
1629  * Manual byteshift. Best for old compilers which don't inline memcpy.
1630  * We actually directly use XXH_readLE32 and XXH_readBE32.
1631  */
1632 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
1633 
1634 /*
1635  * Force direct memory access. Only works on CPUs which support unaligned
1636  * memory access in hardware.
1637  */
1638 static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
1639 
1640 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
1641 
1642 /*
1643  * __pack instructions are safer but compiler-specific, hence potentially
1644  * problematic for some compilers.
1645  *
1646  * Currently only defined for GCC and ICC.
1647  */
1648 #ifdef XXH_OLD_NAMES
1649 typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
1650 #endif
1651 static xxh_u32 XXH_read32(const void* ptr)
1652 {
1653     typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign;
1654     return ((const xxh_unalign*)ptr)->u32;
1655 }
1656 
1657 #else
1658 
1659 /*
1660  * Portable and safe solution. Generally efficient.
1661  * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
1662  */
1663 static xxh_u32 XXH_read32(const void* memPtr)
1664 {
1665     xxh_u32 val;
1666     XXH_memcpy(&val, memPtr, sizeof(val));
1667     return val;
1668 }
1669 
1670 #endif   /* XXH_FORCE_MEMORY_ACCESS */
1671 
1672 
1673 /* ***   Endianness   *** */
1674 
1675 /*!
1676  * @ingroup tuning
1677  * @def XXH_CPU_LITTLE_ENDIAN
1678  * @brief Whether the target is little endian.
1679  *
1680  * Defined to 1 if the target is little endian, or 0 if it is big endian.
1681  * It can be defined externally, for example on the compiler command line.
1682  *
1683  * If it is not defined,
1684  * a runtime check (which is usually constant folded) is used instead.
1685  *
1686  * @note
1687  *   This is not necessarily defined to an integer constant.
1688  *
1689  * @see XXH_isLittleEndian() for the runtime check.
1690  */
1691 #ifndef XXH_CPU_LITTLE_ENDIAN
1692 /*
1693  * Try to detect endianness automatically, to avoid the nonstandard behavior
1694  * in `XXH_isLittleEndian()`
1695  */
1696 #  if defined(_WIN32) /* Windows is always little endian */ \
1697      || defined(__LITTLE_ENDIAN__) \
1698      || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
1699 #    define XXH_CPU_LITTLE_ENDIAN 1
1700 #  elif defined(__BIG_ENDIAN__) \
1701      || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
1702 #    define XXH_CPU_LITTLE_ENDIAN 0
1703 #  else
1704 /*!
1705  * @internal
1706  * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
1707  *
1708  * Most compilers will constant fold this.
1709  */
1710 static int XXH_isLittleEndian(void)
1711 {
1712     /*
1713      * Portable and well-defined behavior.
1714      * Don't use static: it is detrimental to performance.
1715      */
1716     const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
1717     return one.c[0];
1718 }
1719 #   define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
1720 #  endif
1721 #endif
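/*
 * For example, a build for a known little endian target can skip the runtime
 * check entirely (an illustrative command line only):
 *
 *     cc -DXXH_CPU_LITTLE_ENDIAN=1 -c myunit.c
 */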
1722 
1723 
1724 
1725 
1726 /* ****************************************
1727 *  Compiler-specific Functions and Macros
1728 ******************************************/
1729 #define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
1730 
1731 #ifdef __has_builtin
1732 #  define XXH_HAS_BUILTIN(x) __has_builtin(x)
1733 #else
1734 #  define XXH_HAS_BUILTIN(x) 0
1735 #endif
1736 
1737 /*!
1738  * @internal
1739  * @def XXH_rotl32(x,r)
1740  * @brief 32-bit rotate left.
1741  *
1742  * @param x The 32-bit integer to be rotated.
1743  * @param r The number of bits to rotate.
1744  * @pre
1745  *   @p r > 0 && @p r < 32
1746  * @note
1747  *   @p x and @p r may be evaluated multiple times.
1748  * @return The rotated result.
1749  */
1750 #if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
1751                                && XXH_HAS_BUILTIN(__builtin_rotateleft64)
1752 #  define XXH_rotl32 __builtin_rotateleft32
1753 #  define XXH_rotl64 __builtin_rotateleft64
1754 /* Note: although _rotl exists for MinGW (GCC under Windows), performance seems poor */
1755 #elif defined(_MSC_VER)
1756 #  define XXH_rotl32(x,r) _rotl(x,r)
1757 #  define XXH_rotl64(x,r) _rotl64(x,r)
1758 #else
1759 #  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
1760 #  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
1761 #endif
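/*
 * Worked example with the fallback macro: XXH_rotl32(0x80000001U, 1) yields
 * 0x00000003U, since (0x80000001U << 1) == 0x00000002U and
 * (0x80000001U >> 31) == 0x00000001U.
 */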
1762 
1763 /*!
1764  * @internal
1765  * @fn xxh_u32 XXH_swap32(xxh_u32 x)
1766  * @brief A 32-bit byteswap.
1767  *
1768  * @param x The 32-bit integer to byteswap.
1769  * @return @p x, byteswapped.
1770  */
1771 #if defined(_MSC_VER)     /* Visual Studio */
1772 #  define XXH_swap32 _byteswap_ulong
1773 #elif XXH_GCC_VERSION >= 403
1774 #  define XXH_swap32 __builtin_bswap32
1775 #else
1776 static xxh_u32 XXH_swap32 (xxh_u32 x)
1777 {
1778     return  ((x << 24) & 0xff000000 ) |
1779             ((x <<  8) & 0x00ff0000 ) |
1780             ((x >>  8) & 0x0000ff00 ) |
1781             ((x >> 24) & 0x000000ff );
1782 }
1783 #endif
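/*
 * Worked example: XXH_swap32(0x12345678) == 0x78563412, i.e. the four bytes
 * are emitted in reverse order.
 */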
1784 
1785 
1786 /* ***************************
1787 *  Memory reads
1788 *****************************/
1789 
1790 /*!
1791  * @internal
1792  * @brief Enum to indicate whether a pointer is aligned.
1793  */
1794 typedef enum {
1795     XXH_aligned,  /*!< Aligned */
1796     XXH_unaligned /*!< Possibly unaligned */
1797 } XXH_alignment;
1798 
1799 /*
1800  * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
1801  *
1802  * This is ideal for older compilers which don't inline memcpy.
1803  */
1804 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
1805 
1806 XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
1807 {
1808     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
1809     return bytePtr[0]
1810          | ((xxh_u32)bytePtr[1] << 8)
1811          | ((xxh_u32)bytePtr[2] << 16)
1812          | ((xxh_u32)bytePtr[3] << 24);
1813 }
1814 
1815 XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
1816 {
1817     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
1818     return bytePtr[3]
1819          | ((xxh_u32)bytePtr[2] << 8)
1820          | ((xxh_u32)bytePtr[1] << 16)
1821          | ((xxh_u32)bytePtr[0] << 24);
1822 }
1823 
1824 #else
1825 XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
1826 {
1827     return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
1828 }
1829 
1830 static xxh_u32 XXH_readBE32(const void* ptr)
1831 {
1832     return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
1833 }
1834 #endif
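/*
 * For example, reading the byte sequence {0x01, 0x02, 0x03, 0x04} with
 * XXH_readLE32() yields 0x04030201 regardless of host endianness.
 */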
1835 
1836 XXH_FORCE_INLINE xxh_u32
1837 XXH_readLE32_align(const void* ptr, XXH_alignment align)
1838 {
1839     if (align==XXH_unaligned) {
1840         return XXH_readLE32(ptr);
1841     } else {
1842         return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
1843     }
1844 }
1845 
1846 
1847 /* *************************************
1848 *  Misc
1849 ***************************************/
1850 /*! @ingroup public */
1851 XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
1852 
1853 
1854 /* *******************************************************************
1855 *  32-bit hash functions
1856 *********************************************************************/
1857 /*!
1858  * @}
1859  * @defgroup xxh32_impl XXH32 implementation
1860  * @ingroup impl
1861  * @{
1862  */
1863  /* #define instead of static const, to be used as initializers */
1864 #define XXH_PRIME32_1  0x9E3779B1U  /*!< 0b10011110001101110111100110110001 */
1865 #define XXH_PRIME32_2  0x85EBCA77U  /*!< 0b10000101111010111100101001110111 */
1866 #define XXH_PRIME32_3  0xC2B2AE3DU  /*!< 0b11000010101100101010111000111101 */
1867 #define XXH_PRIME32_4  0x27D4EB2FU  /*!< 0b00100111110101001110101100101111 */
1868 #define XXH_PRIME32_5  0x165667B1U  /*!< 0b00010110010101100110011110110001 */
1869 
1870 #ifdef XXH_OLD_NAMES
1871 #  define PRIME32_1 XXH_PRIME32_1
1872 #  define PRIME32_2 XXH_PRIME32_2
1873 #  define PRIME32_3 XXH_PRIME32_3
1874 #  define PRIME32_4 XXH_PRIME32_4
1875 #  define PRIME32_5 XXH_PRIME32_5
1876 #endif
1877 
1878 /*!
1879  * @internal
1880  * @brief Normal stripe processing routine.
1881  *
1882  * This shuffles the bits so that any bit from @p input impacts several bits in
1883  * @p acc.
1884  *
1885  * @param acc The accumulator lane.
1886  * @param input The stripe of input to mix.
1887  * @return The mixed accumulator lane.
1888  */
1889 static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
1890 {
1891     acc += input * XXH_PRIME32_2;
1892     acc  = XXH_rotl32(acc, 13);
1893     acc *= XXH_PRIME32_1;
1894 #if (defined(__SSE4_1__) || defined(__aarch64__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
1895     /*
1896      * UGLY HACK:
1897      * A compiler fence is the only thing that prevents GCC and Clang from
1898      * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
1899      * reason) without globally disabling SSE4.1.
1900      *
1901      * The reason we want to avoid vectorization is because despite working on
1902      * 4 integers at a time, there are multiple factors slowing XXH32 down on
1903      * SSE4:
1904      * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
1905      *   newer chips!) making it slightly slower to multiply four integers at
1906      *   once compared to four integers independently. Even on Sandy/Ivy
1907      *   Bridge, where pmulld was fastest, it is still not worth going into
1908      *   SSE just to multiply unless doing a long operation.
1909      *
1910      * - Four instructions are required to rotate,
1911      *      movdqa tmp,  v // not required with VEX encoding
1912      *      pslld  tmp, 13 // tmp <<= 13
1913      *      psrld  v,   19 // v >>= 19
1914      *      por    v,  tmp // v |= tmp
1915      *   compared to one for scalar:
1916      *      roll   v, 13    // reliably fast across the board
1917      *      shldl  v, v, 13 // Sandy Bridge and later prefer this for some reason
1918      *
1919      * - Instruction level parallelism is actually more beneficial here because
1920      *   SIMD serializes this operation: while v1 is rotating, v2
1921      *   can load data, while v3 can multiply. SSE forces them to operate
1922      *   together.
1923      *
1924      * This is also enabled on AArch64, as Clang autovectorizes it incorrectly
1925      * and it is pointless writing a NEON implementation that is basically the
1926      * same speed as scalar for XXH32.
1927      */
1928     XXH_COMPILER_GUARD(acc);
1929 #endif
1930     return acc;
1931 }
1932 
1933 /*!
1934  * @internal
1935  * @brief Mixes all bits to finalize the hash.
1936  *
1937  * The final mix ensures that all input bits have a chance to impact any bit in
1938  * the output digest, resulting in an unbiased distribution.
1939  *
1940  * @param h32 The hash to avalanche.
1941  * @return The avalanched hash.
1942  */
1943 static xxh_u32 XXH32_avalanche(xxh_u32 h32)
1944 {
1945     h32 ^= h32 >> 15;
1946     h32 *= XXH_PRIME32_2;
1947     h32 ^= h32 >> 13;
1948     h32 *= XXH_PRIME32_3;
1949     h32 ^= h32 >> 16;
1950     return h32;
1951 }
1952 
1953 #define XXH_get32bits(p) XXH_readLE32_align(p, align)
1954 
1955 /*!
1956  * @internal
1957  * @brief Processes the last 0-15 bytes of @p ptr.
1958  *
1959  * There may be up to 15 bytes remaining to consume from the input.
1960  * This final stage will digest them to ensure that all input bytes are present
1961  * in the final mix.
1962  *
1963  * @param h32 The hash to finalize.
1964  * @param ptr The pointer to the remaining input.
1965  * @param len The remaining length, modulo 16.
1966  * @param align Whether @p ptr is aligned.
1967  * @return The finalized hash.
1968  */
1969 static xxh_u32
1970 XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
1971 {
1972 #define XXH_PROCESS1 do {                           \
1973     h32 += (*ptr++) * XXH_PRIME32_5;                \
1974     h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1;      \
1975 } while (0)
1976 
1977 #define XXH_PROCESS4 do {                           \
1978     h32 += XXH_get32bits(ptr) * XXH_PRIME32_3;      \
1979     ptr += 4;                                       \
1980     h32  = XXH_rotl32(h32, 17) * XXH_PRIME32_4;     \
1981 } while (0)
1982 
1983     if (ptr==NULL) XXH_ASSERT(len == 0);
1984 
1985     /* Compact rerolled version; generally faster */
1986     if (!XXH32_ENDJMP) {
1987         len &= 15;
1988         while (len >= 4) {
1989             XXH_PROCESS4;
1990             len -= 4;
1991         }
1992         while (len > 0) {
1993             XXH_PROCESS1;
1994             --len;
1995         }
1996         return XXH32_avalanche(h32);
1997     } else {
1998          switch(len&15) /* or switch(bEnd - p) */ {
1999            case 12:      XXH_PROCESS4;
2000                          XXH_FALLTHROUGH;
2001            case 8:       XXH_PROCESS4;
2002                          XXH_FALLTHROUGH;
2003            case 4:       XXH_PROCESS4;
2004                          return XXH32_avalanche(h32);
2005 
2006            case 13:      XXH_PROCESS4;
2007                          XXH_FALLTHROUGH;
2008            case 9:       XXH_PROCESS4;
2009                          XXH_FALLTHROUGH;
2010            case 5:       XXH_PROCESS4;
2011                          XXH_PROCESS1;
2012                          return XXH32_avalanche(h32);
2013 
2014            case 14:      XXH_PROCESS4;
2015                          XXH_FALLTHROUGH;
2016            case 10:      XXH_PROCESS4;
2017                          XXH_FALLTHROUGH;
2018            case 6:       XXH_PROCESS4;
2019                          XXH_PROCESS1;
2020                          XXH_PROCESS1;
2021                          return XXH32_avalanche(h32);
2022 
2023            case 15:      XXH_PROCESS4;
2024                          XXH_FALLTHROUGH;
2025            case 11:      XXH_PROCESS4;
2026                          XXH_FALLTHROUGH;
2027            case 7:       XXH_PROCESS4;
2028                          XXH_FALLTHROUGH;
2029            case 3:       XXH_PROCESS1;
2030                          XXH_FALLTHROUGH;
2031            case 2:       XXH_PROCESS1;
2032                          XXH_FALLTHROUGH;
2033            case 1:       XXH_PROCESS1;
2034                          XXH_FALLTHROUGH;
2035            case 0:       return XXH32_avalanche(h32);
2036         }
2037         XXH_ASSERT(0);
2038         return h32;   /* reaching this point is deemed impossible */
2039     }
2040 }
2041 
2042 #ifdef XXH_OLD_NAMES
2043 #  define PROCESS1 XXH_PROCESS1
2044 #  define PROCESS4 XXH_PROCESS4
2045 #else
2046 #  undef XXH_PROCESS1
2047 #  undef XXH_PROCESS4
2048 #endif
2049 
2050 /*!
2051  * @internal
2052  * @brief The implementation for @ref XXH32().
2053  *
2054  * @param input , len , seed Directly passed from @ref XXH32().
2055  * @param align Whether @p input is aligned.
2056  * @return The calculated hash.
2057  */
2058 XXH_FORCE_INLINE xxh_u32
2059 XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
2060 {
2061     xxh_u32 h32;
2062 
2063     if (input==NULL) XXH_ASSERT(len == 0);
2064 
2065     if (len>=16) {
2066         const xxh_u8* const bEnd = input + len;
2067         const xxh_u8* const limit = bEnd - 15;
2068         xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
2069         xxh_u32 v2 = seed + XXH_PRIME32_2;
2070         xxh_u32 v3 = seed + 0;
2071         xxh_u32 v4 = seed - XXH_PRIME32_1;
2072 
2073         do {
2074             v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
2075             v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
2076             v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
2077             v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
2078         } while (input < limit);
2079 
2080         h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
2081             + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
2082     } else {
2083         h32  = seed + XXH_PRIME32_5;
2084     }
2085 
2086     h32 += (xxh_u32)len;
2087 
2088     return XXH32_finalize(h32, input, len&15, align);
2089 }
2090 
2091 /*! @ingroup xxh32_family */
2092 XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
2093 {
2094 #if 0
2095     /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
2096     XXH32_state_t state;
2097     XXH32_reset(&state, seed);
2098     XXH32_update(&state, (const xxh_u8*)input, len);
2099     return XXH32_digest(&state);
2100 #else
2101     if (XXH_FORCE_ALIGN_CHECK) {
2102         if ((((size_t)input) & 3) == 0) {   /* Input is 4-byte aligned, leverage the speed benefit */
2103             return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
2104     }   }
2105 
2106     return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
2107 #endif
2108 }
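/*
 * Example one-shot invocation (a sketch; 0x02CC5D05 is the well-known XXH32
 * check value for empty input with seed 0):
 *
 *     XXH32_hash_t const h = XXH32(NULL, 0, 0);
 *     XXH_ASSERT(h == 0x02CC5D05);
 */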
2109 
2110 
2111 
2112 /*******   Hash streaming   *******/
2113 /*!
2114  * @ingroup xxh32_family
2115  */
2116 XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
2117 {
2118     return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
2119 }
2120 /*! @ingroup xxh32_family */
2121 XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
2122 {
2123     XXH_free(statePtr);
2124     return XXH_OK;
2125 }
2126 
2127 /*! @ingroup xxh32_family */
2128 XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
2129 {
2130     XXH_memcpy(dstState, srcState, sizeof(*dstState));
2131 }
2132 
2133 /*! @ingroup xxh32_family */
2134 XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
2135 {
2136     XXH_ASSERT(statePtr != NULL);
2137     memset(statePtr, 0, sizeof(*statePtr));
2138     statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
2139     statePtr->v[1] = seed + XXH_PRIME32_2;
2140     statePtr->v[2] = seed + 0;
2141     statePtr->v[3] = seed - XXH_PRIME32_1;
2142     return XXH_OK;
2143 }
2144 
2145 
2146 /*! @ingroup xxh32_family */
2147 XXH_PUBLIC_API XXH_errorcode
2148 XXH32_update(XXH32_state_t* state, const void* input, size_t len)
2149 {
2150     if (input==NULL) {
2151         XXH_ASSERT(len == 0);
2152         return XXH_OK;
2153     }
2154 
2155     {   const xxh_u8* p = (const xxh_u8*)input;
2156         const xxh_u8* const bEnd = p + len;
2157 
2158         state->total_len_32 += (XXH32_hash_t)len;
2159         state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
2160 
2161         if (state->memsize + len < 16)  {   /* fill in tmp buffer */
2162             XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
2163             state->memsize += (XXH32_hash_t)len;
2164             return XXH_OK;
2165         }
2166 
2167         if (state->memsize) {   /* some data left from previous update */
2168             XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
2169             {   const xxh_u32* p32 = state->mem32;
2170                 state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
2171                 state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
2172                 state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
2173                 state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
2174             }
2175             p += 16-state->memsize;
2176             state->memsize = 0;
2177         }
2178 
2179         if (p <= bEnd-16) {
2180             const xxh_u8* const limit = bEnd - 16;
2181 
2182             do {
2183                 state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
2184                 state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
2185                 state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
2186                 state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
2187             } while (p<=limit);
2188 
2189         }
2190 
2191         if (p < bEnd) {
2192             XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
2193             state->memsize = (unsigned)(bEnd-p);
2194         }
2195     }
2196 
2197     return XXH_OK;
2198 }
2199 
2200 
2201 /*! @ingroup xxh32_family */
2202 XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
2203 {
2204     xxh_u32 h32;
2205 
2206     if (state->large_len) {
2207         h32 = XXH_rotl32(state->v[0], 1)
2208             + XXH_rotl32(state->v[1], 7)
2209             + XXH_rotl32(state->v[2], 12)
2210             + XXH_rotl32(state->v[3], 18);
2211     } else {
2212         h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
2213     }
2214 
2215     h32 += state->total_len_32;
2216 
2217     return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
2218 }
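/*
 * Typical streaming usage (a minimal sketch; part1/part2 and their sizes are
 * placeholders, and error handling is elided):
 *
 *     XXH32_state_t* const state = XXH32_createState();
 *     XXH32_reset(state, 0);
 *     XXH32_update(state, part1, part1Size);
 *     XXH32_update(state, part2, part2Size);
 *     {   XXH32_hash_t const hash = XXH32_digest(state);
 *         XXH32_freeState(state);
 *     }
 */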
2219 
2220 
2221 /*******   Canonical representation   *******/
2222 
2223 /*!
2224  * @ingroup xxh32_family
2225  * The default return values from XXH functions are unsigned 32- and 64-bit
2226  * integers.
2227  *
2228  * The canonical representation uses big endian convention, the same convention
2229  * as human-readable numbers (large digits first).
2230  *
2231  * This way, hash values can be written into a file or buffer, remaining
2232  * comparable across different systems.
2233  *
2234  * The following functions allow transformation of hash values to and from their
2235  * canonical format.
2236  */
2237 XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
2238 {
2239     /* XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); */
2240     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
2241     XXH_memcpy(dst, &hash, sizeof(*dst));
2242 }
2243 /*! @ingroup xxh32_family */
2244 XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
2245 {
2246     return XXH_readBE32(src);
2247 }
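/*
 * Round-trip sketch: given any XXH32_hash_t h, converting it to its
 * canonical (big endian) form and back recovers the original value on any
 * platform:
 *
 *     XXH32_canonical_t c;
 *     XXH32_canonicalFromHash(&c, h);
 *     XXH_ASSERT(XXH32_hashFromCanonical(&c) == h);
 */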
2248 
2249 
2250 #ifndef XXH_NO_LONG_LONG
2251 
2252 /* *******************************************************************
2253 *  64-bit hash functions
2254 *********************************************************************/
2255 /*!
2256  * @}
2257  * @ingroup impl
2258  * @{
2259  */
2260 /*******   Memory access   *******/
2261 
2262 typedef XXH64_hash_t xxh_u64;
2263 
2264 #ifdef XXH_OLD_NAMES
2265 #  define U64 xxh_u64
2266 #endif
2267 
2268 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2269 /*
2270  * Manual byteshift. Best for old compilers which don't inline memcpy.
2271  * We actually directly use XXH_readLE64 and XXH_readBE64.
2272  */
2273 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
2274 
2275 /* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
2276 static xxh_u64 XXH_read64(const void* memPtr)
2277 {
2278     return *(const xxh_u64*) memPtr;
2279 }
2280 
2281 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
2282 
2283 /*
2284  * __pack instructions are safer, but compiler-specific, hence potentially
2285  * problematic for some compilers.
2286  *
2287  * Currently only defined for GCC and ICC.
2288  */
2289 #ifdef XXH_OLD_NAMES
2290 typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
2291 #endif
2292 static xxh_u64 XXH_read64(const void* ptr)
2293 {
2294     typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64;
2295     return ((const xxh_unalign64*)ptr)->u64;
2296 }
2297 
2298 #else
2299 
2300 /*
2301  * Portable and safe solution. Generally efficient.
2302  * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
2303  */
2304 static xxh_u64 XXH_read64(const void* memPtr)
2305 {
2306     xxh_u64 val;
2307     XXH_memcpy(&val, memPtr, sizeof(val));
2308     return val;
2309 }
2310 
2311 #endif   /* XXH_FORCE_MEMORY_ACCESS */
2312 
2313 #if defined(_MSC_VER)     /* Visual Studio */
2314 #  define XXH_swap64 _byteswap_uint64
2315 #elif XXH_GCC_VERSION >= 403
2316 #  define XXH_swap64 __builtin_bswap64
2317 #else
2318 static xxh_u64 XXH_swap64(xxh_u64 x)
2319 {
2320     return  ((x << 56) & 0xff00000000000000ULL) |
2321             ((x << 40) & 0x00ff000000000000ULL) |
2322             ((x << 24) & 0x0000ff0000000000ULL) |
2323             ((x << 8)  & 0x000000ff00000000ULL) |
2324             ((x >> 8)  & 0x00000000ff000000ULL) |
2325             ((x >> 24) & 0x0000000000ff0000ULL) |
2326             ((x >> 40) & 0x000000000000ff00ULL) |
2327             ((x >> 56) & 0x00000000000000ffULL);
2328 }
2329 #endif
2330 
2331 
2332 /* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
2333 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2334 
2335 XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
2336 {
2337     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2338     return bytePtr[0]
2339          | ((xxh_u64)bytePtr[1] << 8)
2340          | ((xxh_u64)bytePtr[2] << 16)
2341          | ((xxh_u64)bytePtr[3] << 24)
2342          | ((xxh_u64)bytePtr[4] << 32)
2343          | ((xxh_u64)bytePtr[5] << 40)
2344          | ((xxh_u64)bytePtr[6] << 48)
2345          | ((xxh_u64)bytePtr[7] << 56);
2346 }
2347 
2348 XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
2349 {
2350     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2351     return bytePtr[7]
2352          | ((xxh_u64)bytePtr[6] << 8)
2353          | ((xxh_u64)bytePtr[5] << 16)
2354          | ((xxh_u64)bytePtr[4] << 24)
2355          | ((xxh_u64)bytePtr[3] << 32)
2356          | ((xxh_u64)bytePtr[2] << 40)
2357          | ((xxh_u64)bytePtr[1] << 48)
2358          | ((xxh_u64)bytePtr[0] << 56);
2359 }
2360 
2361 #else
2362 XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
2363 {
2364     return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
2365 }
2366 
2367 static xxh_u64 XXH_readBE64(const void* ptr)
2368 {
2369     return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
2370 }
2371 #endif
2372 
2373 XXH_FORCE_INLINE xxh_u64
2374 XXH_readLE64_align(const void* ptr, XXH_alignment align)
2375 {
2376     if (align==XXH_unaligned)
2377         return XXH_readLE64(ptr);
2378     else
2379         return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
2380 }
2381 
2382 
2383 /*******   xxh64   *******/
2384 /*!
2385  * @}
2386  * @defgroup xxh64_impl XXH64 implementation
2387  * @ingroup impl
2388  * @{
2389  */
2390 /* #define rather than static const, to be used as initializers */
2391 #define XXH_PRIME64_1  0x9E3779B185EBCA87ULL  /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
2392 #define XXH_PRIME64_2  0xC2B2AE3D27D4EB4FULL  /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
2393 #define XXH_PRIME64_3  0x165667B19E3779F9ULL  /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
2394 #define XXH_PRIME64_4  0x85EBCA77C2B2AE63ULL  /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
2395 #define XXH_PRIME64_5  0x27D4EB2F165667C5ULL  /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
2396 
2397 #ifdef XXH_OLD_NAMES
2398 #  define PRIME64_1 XXH_PRIME64_1
2399 #  define PRIME64_2 XXH_PRIME64_2
2400 #  define PRIME64_3 XXH_PRIME64_3
2401 #  define PRIME64_4 XXH_PRIME64_4
2402 #  define PRIME64_5 XXH_PRIME64_5
2403 #endif
2404 
2405 static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
2406 {
2407     acc += input * XXH_PRIME64_2;
2408     acc  = XXH_rotl64(acc, 31);
2409     acc *= XXH_PRIME64_1;
2410     return acc;
2411 }
2412 
2413 static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
2414 {
2415     val  = XXH64_round(0, val);
2416     acc ^= val;
2417     acc  = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
2418     return acc;
2419 }
2420 
2421 static xxh_u64 XXH64_avalanche(xxh_u64 h64)
2422 {
2423     h64 ^= h64 >> 33;
2424     h64 *= XXH_PRIME64_2;
2425     h64 ^= h64 >> 29;
2426     h64 *= XXH_PRIME64_3;
2427     h64 ^= h64 >> 32;
2428     return h64;
2429 }
2430 
2431 
2432 #define XXH_get64bits(p) XXH_readLE64_align(p, align)
2433 
2434 static xxh_u64
2435 XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
2436 {
2437     if (ptr==NULL) XXH_ASSERT(len == 0);
2438     len &= 31;
2439     while (len >= 8) {
2440         xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
2441         ptr += 8;
2442         h64 ^= k1;
2443         h64  = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
2444         len -= 8;
2445     }
2446     if (len >= 4) {
2447         h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
2448         ptr += 4;
2449         h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
2450         len -= 4;
2451     }
2452     while (len > 0) {
2453         h64 ^= (*ptr++) * XXH_PRIME64_5;
2454         h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1;
2455         --len;
2456     }
2457     return  XXH64_avalanche(h64);
2458 }
2459 
2460 #ifdef XXH_OLD_NAMES
2461 #  define PROCESS1_64 XXH_PROCESS1_64
2462 #  define PROCESS4_64 XXH_PROCESS4_64
2463 #  define PROCESS8_64 XXH_PROCESS8_64
2464 #else
2465 #  undef XXH_PROCESS1_64
2466 #  undef XXH_PROCESS4_64
2467 #  undef XXH_PROCESS8_64
2468 #endif
2469 
2470 XXH_FORCE_INLINE xxh_u64
2471 XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
2472 {
2473     xxh_u64 h64;
2474     if (input==NULL) XXH_ASSERT(len == 0);
2475 
2476     if (len>=32) {
2477         const xxh_u8* const bEnd = input + len;
2478         const xxh_u8* const limit = bEnd - 31;
2479         xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
2480         xxh_u64 v2 = seed + XXH_PRIME64_2;
2481         xxh_u64 v3 = seed + 0;
2482         xxh_u64 v4 = seed - XXH_PRIME64_1;
2483 
2484         do {
2485             v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
2486             v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
2487             v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
2488             v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
2489         } while (input<limit);
2490 
2491         h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
2492         h64 = XXH64_mergeRound(h64, v1);
2493         h64 = XXH64_mergeRound(h64, v2);
2494         h64 = XXH64_mergeRound(h64, v3);
2495         h64 = XXH64_mergeRound(h64, v4);
2496 
2497     } else {
2498         h64  = seed + XXH_PRIME64_5;
2499     }
2500 
2501     h64 += (xxh_u64) len;
2502 
2503     return XXH64_finalize(h64, input, len, align);
2504 }
2505 
2506 
2507 /*! @ingroup xxh64_family */
2508 XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed)
2509 {
2510 #if 0
2511     /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
2512     XXH64_state_t state;
2513     XXH64_reset(&state, seed);
2514     XXH64_update(&state, (const xxh_u8*)input, len);
2515     return XXH64_digest(&state);
2516 #else
2517     if (XXH_FORCE_ALIGN_CHECK) {
2518         if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
2519             return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
2520     }   }
2521 
2522     return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
2523 
2524 #endif
2525 }
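/*
 * Example one-shot invocation (a sketch; 0xEF46DB3751D8E999 is the well-known
 * XXH64 check value for empty input with seed 0):
 *
 *     XXH64_hash_t const h = XXH64(NULL, 0, 0);
 *     XXH_ASSERT(h == 0xEF46DB3751D8E999ULL);
 */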
2526 
2527 /*******   Hash Streaming   *******/
2528 
2529 /*! @ingroup xxh64_family*/
2530 XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
2531 {
2532     return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
2533 }
2534 /*! @ingroup xxh64_family */
2535 XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
2536 {
2537     XXH_free(statePtr);
2538     return XXH_OK;
2539 }
2540 
2541 /*! @ingroup xxh64_family */
2542 XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
2543 {
2544     XXH_memcpy(dstState, srcState, sizeof(*dstState));
2545 }
2546 
2547 /*! @ingroup xxh64_family */
2548 XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed)
2549 {
2550     XXH_ASSERT(statePtr != NULL);
2551     memset(statePtr, 0, sizeof(*statePtr));
2552     statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
2553     statePtr->v[1] = seed + XXH_PRIME64_2;
2554     statePtr->v[2] = seed + 0;
2555     statePtr->v[3] = seed - XXH_PRIME64_1;
2556     return XXH_OK;
2557 }
2558 
2559 /*! @ingroup xxh64_family */
2560 XXH_PUBLIC_API XXH_errorcode
2561 XXH64_update (XXH64_state_t* state, const void* input, size_t len)
2562 {
2563     if (input==NULL) {
2564         XXH_ASSERT(len == 0);
2565         return XXH_OK;
2566     }
2567 
2568     {   const xxh_u8* p = (const xxh_u8*)input;
2569         const xxh_u8* const bEnd = p + len;
2570 
2571         state->total_len += len;
2572 
2573         if (state->memsize + len < 32) {  /* fill in tmp buffer */
2574             XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
2575             state->memsize += (xxh_u32)len;
2576             return XXH_OK;
2577         }
2578 
2579         if (state->memsize) {   /* tmp buffer is full */
2580             XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
2581             state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
2582             state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
2583             state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
2584             state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
2585             p += 32 - state->memsize;
2586             state->memsize = 0;
2587         }
2588 
2589         if (p+32 <= bEnd) {
2590             const xxh_u8* const limit = bEnd - 32;
2591 
2592             do {
2593                 state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
2594                 state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
2595                 state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
2596                 state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
2597             } while (p<=limit);
2598 
2599         }
2600 
2601         if (p < bEnd) {
2602             XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
2603             state->memsize = (unsigned)(bEnd-p);
2604         }
2605     }
2606 
2607     return XXH_OK;
2608 }
2609 
2610 
2611 /*! @ingroup xxh64_family */
2612 XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t* state)
2613 {
2614     xxh_u64 h64;
2615 
2616     if (state->total_len >= 32) {
2617         h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
2618         h64 = XXH64_mergeRound(h64, state->v[0]);
2619         h64 = XXH64_mergeRound(h64, state->v[1]);
2620         h64 = XXH64_mergeRound(h64, state->v[2]);
2621         h64 = XXH64_mergeRound(h64, state->v[3]);
2622     } else {
2623         h64  = state->v[2] /*seed*/ + XXH_PRIME64_5;
2624     }
2625 
2626     h64 += (xxh_u64) state->total_len;
2627 
2628     return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
2629 }
2630 
2631 
2632 /******* Canonical representation   *******/
2633 
2634 /*! @ingroup xxh64_family */
2635 XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
2636 {
2637     /* XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); */
2638     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
2639     XXH_memcpy(dst, &hash, sizeof(*dst));
2640 }
2641 
2642 /*! @ingroup xxh64_family */
2643 XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
2644 {
2645     return XXH_readBE64(src);
2646 }
2647 
2648 #ifndef XXH_NO_XXH3
2649 
2650 /* *********************************************************************
2651 *  XXH3
2652 *  New generation hash designed for speed on small keys and vectorization
2653 ************************************************************************ */
2654 /*!
2655  * @}
2656  * @defgroup xxh3_impl XXH3 implementation
2657  * @ingroup impl
2658  * @{
2659  */
2660 
2661 /* ===   Compiler specifics   === */
2662 
2663 #if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
2664 #  define XXH_RESTRICT /* disable */
2665 #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* >= C99 */
2666 #  define XXH_RESTRICT   restrict
2667 #else
2668 /* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */
2669 #  define XXH_RESTRICT   /* disable */
2670 #endif
2671 
2672 #if (defined(__GNUC__) && (__GNUC__ >= 3))  \
2673   || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
2674   || defined(__clang__)
2675 #    define XXH_likely(x) __builtin_expect(x, 1)
2676 #    define XXH_unlikely(x) __builtin_expect(x, 0)
2677 #else
2678 #    define XXH_likely(x) (x)
2679 #    define XXH_unlikely(x) (x)
2680 #endif
2681 
2682 #if defined(__GNUC__) || defined(__clang__)
2683 #  if defined(__ARM_NEON__) || defined(__ARM_NEON) \
2684    || defined(__aarch64__)  || defined(_M_ARM) \
2685    || defined(_M_ARM64)     || defined(_M_ARM64EC)
2686 #    define inline __inline__  /* circumvent a clang bug */
2687 #    include <arm_neon.h>
2688 #    undef inline
2689 #  elif defined(__AVX2__)
2690 #    include <immintrin.h>
2691 #  elif defined(__SSE2__)
2692 #    include <emmintrin.h>
2693 #  endif
2694 #endif
2695 
2696 #if defined(_MSC_VER)
2697 #  include <intrin.h>
2698 #endif
2699 
2700 /*
2701  * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
2702  * remaining a true 64-bit/128-bit hash function.
2703  *
2704  * This is done by prioritizing a subset of 64-bit operations that can be
2705  * emulated without too many steps on the average 32-bit machine.
2706  *
2707  * For example, these two lines seem similar, and run equally fast on 64-bit:
2708  *
2709  *   xxh_u64 x;
2710  *   x ^= (x >> 47); // good
2711  *   x ^= (x >> 13); // bad
2712  *
2713  * However, to a 32-bit machine, there is a major difference.
2714  *
2715  * x ^= (x >> 47) looks like this:
2716  *
2717  *   x.lo ^= (x.hi >> (47 - 32));
2718  *
2719  * while x ^= (x >> 13) looks like this:
2720  *
2721  *   // note: funnel shifts are not usually cheap.
2722  *   x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
2723  *   x.hi ^= (x.hi >> 13);
2724  *
2725  * The first one is significantly faster than the second, simply because the
2726  * shift is larger than 32. This means:
2727  *  - All the bits we need are in the upper 32 bits, so we can ignore the lower
2728  *    32 bits in the shift.
2729  *  - The shift result will always fit in the lower 32 bits, and therefore,
2730  *    we can ignore the upper 32 bits in the xor.
2731  *
2732  * Thanks to this optimization, XXH3 only requires these features to be efficient:
2733  *
2734  *  - Usable unaligned access
2735  *  - A 32-bit or 64-bit ALU
2736  *      - If 32-bit, a decent ADC instruction
2737  *  - A 32 or 64-bit multiply with a 64-bit result
2738  *  - For the 128-bit variant, a decent byteswap helps short inputs.
2739  *
2740  * The first two are already required by XXH32, and almost all 32-bit and 64-bit
2741  * platforms which can run XXH32 can run XXH3 efficiently.
2742  *
2743  * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
2744  * notable exception.
2745  *
2746  * First of all, Thumb-1 lacks support for the UMULL instruction which
2747  * performs the important long multiply. This means numerous __aeabi_lmul
2748  * calls.
2749  *
2750  * Second of all, the 8 functional registers are just not enough.
2751  * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
2752  * Lo registers, and this shuffling results in thousands more MOVs than A32.
2753  *
2754  * A32 and T32 don't have this limitation. They can access all 14 registers,
2755  * do a 32->64 multiply with UMULL, and the flexible operand allowing free
2756  * shifts is helpful, too.
2757  *
2758  * Therefore, we do a quick sanity check.
2759  *
2760  * If compiling Thumb-1 for a target which supports ARM instructions, we will
2761  * emit a warning, as it is not a "sane" platform to compile for.
2762  *
2763  * Usually, if this happens, it is because of an accident and you probably need
2764  * to specify -march, as you likely meant to compile for a newer architecture.
2765  *
2766  * Credit: large sections of the vectorial and asm source code paths
2767  *         have been contributed by @easyaspi314
2768  */
2769 #if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
2770 #   warning "XXH3 is highly inefficient without ARM or Thumb-2."
2771 #endif
2772 
2773 /* ==========================================
2774  * Vectorization detection
2775  * ========================================== */
2776 
2777 #ifdef XXH_DOXYGEN
2778 /*!
2779  * @ingroup tuning
2780  * @brief Overrides the vectorization implementation chosen for XXH3.
2781  *
2782  * Can be defined to 0 to disable SIMD or any of the values mentioned in
2783  * @ref XXH_VECTOR_TYPE.
2784  *
2785  * If this is not defined, it uses predefined macros to determine the best
2786  * implementation.
2787  */
2788 #  define XXH_VECTOR XXH_SCALAR
2789 /*!
2790  * @ingroup tuning
2791  * @brief Possible values for @ref XXH_VECTOR.
2792  *
2793  * Note that these are actually implemented as macros.
2794  *
2795  * If this is not defined, it is detected automatically.
2796  * @ref XXH_X86DISPATCH overrides this.
2797  */
2798 enum XXH_VECTOR_TYPE /* fake enum */ {
2799     XXH_SCALAR = 0,  /*!< Portable scalar version */
2800     XXH_SSE2   = 1,  /*!<
2801                       * SSE2 for Pentium 4, Opteron, all x86_64.
2802                       *
2803                       * @note SSE2 is also guaranteed on Windows 10, macOS, and
2804                       * Android x86.
2805                       */
2806     XXH_AVX2   = 2,  /*!< AVX2 for Haswell and Bulldozer */
2807     XXH_AVX512 = 3,  /*!< AVX512 for Skylake and Icelake */
2808     XXH_NEON   = 4,  /*!< NEON for most ARMv7-A and all AArch64 */
2809     XXH_VSX    = 5,  /*!< VSX and ZVector for POWER8/z13 (64-bit) */
2810 };
2811 /*!
2812  * @ingroup tuning
2813  * @brief Selects the minimum alignment for XXH3's accumulators.
2814  *
2815  * When using SIMD, this should match the alignment required for said vector
2816  * type, so, for example, 32 for AVX2.
2817  *
2818  * Default: Auto detected.
2819  */
2820 #  define XXH_ACC_ALIGN 8
2821 #endif
2822 
2823 /* Actual definition */
2824 #ifndef XXH_DOXYGEN
2825 #  define XXH_SCALAR 0
2826 #  define XXH_SSE2   1
2827 #  define XXH_AVX2   2
2828 #  define XXH_AVX512 3
2829 #  define XXH_NEON   4
2830 #  define XXH_VSX    5
2831 #endif
2832 
2833 #ifndef XXH_VECTOR    /* can be defined on command line */
2834 #  if ( \
2835         defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
2836      || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
2837    ) && ( \
2838         defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
2839     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
2840    )
2841 #    define XXH_VECTOR XXH_NEON
2842 #  elif defined(__AVX512F__)
2843 #    define XXH_VECTOR XXH_AVX512
2844 #  elif defined(__AVX2__)
2845 #    define XXH_VECTOR XXH_AVX2
2846 #  elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
2847 #    define XXH_VECTOR XXH_SSE2
2848 #  elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
2849      || (defined(__s390x__) && defined(__VEC__)) \
2850      && defined(__GNUC__) /* TODO: IBM XL */
2851 #    define XXH_VECTOR XXH_VSX
2852 #  else
2853 #    define XXH_VECTOR XXH_SCALAR
2854 #  endif
2855 #endif
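/*
 * The detected value can be overridden externally, for example to force the
 * portable scalar code path (an illustrative command line only):
 *
 *     cc -O3 -DXXH_VECTOR=XXH_SCALAR -c myunit.c
 */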
2856 
2857 /*
2858  * Controls the alignment of the accumulator,
2859  * for compatibility with aligned vector loads, which are usually faster.
2860  */
2861 #ifndef XXH_ACC_ALIGN
2862 #  if defined(XXH_X86DISPATCH)
2863 #     define XXH_ACC_ALIGN 64  /* for compatibility with avx512 */
2864 #  elif XXH_VECTOR == XXH_SCALAR  /* scalar */
2865 #     define XXH_ACC_ALIGN 8
2866 #  elif XXH_VECTOR == XXH_SSE2  /* sse2 */
2867 #     define XXH_ACC_ALIGN 16
2868 #  elif XXH_VECTOR == XXH_AVX2  /* avx2 */
2869 #     define XXH_ACC_ALIGN 32
2870 #  elif XXH_VECTOR == XXH_NEON  /* neon */
2871 #     define XXH_ACC_ALIGN 16
2872 #  elif XXH_VECTOR == XXH_VSX   /* vsx */
2873 #     define XXH_ACC_ALIGN 16
2874 #  elif XXH_VECTOR == XXH_AVX512  /* avx512 */
2875 #     define XXH_ACC_ALIGN 64
2876 #  endif
2877 #endif
2878 
2879 #if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
2880     || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
2881 #  define XXH_SEC_ALIGN XXH_ACC_ALIGN
2882 #else
2883 #  define XXH_SEC_ALIGN 8
2884 #endif
2885 
2886 /*
2887  * UGLY HACK:
2888  * GCC usually generates the best code with -O3 for xxHash.
2889  *
2890  * However, when targeting AVX2, it is overzealous in its unrolling, resulting
2891  * in code roughly 3/4 the speed of Clang.
2892  *
2893  * There are other issues, such as GCC splitting _mm256_loadu_si256 into
2894  * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
2895  * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
2896  *
2897  * That is why when compiling the AVX2 version, it is recommended to use either
2898  *   -O2 -mavx2 -march=haswell
2899  * or
2900  *   -O2 -mavx2 -mno-avx256-split-unaligned-load
2901  * for decent performance, or to use Clang instead.
2902  *
2903  * Fortunately, we can control the first one with a pragma that forces GCC into
2904  * -O2, but the other one we can't control without "failed to inline always
2905  * inline function due to target mismatch" warnings.
2906  */
2907 #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
2908   && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
2909   && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
2910 #  pragma GCC push_options
2911 #  pragma GCC optimize("-O2")
2912 #endif
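/*
 * Example (illustrative): a GCC build of the AVX2 path following the advice
 * above, using the Haswell tuning that avoids the split unaligned loads:
 *
 *   gcc -O2 -mavx2 -march=haswell -c xxhash.c
 */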
2913 
2914 
2915 #if XXH_VECTOR == XXH_NEON
2916 /*
2917  * NEON's setup for vmlal_u32 is a little more complicated than it is on
2918  * SSE2, AVX2, and VSX.
2919  *
2920  * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an upcast.
2921  *
2922  * To do the same operation, the 128-bit 'Q' register needs to be split into
2923  * two 64-bit 'D' registers, performing this operation::
2924  *
2925  *   [                a                 |                 b                ]
2926  *            |              '---------. .--------'                |
2927  *            |                         x                          |
2928  *            |              .---------' '--------.                |
2929  *   [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[    a >> 32     |     b >> 32    ]
2930  *
2931  * Due to significant changes in aarch64, the fastest method for aarch64 is
2932  * completely different from the fastest method for ARMv7-A.
2933  *
2934  * ARMv7-A treats D registers as unions overlaying Q registers, so modifying
2935  * D11 will modify the high half of Q5. This is similar to how modifying AH
2936  * will only affect bits 8-15 of AX on x86.
2937  *
2938  * VZIP takes two registers, and puts even lanes in one register and odd lanes
2939  * in the other.
2940  *
2941  * On ARMv7-A, this strangely modifies both parameters in place instead of
2942  * taking the usual 3-operand form.
2943  *
2944  * Therefore, if we want to do this, we can simply use a D-form VZIP.32 on the
2945  * lower and upper halves of the Q register to end up with the high and low
2946  * halves where we want - all in one instruction.
2947  *
2948  *   vzip.32   d10, d11       @ d10 = { d10[0], d11[0] }; d11 = { d10[1], d11[1] }
2949  *
2950  * Unfortunately, we need inline assembly for this: an instruction modifying
2951  * two registers at once cannot be expressed in GCC or Clang's IR, so they
2952  * would have to create a copy.
2953  *
2954  * aarch64 requires a different approach.
2955  *
2956  * In order to make it easier to write a decent compiler for aarch64, many
2957  * quirks were removed, such as conditional execution.
2958  *
2959  * NEON was also affected by this.
2960  *
2961  * aarch64 cannot access the high bits of a Q-form register, and writes to a
2962  * D-form register zero the high bits, similar to how writes to W-form scalar
2963  * registers (or DWORD registers on x86_64) work.
2964  *
2965  * The formerly free vget_high intrinsics now require a vext (with a few
2966  * exceptions).
2967  *
2968  * Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the equivalent
2969  * of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to only modify one
2970  * operand.
2971  *
2972  * The equivalent of the VZIP.32 on the lower and upper halves would be this
2973  * mess:
2974  *
2975  *   ext     v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] }
2976  *   zip1    v1.2s, v0.2s, v2.2s     // v1 = { v0[0], v2[0] }
2977  *   zip2    v0.2s, v0.2s, v1.2s     // v0 = { v0[1], v2[1] }
2978  *
2979  * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64 (SHRN):
2980  *
2981  *   shrn    v1.2s, v0.2d, #32  // v1 = (uint32x2_t)(v0 >> 32);
2982  *   xtn     v0.2s, v0.2d       // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF);
2983  *
2984  * This is available on ARMv7-A, but is less efficient than a single VZIP.32.
2985  */
2986 
2987 /*!
2988  * Function-like macro:
2989  * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t &outHi)
2990  * {
2991  *     outLo = (uint32x2_t)(in & 0xFFFFFFFF);
2992  *     outHi = (uint32x2_t)(in >> 32);
2993  *     in = UNDEFINED;
2994  * }
2995  */
2996 # if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \
2997    && (defined(__GNUC__) || defined(__clang__)) \
2998    && (defined(__arm__) || defined(__thumb__) || defined(_M_ARM))
2999 #  define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                                              \
3000     do {                                                                                    \
3001       /* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 = upper D half */ \
3002       /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 */     \
3003       /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 */ \
3004       __asm__("vzip.32  %e0, %f0" : "+w" (in));                                             \
3005       (outLo) = vget_low_u32 (vreinterpretq_u32_u64(in));                                   \
3006       (outHi) = vget_high_u32(vreinterpretq_u32_u64(in));                                   \
3007    } while (0)
3008 # else
3009 #  define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                                            \
3010     do {                                                                                  \
3011       (outLo) = vmovn_u64    (in);                                                        \
3012       (outHi) = vshrn_n_u64  ((in), 32);                                                  \
3013     } while (0)
3014 # endif
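/*
 * Usage sketch (illustrative; it mirrors XXH3_accumulate_512_neon below):
 *
 *   uint64x2_t dk = veorq_u64(data, key);   // 2x64-bit lanes to split
 *   uint32x2_t lo, hi;
 *   XXH_SPLIT_IN_PLACE(dk, lo, hi);         // dk is clobbered afterwards
 *   acc = vmlal_u32(acc, lo, hi);           // acc += (uint64x2_t)lo * hi
 */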
3015 
3016 /*!
3017  * @ingroup tuning
3018  * @brief Controls the NEON to scalar ratio for XXH3
3019  *
3020  * On AArch64 when not optimizing for size, XXH3 will run 6 lanes using NEON and
3021  * 2 lanes on scalar by default.
3022  *
3023  * This can be set to 2, 4, 6, or 8. ARMv7 will default to all 8 NEON lanes, as the
3024  * emulated 64-bit arithmetic is too slow.
3025  *
3026  * Modern ARM CPUs are _very_ sensitive to how their pipelines are used.
3027  *
3028  * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but it can't
3029  * have more than 2 NEON (F0/F1) micro-ops. If you are only using NEON instructions,
3030  * you are only using 2/3 of the CPU bandwidth.
3031  *
3032  * This is even more noticeable on more advanced cores like the A76, which
3033  * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
3034  *
3035  * Therefore, @ref XXH3_NEON_LANES lanes will be processed using NEON, and the
3036  * remaining lanes will use scalar instructions. This improves the bandwidth
3037  * and also gives the integer pipelines something to do besides twiddling loop
3038  * counters and pointers.
3039  *
3040  * This change benefits CPUs with large micro-op buffers without negatively affecting
3041  * other CPUs:
3042  *
3043  *  | Chipset               | Dispatch type       | NEON only | 6:2 hybrid | Diff. |
3044  *  |:----------------------|:--------------------|----------:|-----------:|------:|
3045  *  | Snapdragon 730 (A76)  | 2 NEON/8 micro-ops  |  8.8 GB/s |  10.1 GB/s |  ~16% |
3046  *  | Snapdragon 835 (A73)  | 2 NEON/3 micro-ops  |  5.1 GB/s |   5.3 GB/s |   ~5% |
3047  *  | Marvell PXA1928 (A53) | In-order dual-issue |  1.9 GB/s |   1.9 GB/s |    0% |
3048  *
3049  * It also seems to fix some bad codegen on GCC, making it almost as fast as Clang.
3050  *
3051  * @see XXH3_accumulate_512_neon()
3052  */
3053 # ifndef XXH3_NEON_LANES
3054 #  if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
3055    && !defined(__OPTIMIZE_SIZE__)
3056 #   define XXH3_NEON_LANES 6
3057 #  else
3058 #   define XXH3_NEON_LANES XXH_ACC_NB
3059 #  endif
3060 # endif
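/*
 * Example (illustrative): forcing all 8 lanes onto NEON, as on ARMv7, can be
 * done from the build line; 2, 4, 6, and 8 are the values discussed above:
 *
 *   cc -O3 -DXXH3_NEON_LANES=8 -c xxhash.c
 */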
3061 #endif  /* XXH_VECTOR == XXH_NEON */
3062 
3063 /*
3064  * VSX and Z Vector helpers.
3065  *
3066  * This is very messy, and any pull requests to clean this up are welcome.
3067  *
3068  * There are a lot of problems with supporting VSX and s390x, due to
3069  * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
3070  */
3071 #if XXH_VECTOR == XXH_VSX
3072 #  if defined(__s390x__)
3073 #    include <s390intrin.h>
3074 #  else
3075 /* gcc's altivec.h can have the unwanted consequence of unconditionally
3076  * #define-ing the bool, vector, and pixel keywords,
3077  * breaking programs that already use these identifiers for other purposes.
3078  * The paragraph defining these macros is skipped when __APPLE_ALTIVEC__ is defined.
3079  * __APPLE_ALTIVEC__ is _generally_ defined automatically by the compiler,
3080  * but it seems that, in some cases, it isn't.
3081  * Force the build macro to be defined, so that keywords are not altered.
3082  */
3083 #    if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__)
3084 #      define __APPLE_ALTIVEC__
3085 #    endif
3086 #    include <altivec.h>
3087 #  endif
3088 
3089 typedef __vector unsigned long long xxh_u64x2;
3090 typedef __vector unsigned char xxh_u8x16;
3091 typedef __vector unsigned xxh_u32x4;
3092 
3093 # ifndef XXH_VSX_BE
3094 #  if defined(__BIG_ENDIAN__) \
3095   || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
3096 #    define XXH_VSX_BE 1
3097 #  elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
3098 #    warning "-maltivec=be is not recommended. Please use native endianness."
3099 #    define XXH_VSX_BE 1
3100 #  else
3101 #    define XXH_VSX_BE 0
3102 #  endif
3103 # endif /* !defined(XXH_VSX_BE) */
3104 
3105 # if XXH_VSX_BE
3106 #  if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
3107 #    define XXH_vec_revb vec_revb
3108 #  else
3109 /*!
3110  * A polyfill for POWER9's vec_revb().
3111  */
3112 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
3113 {
3114     xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
3115                                   0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
3116     return vec_perm(val, val, vByteSwap);
3117 }
3118 #  endif
3119 # endif /* XXH_VSX_BE */
3120 
3121 /*!
3122  * Performs an unaligned vector load and byte swaps it on big endian.
3123  */
3124 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
3125 {
3126     xxh_u64x2 ret;
3127     XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
3128 # if XXH_VSX_BE
3129     ret = XXH_vec_revb(ret);
3130 # endif
3131     return ret;
3132 }
3133 
3134 /*
3135  * vec_mulo and vec_mule are very problematic intrinsics on PowerPC.
3136  *
3137  * These intrinsics weren't added until GCC 8, despite existing for a while,
3138  * and they are endian dependent. Also, their meanings swap between versions.
3139  */
3140 # if defined(__s390x__)
3141  /* s390x is always big endian, no issue on this platform */
3142 #  define XXH_vec_mulo vec_mulo
3143 #  define XXH_vec_mule vec_mule
3144 # elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
3145 /* Clang has a better way to control this: we can just use the builtin, which doesn't swap. */
3146 #  define XXH_vec_mulo __builtin_altivec_vmulouw
3147 #  define XXH_vec_mule __builtin_altivec_vmuleuw
3148 # else
3149 /* gcc needs inline assembly */
3150 /* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
3151 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
3152 {
3153     xxh_u64x2 result;
3154     __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
3155     return result;
3156 }
3157 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
3158 {
3159     xxh_u64x2 result;
3160     __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
3161     return result;
3162 }
3163 # endif /* XXH_vec_mulo, XXH_vec_mule */
3164 #endif /* XXH_VECTOR == XXH_VSX */
3165 
3166 
3167 /* prefetch
3168  * can be disabled by defining the XXH_NO_PREFETCH build macro */
3169 #if defined(XXH_NO_PREFETCH)
3170 #  define XXH_PREFETCH(ptr)  (void)(ptr)  /* disabled */
3171 #else
3172 #  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))  /* _mm_prefetch() not defined outside of x86/x64 */
3173 #    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
3174 #    define XXH_PREFETCH(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
3175 #  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
3176 #    define XXH_PREFETCH(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
3177 #  else
3178 #    define XXH_PREFETCH(ptr) (void)(ptr)  /* disabled */
3179 #  endif
3180 #endif  /* XXH_NO_PREFETCH */
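/*
 * Usage sketch (illustrative): hint an upcoming stripe into cache. This
 * expands to a no-op where prefetching is unsupported or disabled:
 *
 *   XXH_PREFETCH(input + 384);
 */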
3181 
3182 
3183 /* ==========================================
3184  * XXH3 default settings
3185  * ========================================== */
3186 
3187 #define XXH_SECRET_DEFAULT_SIZE 192   /* minimum XXH3_SECRET_SIZE_MIN */
3188 
3189 #if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
3190 #  error "default keyset is not large enough"
3191 #endif
3192 
3193 /*! Pseudorandom secret taken directly from FARSH. */
3194 XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
3195     0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
3196     0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
3197     0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
3198     0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
3199     0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
3200     0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
3201     0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
3202     0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
3203     0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
3204     0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
3205     0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
3206     0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
3207 };
3208 
3209 
3210 #ifdef XXH_OLD_NAMES
3211 #  define kSecret XXH3_kSecret
3212 #endif
3213 
3214 #ifdef XXH_DOXYGEN
3215 /*!
3216  * @brief Calculates a 32-bit to 64-bit long multiply.
3217  *
3218  * Implemented as a macro.
3219  *
3220  * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't
3221  * need to (but it shouldn't need to anyway; it is about 7 instructions to do
3222  * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we
3223  * use that instead of the normal method.
3224  *
3225  * If you are compiling for platforms like Thumb-1 and don't have a better option,
3226  * you may also want to write your own long multiply routine here.
3227  *
3228  * @param x, y Numbers to be multiplied
3229  * @return 64-bit product of the low 32 bits of @p x and @p y.
3230  */
3231 XXH_FORCE_INLINE xxh_u64
3232 XXH_mult32to64(xxh_u64 x, xxh_u64 y)
3233 {
3234    return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
3235 }
3236 #elif defined(_MSC_VER) && defined(_M_IX86)
3237 #    define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
3238 #else
3239 /*
3240  * Downcast + upcast is usually better than masking on older compilers like
3241  * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
3242  *
3243  * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
3244  * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
3245  */
3246 #    define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
3247 #endif
3248 
3249 /*!
3250  * @brief Calculates a 64->128-bit long multiply.
3251  *
3252  * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
3253  * version.
3254  *
3255  * @param lhs , rhs The 64-bit integers to be multiplied
3256  * @return The 128-bit result represented in an @ref XXH128_hash_t.
3257  */
3258 static XXH128_hash_t
3259 XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
3260 {
3261     /*
3262      * GCC/Clang __uint128_t method.
3263      *
3264      * On most 64-bit targets, GCC and Clang define a __uint128_t type.
3265      * This is usually the best way as it usually uses a native long 64-bit
3266      * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
3267      *
3268      * Usually.
3269      *
3270      * On wasm, despite it being a 32-bit platform, Clang (and Emscripten)
3271      * define this type without having native arithmetic for it. This
3272      * results in a laggy compiler-builtin call which calculates a full
3273      * 128-bit multiply. In that case it is best to use the portable one.
3274      * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
3275      */
3276 #if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \
3277     && defined(__SIZEOF_INT128__) \
3278     || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
3279 
3280     __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
3281     XXH128_hash_t r128;
3282     r128.low64  = (xxh_u64)(product);
3283     r128.high64 = (xxh_u64)(product >> 64);
3284     return r128;
3285 
3286     /*
3287      * MSVC for x64's _umul128 method.
3288      *
3289      * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
3290      *
3291      * This compiles to single operand MUL on x64.
3292      */
3293 #elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC)
3294 
3295 #ifndef _MSC_VER
3296 #   pragma intrinsic(_umul128)
3297 #endif
3298     xxh_u64 product_high;
3299     xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
3300     XXH128_hash_t r128;
3301     r128.low64  = product_low;
3302     r128.high64 = product_high;
3303     return r128;
3304 
3305     /*
3306      * MSVC for ARM64's __umulh method.
3307      *
3308      * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method.
3309      */
3310 #elif defined(_M_ARM64) || defined(_M_ARM64EC)
3311 
3312 #ifndef _MSC_VER
3313 #   pragma intrinsic(__umulh)
3314 #endif
3315     XXH128_hash_t r128;
3316     r128.low64  = lhs * rhs;
3317     r128.high64 = __umulh(lhs, rhs);
3318     return r128;
3319 
3320 #else
3321     /*
3322      * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
3323      *
3324      * This is a fast and simple grade school multiply, which is shown below
3325      * with base 10 arithmetic instead of base 0x100000000.
3326      *
3327      *           9 3 // D2 lhs = 93
3328      *         x 7 5 // D2 rhs = 75
3329      *     ----------
3330      *           1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
3331      *         4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
3332      *         2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
3333      *     + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
3334      *     ---------
3335      *         2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
3336      *     + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
3337      *     ---------
3338      *       6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
3339      *
3340      * The reasons for adding the products like this are:
3341      *  1. It avoids manual carry tracking. Just like how
3342      *     (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
3343      *     This avoids a lot of complexity.
3344      *
3345      *  2. It hints for, and on Clang, compiles to, the powerful UMAAL
3346      *     instruction available in ARM's Digital Signal Processing extension
3347      *     in 32-bit ARMv6 and later, which is shown below:
3348      *
3349      *         void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
3350      *         {
3351      *             xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
3352      *             *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
3353      *             *RdHi = (xxh_u32)(product >> 32);
3354      *         }
3355      *
3356      *     This instruction was designed for efficient long multiplication, and
3357      *     allows this to be calculated in only 4 instructions at speeds
3358      *     comparable to some 64-bit ALUs.
3359      *
3360      *  3. It isn't terrible on other platforms. Usually this will be a couple
3361      *     of 32-bit ADD/ADCs.
3362      */
3363 
3364     /* First calculate all of the cross products. */
3365     xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
3366     xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32,        rhs & 0xFFFFFFFF);
3367     xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
3368     xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32,        rhs >> 32);
3369 
3370     /* Now add the products together. These will never overflow. */
3371     xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
3372     xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32)        + hi_hi;
3373     xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
3374 
3375     XXH128_hash_t r128;
3376     r128.low64  = lower;
3377     r128.high64 = upper;
3378     return r128;
3379 #endif
3380 }
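/*
 * Quick sanity check of the portable path above (illustrative): with
 * lhs = 0xFFFFFFFFFFFFFFFF and rhs = 2, the product is 2^65 - 2, so the
 * carry propagated through 'cross' and 'upper' must yield
 * high64 = 0x0000000000000001 and low64 = 0xFFFFFFFFFFFFFFFE.
 */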
3381 
3382 /*!
3383  * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
3384  *
3385  * The reason for the separate function is to prevent passing too many structs
3386  * around by value. This will hopefully inline the multiply, but we don't force it.
3387  *
3388  * @param lhs , rhs The 64-bit integers to multiply
3389  * @return The low 64 bits of the product XOR'd by the high 64 bits.
3390  * @see XXH_mult64to128()
3391  */
3392 static xxh_u64
3393 XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
3394 {
3395     XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
3396     return product.low64 ^ product.high64;
3397 }
3398 
3399 /*! Seems to produce slightly better code on GCC for some reason. */
3400 XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
3401 {
3402     XXH_ASSERT(0 <= shift && shift < 64);
3403     return v64 ^ (v64 >> shift);
3404 }
3405 
3406 /*
3407  * This is a fast avalanche stage,
3408  * suitable when input bits are already partially mixed
3409  */
3410 static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
3411 {
3412     h64 = XXH_xorshift64(h64, 37);
3413     h64 *= 0x165667919E3779F9ULL;
3414     h64 = XXH_xorshift64(h64, 32);
3415     return h64;
3416 }
3417 
3418 /*
3419  * This is a stronger avalanche,
3420  * inspired by Pelle Evensen's rrmxmx
3421  * preferable when input has not been previously mixed
3422  */
3423 static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
3424 {
3425     /* this mix is inspired by Pelle Evensen's rrmxmx */
3426     h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
3427     h64 *= 0x9FB21C651E98DF25ULL;
3428     h64 ^= (h64 >> 35) + len;
3429     h64 *= 0x9FB21C651E98DF25ULL;
3430     return XXH_xorshift64(h64, 28);
3431 }
3432 
3433 
3434 /* ==========================================
3435  * Short keys
3436  * ==========================================
3437  * One of the shortcomings of XXH32 and XXH64 was that their performance was
3438  * sub-optimal on short lengths. It used an iterative algorithm which strongly
3439  * favored lengths that were a multiple of 4 or 8.
3440  *
3441  * Instead of iterating over individual inputs, we use a set of single shot
3442  * functions which piece together a range of lengths and operate in constant time.
3443  *
3444  * Additionally, the number of multiplies has been significantly reduced. This
3445  * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
3446  *
3447  * Depending on the platform, this may or may not be faster than XXH32, but it
3448  * is almost guaranteed to be faster than XXH64.
3449  */
3450 
3451 /*
3452  * At very short lengths, there isn't enough input to fully hide secrets, or use
3453  * the entire secret.
3454  *
3455  * There is also only a limited amount of mixing we can do before significantly
3456  * impacting performance.
3457  *
3458  * Therefore, we use different sections of the secret and always mix two secret
3459  * samples with an XOR. This should have no effect on performance on the
3460  * seedless or withSeed variants because everything _should_ be constant folded
3461  * by modern compilers.
3462  *
3463  * The XOR mixing hides individual parts of the secret and increases entropy.
3464  *
3465  * This adds an extra layer of strength for custom secrets.
3466  */
3467 XXH_FORCE_INLINE XXH64_hash_t
3468 XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3469 {
3470     XXH_ASSERT(input != NULL);
3471     XXH_ASSERT(1 <= len && len <= 3);
3472     XXH_ASSERT(secret != NULL);
3473     /*
3474      * len = 1: combined = { input[0], 0x01, input[0], input[0] }
3475      * len = 2: combined = { input[1], 0x02, input[0], input[1] }
3476      * len = 3: combined = { input[2], 0x03, input[0], input[1] }
3477      */
3478     {   xxh_u8  const c1 = input[0];
3479         xxh_u8  const c2 = input[len >> 1];
3480         xxh_u8  const c3 = input[len - 1];
3481         xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2  << 24)
3482                                | ((xxh_u32)c3 <<  0) | ((xxh_u32)len << 8);
3483         xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
3484         xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
3485         return XXH64_avalanche(keyed);
3486     }
3487 }
3488 
3489 XXH_FORCE_INLINE XXH64_hash_t
3490 XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3491 {
3492     XXH_ASSERT(input != NULL);
3493     XXH_ASSERT(secret != NULL);
3494     XXH_ASSERT(4 <= len && len <= 8);
3495     seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
3496     {   xxh_u32 const input1 = XXH_readLE32(input);
3497         xxh_u32 const input2 = XXH_readLE32(input + len - 4);
3498         xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
3499         xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
3500         xxh_u64 const keyed = input64 ^ bitflip;
3501         return XXH3_rrmxmx(keyed, len);
3502     }
3503 }
3504 
3505 XXH_FORCE_INLINE XXH64_hash_t
3506 XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3507 {
3508     XXH_ASSERT(input != NULL);
3509     XXH_ASSERT(secret != NULL);
3510     XXH_ASSERT(9 <= len && len <= 16);
3511     {   xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
3512         xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
3513         xxh_u64 const input_lo = XXH_readLE64(input)           ^ bitflip1;
3514         xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
3515         xxh_u64 const acc = len
3516                           + XXH_swap64(input_lo) + input_hi
3517                           + XXH3_mul128_fold64(input_lo, input_hi);
3518         return XXH3_avalanche(acc);
3519     }
3520 }
3521 
3522 XXH_FORCE_INLINE XXH64_hash_t
3523 XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3524 {
3525     XXH_ASSERT(len <= 16);
3526     {   if (XXH_likely(len >  8)) return XXH3_len_9to16_64b(input, len, secret, seed);
3527         if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
3528         if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
3529         return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
3530     }
3531 }
3532 
3533 /*
3534  * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
3535  * multiplication by zero, affecting hashes of lengths 17 to 240.
3536  *
3537  * However, they are very unlikely.
3538  *
3539  * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
3540  * unseeded non-cryptographic hashes, it does not attempt to defend itself
3541  * against specially crafted inputs, only random inputs.
3542  *
3543  * Compared to classic UMAC, where a 1 in 2^31 chance of 4 consecutive bytes
3544  * cancelling out the secret is taken an arbitrary number of times (addressed
3545  * in XXH3_accumulate_512), this collision is very unlikely with random inputs
3546  * and/or proper seeding:
3547  *
3548  * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
3549  * function that is only called up to 16 times per hash with up to 240 bytes of
3550  * input.
3551  *
3552  * This is not too bad for a non-cryptographic hash function, especially with
3553  * only 64 bit outputs.
3554  *
3555  * The 128-bit variant (which trades some speed for strength) is NOT affected
3556  * by this, although it is always a good idea to use a proper seed if you care
3557  * about strength.
3558  */
3559 XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
3560                                      const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
3561 {
3562 #if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
3563   && defined(__i386__) && defined(__SSE2__)  /* x86 + SSE2 */ \
3564   && !defined(XXH_ENABLE_AUTOVECTORIZE)      /* Define to disable like XXH32 hack */
3565     /*
3566      * UGLY HACK:
3567      * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
3568      * slower code.
3569      *
3570      * By forcing seed64 into a register, we disrupt the cost model and
3571      * cause it to scalarize. See `XXH32_round()`
3572      *
3573      * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
3574      * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
3575      * GCC 9.2, despite both emitting scalar code.
3576      *
3577      * GCC generates much better scalar code than Clang for the rest of XXH3,
3578      * which is why finding a more optimal codepath is an interest.
3579      */
3580     XXH_COMPILER_GUARD(seed64);
3581 #endif
3582     {   xxh_u64 const input_lo = XXH_readLE64(input);
3583         xxh_u64 const input_hi = XXH_readLE64(input+8);
3584         return XXH3_mul128_fold64(
3585             input_lo ^ (XXH_readLE64(secret)   + seed64),
3586             input_hi ^ (XXH_readLE64(secret+8) - seed64)
3587         );
3588     }
3589 }
3590 
3591 /* For mid-range keys, XXH3 uses a Mum-hash variant. */
3592 XXH_FORCE_INLINE XXH64_hash_t
3593 XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
3594                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
3595                      XXH64_hash_t seed)
3596 {
3597     XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
3598     XXH_ASSERT(16 < len && len <= 128);
3599 
3600     {   xxh_u64 acc = len * XXH_PRIME64_1;
3601         if (len > 32) {
3602             if (len > 64) {
3603                 if (len > 96) {
3604                     acc += XXH3_mix16B(input+48, secret+96, seed);
3605                     acc += XXH3_mix16B(input+len-64, secret+112, seed);
3606                 }
3607                 acc += XXH3_mix16B(input+32, secret+64, seed);
3608                 acc += XXH3_mix16B(input+len-48, secret+80, seed);
3609             }
3610             acc += XXH3_mix16B(input+16, secret+32, seed);
3611             acc += XXH3_mix16B(input+len-32, secret+48, seed);
3612         }
3613         acc += XXH3_mix16B(input+0, secret+0, seed);
3614         acc += XXH3_mix16B(input+len-16, secret+16, seed);
3615 
3616         return XXH3_avalanche(acc);
3617     }
3618 }
3619 
3620 #define XXH3_MIDSIZE_MAX 240
3621 
3622 XXH_NO_INLINE XXH64_hash_t
3623 XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
3624                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
3625                       XXH64_hash_t seed)
3626 {
3627     XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
3628     XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
3629 
3630     #define XXH3_MIDSIZE_STARTOFFSET 3
3631     #define XXH3_MIDSIZE_LASTOFFSET  17
3632 
3633     {   xxh_u64 acc = len * XXH_PRIME64_1;
3634         int const nbRounds = (int)len / 16;
3635         int i;
3636         for (i=0; i<8; i++) {
3637             acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
3638         }
3639         acc = XXH3_avalanche(acc);
3640         XXH_ASSERT(nbRounds >= 8);
3641 #if defined(__clang__)                                /* Clang */ \
3642     && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
3643     && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
3644         /*
3645          * UGLY HACK:
3646          * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
3647          * Everywhere else, it uses scalar code.
3648          *
3649          * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
3650          * would still be slower than UMAAL (see XXH_mult64to128).
3651          *
3652          * Unfortunately, Clang doesn't handle the long multiplies properly and
3653          * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
3654          * scalarized into an ugly mess of VMOV.32 instructions.
3655          *
3656          * This mess is difficult to avoid without turning autovectorization
3657          * off completely, but the issues are usually relatively minor and/or
3658          * not worth fixing.
3659          *
3660          * This loop is the easiest to fix, as unlike XXH32, this pragma
3661          * _actually works_ because it is a loop vectorization instead of an
3662          * SLP vectorization.
3663          */
3664         #pragma clang loop vectorize(disable)
3665 #endif
3666         for (i=8 ; i < nbRounds; i++) {
3667             acc += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
3668         }
3669         /* last bytes */
3670         acc += XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
3671         return XXH3_avalanche(acc);
3672     }
3673 }
3674 
3675 
3676 /* =======     Long Keys     ======= */
3677 
3678 #define XXH_STRIPE_LEN 64
3679 #define XXH_SECRET_CONSUME_RATE 8   /* nb of secret bytes consumed at each accumulation */
3680 #define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
3681 
3682 #ifdef XXH_OLD_NAMES
3683 #  define STRIPE_LEN XXH_STRIPE_LEN
3684 #  define ACC_NB XXH_ACC_NB
3685 #endif
3686 
3687 XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
3688 {
3689     if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
3690     XXH_memcpy(dst, &v64, sizeof(v64));
3691 }
3692 
3693 /* Several intrinsic functions below are supposed to accept __int64 as argument,
3694  * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
3695  * However, several environments do not define __int64 type,
3696  * requiring a workaround.
3697  */
3698 #if !defined (__VMS) \
3699   && (defined (__cplusplus) \
3700   || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
3701     typedef int64_t xxh_i64;
3702 #else
3703     /* the following type must have a width of 64 bits */
3704     typedef long long xxh_i64;
3705 #endif
3706 
3707 
3708 /*
3709  * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
3710  *
3711  * It is a hardened version of UMAC, based on FARSH's implementation.
3712  *
3713  * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
3714  * implementations, and it is ridiculously fast.
3715  *
3716  * We harden it by mixing the original input to the accumulators as well as the product.
3717  *
3718  * This means that in the (relatively likely) case of a multiply by zero, the
3719  * original input is preserved.
3720  *
3721  * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
3722  * cross-pollination, as otherwise the upper and lower halves would be
3723  * essentially independent.
3724  *
3725  * This doesn't matter on 64-bit hashes since they all get merged together in
3726  * the end, so we skip the extra step.
3727  *
3728  * Both XXH3_64bits and XXH3_128bits use this subroutine.
3729  */
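/*
 * Per 64-bit lane, each SIMD variant below computes the same thing as this
 * scalar sketch (illustrative; it mirrors the scalar fallback round):
 *
 *   xxh_u64 const data_key = input64 ^ secret64;
 *   acc[lane]     += (data_key & 0xFFFFFFFF) * (data_key >> 32);
 *   acc[lane ^ 1] += input64;   // the 'swap': preserves the raw input
 */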
3730 
3731 #if (XXH_VECTOR == XXH_AVX512) \
3732      || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
3733 
3734 #ifndef XXH_TARGET_AVX512
3735 # define XXH_TARGET_AVX512  /* disable attribute target */
3736 #endif
3737 
3738 XXH_FORCE_INLINE XXH_TARGET_AVX512 void
3739 XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
3740                      const void* XXH_RESTRICT input,
3741                      const void* XXH_RESTRICT secret)
3742 {
3743     __m512i* const xacc = (__m512i *) acc;
3744     XXH_ASSERT((((size_t)acc) & 63) == 0);
3745     XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
3746 
3747     {
3748         /* data_vec    = input[0]; */
3749         __m512i const data_vec    = _mm512_loadu_si512   (input);
3750         /* key_vec     = secret[0]; */
3751         __m512i const key_vec     = _mm512_loadu_si512   (secret);
3752         /* data_key    = data_vec ^ key_vec; */
3753         __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);
3754         /* data_key_lo = data_key >> 32; */
3755         __m512i const data_key_lo = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
3756         /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
3757         __m512i const product     = _mm512_mul_epu32     (data_key, data_key_lo);
3758         /* xacc[0] += swap(data_vec); */
3759         __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
3760         __m512i const sum       = _mm512_add_epi64(*xacc, data_swap);
3761         /* xacc[0] += product; */
3762         *xacc = _mm512_add_epi64(product, sum);
3763     }
3764 }
3765 
3766 /*
3767  * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
3768  *
3769  * Multiplication isn't perfect, as explained by Google in HighwayHash:
3770  *
3771  *  // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
3772  *  // varying degrees. In descending order of goodness, bytes
3773  *  // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
3774  *  // As expected, the upper and lower bytes are much worse.
3775  *
3776  * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
3777  *
3778  * Since our algorithm uses a pseudorandom secret to add some variance into the
3779  * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
3780  *
3781  * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
3782  * extraction.
3783  *
3784  * Both XXH3_64bits and XXH3_128bits use this subroutine.
3785  */
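/*
 * Per 64-bit lane, the scramble below is equivalent to this scalar sketch
 * (illustrative):
 *
 *   acc64 ^= acc64 >> 47;     // xorshift: fold high bits onto low bits
 *   acc64 ^= secret64;        // blind the accumulator with the secret
 *   acc64 *= XXH_PRIME32_1;   // the SIMD code splits this into two
 *                             // 32x32->64 partial products
 */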
3786 
3787 XXH_FORCE_INLINE XXH_TARGET_AVX512 void
3788 XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
3789 {
3790     XXH_ASSERT((((size_t)acc) & 63) == 0);
3791     XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
3792     {   __m512i* const xacc = (__m512i*) acc;
3793         const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
3794 
3795         /* xacc[0] ^= (xacc[0] >> 47) */
3796         __m512i const acc_vec     = *xacc;
3797         __m512i const shifted     = _mm512_srli_epi64    (acc_vec, 47);
3798         __m512i const data_vec    = _mm512_xor_si512     (acc_vec, shifted);
3799         /* xacc[0] ^= secret; */
3800         __m512i const key_vec     = _mm512_loadu_si512   (secret);
3801         __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);
3802 
3803         /* xacc[0] *= XXH_PRIME32_1; */
3804         __m512i const data_key_hi = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
3805         __m512i const prod_lo     = _mm512_mul_epu32     (data_key, prime32);
3806         __m512i const prod_hi     = _mm512_mul_epu32     (data_key_hi, prime32);
3807         *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
3808     }
3809 }
3810 
3811 XXH_FORCE_INLINE XXH_TARGET_AVX512 void
3812 XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
3813 {
3814     XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
3815     XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
3816     XXH_ASSERT(((size_t)customSecret & 63) == 0);
3817     (void)(&XXH_writeLE64);
3818     {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
3819         __m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, (xxh_i64)(0U - seed64));
3820 
3821         const __m512i* const src  = (const __m512i*) ((const void*) XXH3_kSecret);
3822               __m512i* const dest = (      __m512i*) customSecret;
3823         int i;
3824         XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
3825         XXH_ASSERT(((size_t)dest & 63) == 0);
3826         for (i=0; i < nbRounds; ++i) {
3827             /* GCC has a bug, _mm512_stream_load_si512 accepts 'void*', not 'void const*',
3828              * this will warn "discards 'const' qualifier". */
3829             union {
3830                 const __m512i* cp;
3831                 void* p;
3832             } remote_const_void;
3833             remote_const_void.cp = src + i;
3834             dest[i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed);
3835     }   }
3836 }
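/*
 * The 0xAA mask above alternates the sign of the seed across 64-bit words,
 * so the whole routine is equivalent to this scalar sketch (illustrative):
 *
 *   for (i = 0; i < XXH_SECRET_DEFAULT_SIZE / 8; i++)
 *       customSecret64[i] = kSecret64[i] + ((i & 1) ? 0U - seed64 : seed64);
 */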
3837 
3838 #endif
3839 
3840 #if (XXH_VECTOR == XXH_AVX2) \
3841     || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
3842 
3843 #ifndef XXH_TARGET_AVX2
3844 # define XXH_TARGET_AVX2  /* disable attribute target */
3845 #endif
3846 
3847 XXH_FORCE_INLINE XXH_TARGET_AVX2 void
3848 XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
3849                     const void* XXH_RESTRICT input,
3850                     const void* XXH_RESTRICT secret)
3851 {
3852     XXH_ASSERT((((size_t)acc) & 31) == 0);
3853     {   __m256i* const xacc    =       (__m256i *) acc;
3854         /* Unaligned. This is mainly for pointer arithmetic, and because
3855          * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3856         const         __m256i* const xinput  = (const __m256i *) input;
3857         /* Unaligned. This is mainly for pointer arithmetic, and because
3858          * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3859         const         __m256i* const xsecret = (const __m256i *) secret;
3860 
3861         size_t i;
3862         for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
3863             /* data_vec    = xinput[i]; */
3864             __m256i const data_vec    = _mm256_loadu_si256    (xinput+i);
3865             /* key_vec     = xsecret[i]; */
3866             __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
3867             /* data_key    = data_vec ^ key_vec; */
3868             __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
3869             /* data_key_lo = data_key >> 32; */
3870             __m256i const data_key_lo = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3871             /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
3872             __m256i const product     = _mm256_mul_epu32     (data_key, data_key_lo);
3873             /* xacc[i] += swap(data_vec); */
3874             __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
3875             __m256i const sum       = _mm256_add_epi64(xacc[i], data_swap);
3876             /* xacc[i] += product; */
3877             xacc[i] = _mm256_add_epi64(product, sum);
3878     }   }
3879 }
3880 
3881 XXH_FORCE_INLINE XXH_TARGET_AVX2 void
3882 XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
3883 {
3884     XXH_ASSERT((((size_t)acc) & 31) == 0);
3885     {   __m256i* const xacc = (__m256i*) acc;
3886         /* Unaligned. This is mainly for pointer arithmetic, and because
3887          * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3888         const         __m256i* const xsecret = (const __m256i *) secret;
3889         const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
3890 
3891         size_t i;
3892         for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
3893             /* xacc[i] ^= (xacc[i] >> 47) */
3894             __m256i const acc_vec     = xacc[i];
3895             __m256i const shifted     = _mm256_srli_epi64    (acc_vec, 47);
3896             __m256i const data_vec    = _mm256_xor_si256     (acc_vec, shifted);
3897             /* xacc[i] ^= xsecret; */
3898             __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
3899             __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
3900 
3901             /* xacc[i] *= XXH_PRIME32_1; */
3902             __m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3903             __m256i const prod_lo     = _mm256_mul_epu32     (data_key, prime32);
3904             __m256i const prod_hi     = _mm256_mul_epu32     (data_key_hi, prime32);
3905             xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
3906         }
3907     }
3908 }
3909 
3910 XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
3911 {
3912     XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
3913     XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
3914     XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
3915     (void)(&XXH_writeLE64);
3916     XXH_PREFETCH(customSecret);
3917     {   __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
3918 
3919         const __m256i* const src  = (const __m256i*) ((const void*) XXH3_kSecret);
3920               __m256i*       dest = (      __m256i*) customSecret;
3921 
3922 #       if defined(__GNUC__) || defined(__clang__)
3923         /*
3924          * On GCC & Clang, marking 'dest' as modified will cause the compiler:
3925          *   - do not extract the secret from sse registers in the internal loop
3926          *   - use less common registers, and avoid pushing these reg into stack
3927          */
3928         XXH_COMPILER_GUARD(dest);
3929 #       endif
3930         XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
3931         XXH_ASSERT(((size_t)dest & 31) == 0);
3932 
3933         /* GCC -O2 needs the loop unrolled manually */
3934         dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src+0), seed);
3935         dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src+1), seed);
3936         dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src+2), seed);
3937         dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src+3), seed);
3938         dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src+4), seed);
3939         dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src+5), seed);
3940     }
3941 }
3942 
3943 #endif
3944 
3945 /* x86dispatch always generates SSE2 */
3946 #if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
3947 
3948 #ifndef XXH_TARGET_SSE2
3949 # define XXH_TARGET_SSE2  /* disable attribute target */
3950 #endif
3951 
3952 XXH_FORCE_INLINE XXH_TARGET_SSE2 void
3953 XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
3954                     const void* XXH_RESTRICT input,
3955                     const void* XXH_RESTRICT secret)
3956 {
3957     /* SSE2 is just a half-scale version of the AVX2 version. */
3958     XXH_ASSERT((((size_t)acc) & 15) == 0);
3959     {   __m128i* const xacc    =       (__m128i *) acc;
3960         /* Unaligned. This is mainly for pointer arithmetic, and because
3961          * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
3962         const         __m128i* const xinput  = (const __m128i *) input;
3963         /* Unaligned. This is mainly for pointer arithmetic, and because
3964          * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
3965         const         __m128i* const xsecret = (const __m128i *) secret;
3966 
3967         size_t i;
3968         for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
3969             /* data_vec    = xinput[i]; */
3970             __m128i const data_vec    = _mm_loadu_si128   (xinput+i);
3971             /* key_vec     = xsecret[i]; */
3972             __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
3973             /* data_key    = data_vec ^ key_vec; */
3974             __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
3975             /* data_key_lo = data_key >> 32; */
3976             __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3977             /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
3978             __m128i const product     = _mm_mul_epu32     (data_key, data_key_lo);
3979             /* xacc[i] += swap(data_vec); */
3980             __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
3981             __m128i const sum       = _mm_add_epi64(xacc[i], data_swap);
3982             /* xacc[i] += product; */
3983             xacc[i] = _mm_add_epi64(product, sum);
3984     }   }
3985 }
3986 
3987 XXH_FORCE_INLINE XXH_TARGET_SSE2 void
3988 XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
3989 {
3990     XXH_ASSERT((((size_t)acc) & 15) == 0);
3991     {   __m128i* const xacc = (__m128i*) acc;
3992         /* Unaligned. This is mainly for pointer arithmetic, and because
3993          * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
3994         const         __m128i* const xsecret = (const __m128i *) secret;
3995         const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
3996 
3997         size_t i;
3998         for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
3999             /* xacc[i] ^= (xacc[i] >> 47) */
4000             __m128i const acc_vec     = xacc[i];
4001             __m128i const shifted     = _mm_srli_epi64    (acc_vec, 47);
4002             __m128i const data_vec    = _mm_xor_si128     (acc_vec, shifted);
4003             /* xacc[i] ^= xsecret[i]; */
4004             __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
4005             __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
4006 
4007             /* xacc[i] *= XXH_PRIME32_1; */
4008             __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
4009             __m128i const prod_lo     = _mm_mul_epu32     (data_key, prime32);
4010             __m128i const prod_hi     = _mm_mul_epu32     (data_key_hi, prime32);
4011             xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
4012         }
4013     }
4014 }
4015 
4016 XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4017 {
4018     XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
4019     (void)(&XXH_writeLE64);
4020     {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
4021 
4022 #       if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
4023         /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
4024         XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
4025         __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
4026 #       else
4027         __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
4028 #       endif
4029         int i;
4030 
4031         const void* const src16 = XXH3_kSecret;
4032         __m128i* dst16 = (__m128i*) customSecret;
4033 #       if defined(__GNUC__) || defined(__clang__)
4034         /*
4035          * On GCC & Clang, marking 'dest' as modified will cause the compiler:
4036          *   - do not extract the secret from sse registers in the internal loop
4037          *   - use less common registers, and avoid pushing these reg into stack
4038          */
4039         XXH_COMPILER_GUARD(dst16);
4040 #       endif
4041         XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
4042         XXH_ASSERT(((size_t)dst16 & 15) == 0);
4043 
4044         for (i=0; i < nbRounds; ++i) {
4045             dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
4046     }   }
4047 }
4048 
4049 #endif
4050 
4051 #if (XXH_VECTOR == XXH_NEON)
4052 
4053 /* forward declarations for the scalar routines */
4054 XXH_FORCE_INLINE void
4055 XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
4056                  void const* XXH_RESTRICT secret, size_t lane);
4057 
4058 XXH_FORCE_INLINE void
4059 XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
4060                          void const* XXH_RESTRICT secret, size_t lane);
4061 
4062 /*!
4063  * @internal
4064  * @brief The bulk processing loop for NEON.
4065  *
4066  * The NEON code path is actually partially scalar when running on AArch64. This
4067  * is to optimize the pipelining and can have up to 15% speedup depending on the
4068  * CPU, and it also mitigates some GCC codegen issues.
4069  *
4070  * @see XXH3_NEON_LANES for configuring this and details about this optimization.
4071  */
4072 XXH_FORCE_INLINE void
4073 XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
4074                     const void* XXH_RESTRICT input,
4075                     const void* XXH_RESTRICT secret)
4076 {
4077     XXH_ASSERT((((size_t)acc) & 15) == 0);
4078     XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0);
4079     {
4080         uint64x2_t* const xacc = (uint64x2_t *) acc;
4081         /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
4082         uint8_t const* const xinput = (const uint8_t *) input;
4083         uint8_t const* const xsecret  = (const uint8_t *) secret;
4084 
4085         size_t i;
4086         /* NEON for the first few lanes (these loops are normally interleaved) */
4087         for (i=0; i < XXH3_NEON_LANES / 2; i++) {
4088             /* data_vec = xinput[i]; */
4089             uint8x16_t data_vec    = vld1q_u8(xinput  + (i * 16));
4090             /* key_vec  = xsecret[i];  */
4091             uint8x16_t key_vec     = vld1q_u8(xsecret + (i * 16));
4092             uint64x2_t data_key;
4093             uint32x2_t data_key_lo, data_key_hi;
4094             /* xacc[i] += swap(data_vec); */
4095             uint64x2_t const data64  = vreinterpretq_u64_u8(data_vec);
4096             uint64x2_t const swapped = vextq_u64(data64, data64, 1);
4097             xacc[i] = vaddq_u64 (xacc[i], swapped);
4098             /* data_key = data_vec ^ key_vec; */
4099             data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec));
4100             /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF);
4101              * data_key_hi = (uint32x2_t) (data_key >> 32);
4102              * data_key = UNDEFINED; */
4103             XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
4104             /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
4105             xacc[i] = vmlal_u32 (xacc[i], data_key_lo, data_key_hi);
4106 
4107         }
4108         /* Scalar for the remainder. This may be a zero iteration loop. */
4109         for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
4110             XXH3_scalarRound(acc, input, secret, i);
4111         }
4112     }
4113 }
4114 
4115 XXH_FORCE_INLINE void
4116 XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4117 {
4118     XXH_ASSERT((((size_t)acc) & 15) == 0);
4119 
4120     {   uint64x2_t* xacc       = (uint64x2_t*) acc;
4121         uint8_t const* xsecret = (uint8_t const*) secret;
4122         uint32x2_t prime       = vdup_n_u32 (XXH_PRIME32_1);
4123 
4124         size_t i;
4125         /* NEON for the first few lanes (these loops are normally interleaved) */
4126         for (i=0; i < XXH3_NEON_LANES / 2; i++) {
4127             /* xacc[i] ^= (xacc[i] >> 47); */
4128             uint64x2_t acc_vec  = xacc[i];
4129             uint64x2_t shifted  = vshrq_n_u64 (acc_vec, 47);
4130             uint64x2_t data_vec = veorq_u64   (acc_vec, shifted);
4131 
4132             /* xacc[i] ^= xsecret[i]; */
4133             uint8x16_t key_vec  = vld1q_u8    (xsecret + (i * 16));
4134             uint64x2_t data_key = veorq_u64   (data_vec, vreinterpretq_u64_u8(key_vec));
4135 
4136             /* xacc[i] *= XXH_PRIME32_1 */
4137             uint32x2_t data_key_lo, data_key_hi;
4138             /* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF);
4139              * data_key_hi = (uint32x2_t) (xacc[i] >> 32);
4140              * xacc[i] = UNDEFINED; */
4141             XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
4142             {   /*
4143                  * prod_hi = (data_key >> 32) * XXH_PRIME32_1;
4144                  *
4145                  * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will
4146                  * incorrectly "optimize" this:
4147                  *   tmp     = vmul_u32(vmovn_u64(a), vmovn_u64(b));
4148                  *   shifted = vshll_n_u32(tmp, 32);
4149                  * to this:
4150                  *   tmp     = "vmulq_u64"(a, b); // no such thing!
4151                  *   shifted = vshlq_n_u64(tmp, 32);
4152                  *
4153                  * However, unlike SSE, Clang lacks a 64-bit multiply routine
4154                  * for NEON, and it scalarizes two 64-bit multiplies instead.
4155                  *
4156                  * vmull_u32 has the same timing as vmul_u32, and it avoids
4157                  * this bug completely.
4158                  * See https://bugs.llvm.org/show_bug.cgi?id=39967
4159                  */
4160                 uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime);
4161                 /* xacc[i] = prod_hi << 32; */
4162                 xacc[i] = vshlq_n_u64(prod_hi, 32);
4163                 /* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */
4164                 xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime);
4165             }
4166         }
4167         /* Scalar for the remainder. This may be a zero iteration loop. */
4168         for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
4169             XXH3_scalarScrambleRound(acc, secret, i);
4170         }
4171     }
4172 }
4173 
4174 #endif
4175 
4176 #if (XXH_VECTOR == XXH_VSX)
4177 
4178 XXH_FORCE_INLINE void
4179 XXH3_accumulate_512_vsx(  void* XXH_RESTRICT acc,
4180                     const void* XXH_RESTRICT input,
4181                     const void* XXH_RESTRICT secret)
4182 {
4183     /* presumed aligned */
4184     unsigned int* const xacc = (unsigned int*) acc;
4185     xxh_u64x2 const* const xinput   = (xxh_u64x2 const*) input;   /* no alignment restriction */
4186     xxh_u64x2 const* const xsecret  = (xxh_u64x2 const*) secret;    /* no alignment restriction */
4187     xxh_u64x2 const v32 = { 32, 32 };
4188     size_t i;
4189     for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
4190         /* data_vec = xinput[i]; */
4191         xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i);
4192         /* key_vec = xsecret[i]; */
4193         xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + i);
4194         xxh_u64x2 const data_key = data_vec ^ key_vec;
4195         /* shuffled = (data_key << 32) | (data_key >> 32); */
4196         xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
4197         /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
4198         xxh_u64x2 const product  = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
4199         /* acc_vec = xacc[i]; */
4200         xxh_u64x2 acc_vec        = (xxh_u64x2)vec_xl(0, xacc + 4 * i);
4201         acc_vec += product;
4202 
4203         /* swap high and low halves */
4204 #ifdef __s390x__
4205         acc_vec += vec_permi(data_vec, data_vec, 2);
4206 #else
4207         acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
4208 #endif
4209         /* xacc[i] = acc_vec; */
4210         vec_xst((xxh_u32x4)acc_vec, 0, xacc + 4 * i);
4211     }
4212 }
4213 
4214 XXH_FORCE_INLINE void
4215 XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4216 {
4217     XXH_ASSERT((((size_t)acc) & 15) == 0);
4218 
4219     {         xxh_u64x2* const xacc    =       (xxh_u64x2*) acc;
4220         const xxh_u64x2* const xsecret = (const xxh_u64x2*) secret;
4221         /* constants */
4222         xxh_u64x2 const v32  = { 32, 32 };
4223         xxh_u64x2 const v47 = { 47, 47 };
4224         xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
4225         size_t i;
4226         for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
4227             /* xacc[i] ^= (xacc[i] >> 47); */
4228             xxh_u64x2 const acc_vec  = xacc[i];
4229             xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
4230 
4231             /* xacc[i] ^= xsecret[i]; */
4232             xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + i);
4233             xxh_u64x2 const data_key = data_vec ^ key_vec;
4234 
4235             /* xacc[i] *= XXH_PRIME32_1 */
4236             /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF);  */
4237             xxh_u64x2 const prod_even  = XXH_vec_mule((xxh_u32x4)data_key, prime);
4238             /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32);  */
4239             xxh_u64x2 const prod_odd  = XXH_vec_mulo((xxh_u32x4)data_key, prime);
4240             xacc[i] = prod_odd + (prod_even << v32);
4241     }   }
4242 }
4243 
4244 #endif
4245 
4246 /* scalar variants - universal */
4247 
4248 /*!
4249  * @internal
4250  * @brief Scalar round for @ref XXH3_accumulate_512_scalar().
4251  *
4252  * This is extracted to its own function because the NEON path uses a combination
4253  * of NEON and scalar.
4254  */
4255 XXH_FORCE_INLINE void
4256 XXH3_scalarRound(void* XXH_RESTRICT acc,
4257                  void const* XXH_RESTRICT input,
4258                  void const* XXH_RESTRICT secret,
4259                  size_t lane)
4260 {
4261     xxh_u64* xacc = (xxh_u64*) acc;
4262     xxh_u8 const* xinput  = (xxh_u8 const*) input;
4263     xxh_u8 const* xsecret = (xxh_u8 const*) secret;
4264     XXH_ASSERT(lane < XXH_ACC_NB);
4265     XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
4266     {
4267         xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8);
4268         xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8);
4269         xacc[lane ^ 1] += data_val; /* swap adjacent lanes */
4270         xacc[lane] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
4271     }
4272 }
4273 
4274 /*!
4275  * @internal
4276  * @brief Processes a 64 byte block of data using the scalar path.
4277  */
4278 XXH_FORCE_INLINE void
4279 XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
4280                      const void* XXH_RESTRICT input,
4281                      const void* XXH_RESTRICT secret)
4282 {
4283     size_t i;
4284     for (i=0; i < XXH_ACC_NB; i++) {
4285         XXH3_scalarRound(acc, input, secret, i);
4286     }
4287 }
4288 
4289 /*!
4290  * @internal
4291  * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar().
4292  *
4293  * This is extracted to its own function because the NEON path uses a combination
4294  * of NEON and scalar.
4295  */
4296 XXH_FORCE_INLINE void
4297 XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
4298                          void const* XXH_RESTRICT secret,
4299                          size_t lane)
4300 {
4301     xxh_u64* const xacc = (xxh_u64*) acc;   /* presumed aligned */
4302     const xxh_u8* const xsecret = (const xxh_u8*) secret;   /* no alignment restriction */
4303     XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
4304     XXH_ASSERT(lane < XXH_ACC_NB);
4305     {
4306         xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
4307         xxh_u64 acc64 = xacc[lane];
4308         acc64 = XXH_xorshift64(acc64, 47);
4309         acc64 ^= key64;
4310         acc64 *= XXH_PRIME32_1;
4311         xacc[lane] = acc64;
4312     }
4313 }
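/*
 * Expressed as arithmetic, each lane's scramble above computes, modulo 2^64:
 *
 *     acc' = ((acc ^ (acc >> 47)) ^ key64) * XXH_PRIME32_1
 *
 * The xorshift folds high bits down into low bits, the secret word
 * decorrelates lanes, and the odd multiplier redistributes bits upward.
 */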
4314 
4315 /*!
4316  * @internal
4317  * @brief Scrambles the accumulators after a large chunk has been read
4318  */
4319 XXH_FORCE_INLINE void
4320 XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4321 {
4322     size_t i;
4323     for (i=0; i < XXH_ACC_NB; i++) {
4324         XXH3_scalarScrambleRound(acc, secret, i);
4325     }
4326 }
4327 
4328 XXH_FORCE_INLINE void
4329 XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4330 {
4331     /*
4332      * We need a separate pointer for the hack below,
4333      * which requires a non-const pointer.
4334      * Any decent compiler will optimize this out otherwise.
4335      */
4336     const xxh_u8* kSecretPtr = XXH3_kSecret;
4337     XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
4338 
4339 #if defined(__clang__) && defined(__aarch64__)
4340     /*
4341      * UGLY HACK:
4342      * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are
4343      * placed sequentially, in order, at the top of the unrolled loop.
4344      *
4345      * While MOVK is great for generating constants (2 cycles for a 64-bit
4346      * constant compared to 4 cycles for LDR), it fights for bandwidth with
4347      * the arithmetic instructions.
4348      *
4349      *   I   L   S
4350      * MOVK
4351      * MOVK
4352      * MOVK
4353      * MOVK
4354      * ADD
4355      * SUB      STR
4356      *          STR
4357      * By forcing loads from memory (as the asm line causes Clang to assume
4358      * that kSecretPtr has been changed), the pipelines are used more
4359      * efficiently:
4360      *   I   L   S
4361      *      LDR
4362      *  ADD LDR
4363      *  SUB     STR
4364      *          STR
4365      *
4366      * See XXH3_NEON_LANES for details on the pipeline.
4367      *
4368      * XXH3_64bits_withSeed, len == 256, Snapdragon 835
4369      *   without hack: 2654.4 MB/s
4370      *   with hack:    3202.9 MB/s
4371      */
4372     XXH_COMPILER_GUARD(kSecretPtr);
4373 #endif
4374     /*
4375      * Note: in debug mode, this overrides the asm optimization
4376      * and Clang will emit MOVK chains again.
4377      */
4378     XXH_ASSERT(kSecretPtr == XXH3_kSecret);
4379 
4380     {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
4381         int i;
4382         for (i=0; i < nbRounds; i++) {
4383             /*
4384              * The asm hack causes Clang to assume that kSecretPtr aliases with
4385              * customSecret, and on aarch64, this prevented LDP from merging two
4386              * loads together for free. Putting the loads together before the stores
4387              * properly generates LDP.
4388              */
4389             xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i)     + seed64;
4390             xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
4391             XXH_writeLE64((xxh_u8*)customSecret + 16*i,     lo);
4392             XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
4393     }   }
4394 }
4395 
4396 
4397 typedef void (*XXH3_f_accumulate_512)(void* XXH_RESTRICT, const void*, const void*);
4398 typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
4399 typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
4400 
4401 
4402 #if (XXH_VECTOR == XXH_AVX512)
4403 
4404 #define XXH3_accumulate_512 XXH3_accumulate_512_avx512
4405 #define XXH3_scrambleAcc    XXH3_scrambleAcc_avx512
4406 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
4407 
4408 #elif (XXH_VECTOR == XXH_AVX2)
4409 
4410 #define XXH3_accumulate_512 XXH3_accumulate_512_avx2
4411 #define XXH3_scrambleAcc    XXH3_scrambleAcc_avx2
4412 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
4413 
4414 #elif (XXH_VECTOR == XXH_SSE2)
4415 
4416 #define XXH3_accumulate_512 XXH3_accumulate_512_sse2
4417 #define XXH3_scrambleAcc    XXH3_scrambleAcc_sse2
4418 #define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
4419 
4420 #elif (XXH_VECTOR == XXH_NEON)
4421 
4422 #define XXH3_accumulate_512 XXH3_accumulate_512_neon
4423 #define XXH3_scrambleAcc    XXH3_scrambleAcc_neon
4424 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4425 
4426 #elif (XXH_VECTOR == XXH_VSX)
4427 
4428 #define XXH3_accumulate_512 XXH3_accumulate_512_vsx
4429 #define XXH3_scrambleAcc    XXH3_scrambleAcc_vsx
4430 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4431 
4432 #else /* scalar */
4433 
4434 #define XXH3_accumulate_512 XXH3_accumulate_512_scalar
4435 #define XXH3_scrambleAcc    XXH3_scrambleAcc_scalar
4436 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4437 
4438 #endif
4439 
4440 
4441 
4442 #ifndef XXH_PREFETCH_DIST
4443 #  ifdef __clang__
4444 #    define XXH_PREFETCH_DIST 320
4445 #  else
4446 #    if (XXH_VECTOR == XXH_AVX512)
4447 #      define XXH_PREFETCH_DIST 512
4448 #    else
4449 #      define XXH_PREFETCH_DIST 384
4450 #    endif
4451 #  endif  /* __clang__ */
4452 #endif  /* XXH_PREFETCH_DIST */
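/*
 * XXH_PREFETCH_DIST is overridable: the values above are heuristics, and the
 * ideal distance depends on the target CPU. For example (a build-time sketch;
 * any chosen value should be validated by benchmarking):
 *
 *     cc -DXXH_PREFETCH_DIST=256 ...
 */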
4453 
4454 /*
4455  * XXH3_accumulate()
4456  * Loops over XXH3_accumulate_512().
4457  * Assumption: nbStripes will not overflow the secret size
4458  */
4459 XXH_FORCE_INLINE void
4460 XXH3_accumulate(     xxh_u64* XXH_RESTRICT acc,
4461                 const xxh_u8* XXH_RESTRICT input,
4462                 const xxh_u8* XXH_RESTRICT secret,
4463                       size_t nbStripes,
4464                       XXH3_f_accumulate_512 f_acc512)
4465 {
4466     size_t n;
4467     for (n = 0; n < nbStripes; n++ ) {
4468         const xxh_u8* const in = input + n*XXH_STRIPE_LEN;
4469         XXH_PREFETCH(in + XXH_PREFETCH_DIST);
4470         f_acc512(acc,
4471                  in,
4472                  secret + n*XXH_SECRET_CONSUME_RATE);
4473     }
4474 }
4475 
4476 XXH_FORCE_INLINE void
4477 XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
4478                       const xxh_u8* XXH_RESTRICT input, size_t len,
4479                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
4480                             XXH3_f_accumulate_512 f_acc512,
4481                             XXH3_f_scrambleAcc f_scramble)
4482 {
4483     size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
4484     size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
4485     size_t const nb_blocks = (len - 1) / block_len;
4486 
4487     size_t n;
4488 
4489     XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
4490 
4491     for (n = 0; n < nb_blocks; n++) {
4492         XXH3_accumulate(acc, input + n*block_len, secret, nbStripesPerBlock, f_acc512);
4493         f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
4494     }
4495 
4496     /* last partial block */
4497     XXH_ASSERT(len > XXH_STRIPE_LEN);
4498     {   size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
4499         XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
4500         XXH3_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, f_acc512);
4501 
4502         /* last stripe */
4503         {   const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
4504 #define XXH_SECRET_LASTACC_START 7  /* not aligned on 8, last secret is different from acc & scrambler */
4505             f_acc512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
4506     }   }
4507 }
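/*
 * Worked example, assuming the default secret (XXH_SECRET_DEFAULT_SIZE == 192,
 * XXH_STRIPE_LEN == 64, XXH_SECRET_CONSUME_RATE == 8):
 *
 *     nbStripesPerBlock = (192 - 64) / 8 = 16
 *     block_len         = 64 * 16       = 1024
 *
 * so the accumulators are scrambled once per KiB of input.
 */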
4508 
4509 XXH_FORCE_INLINE xxh_u64
4510 XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
4511 {
4512     return XXH3_mul128_fold64(
4513                acc[0] ^ XXH_readLE64(secret),
4514                acc[1] ^ XXH_readLE64(secret+8) );
4515 }
4516 
4517 static XXH64_hash_t
4518 XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
4519 {
4520     xxh_u64 result64 = start;
4521     size_t i = 0;
4522 
4523     for (i = 0; i < 4; i++) {
4524         result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
4525 #if defined(__clang__)                                /* Clang */ \
4526     && (defined(__arm__) || defined(__thumb__))       /* ARMv7 */ \
4527     && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
4528     && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
4529         /*
4530          * UGLY HACK:
4531          * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
4532          * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
4533          * XXH3_64bits, len == 256, Snapdragon 835:
4534          *   without hack: 2063.7 MB/s
4535          *   with hack:    2560.7 MB/s
4536          */
4537         XXH_COMPILER_GUARD(result64);
4538 #endif
4539     }
4540 
4541     return XXH3_avalanche(result64);
4542 }
4543 
4544 #define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
4545                         XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
4546 
4547 XXH_FORCE_INLINE XXH64_hash_t
4548 XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
4549                            const void* XXH_RESTRICT secret, size_t secretSize,
4550                            XXH3_f_accumulate_512 f_acc512,
4551                            XXH3_f_scrambleAcc f_scramble)
4552 {
4553     XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
4554 
4555     XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc512, f_scramble);
4556 
4557     /* converge into final hash */
4558     XXH_STATIC_ASSERT(sizeof(acc) == 64);
4559     /* do not align on 8, so that the secret is different from the accumulator */
4560 #define XXH_SECRET_MERGEACCS_START 11
4561     XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
4562     return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
4563 }
4564 
4565 /*
4566  * It's important for performance to transmit secret's size (when it's static)
4567  * so that the compiler can properly optimize the vectorized loop.
4568  * This makes a big performance difference for "medium" keys (<1 KB) when using the AVX instruction set.
4569  */
4570 XXH_FORCE_INLINE XXH64_hash_t
4571 XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
4572                              XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
4573 {
4574     (void)seed64;
4575     return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate_512, XXH3_scrambleAcc);
4576 }
4577 
4578 /*
4579  * It's preferable for performance that XXH3_hashLong is not inlined,
4580  * as it results in a smaller function for small data, which is easier on the instruction cache.
4581  * Note that inside this no_inline function, we do inline the internal loop,
4582  * and provide a statically defined secret size to allow optimization of vector loop.
4583  */
4584 XXH_NO_INLINE XXH64_hash_t
4585 XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
4586                           XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
4587 {
4588     (void)seed64; (void)secret; (void)secretLen;
4589     return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate_512, XXH3_scrambleAcc);
4590 }
4591 
4592 /*
4593  * XXH3_hashLong_64b_withSeed():
4594  * Generate a custom key by altering the default XXH3_kSecret with the seed,
4595  * then use this key for long mode hashing.
4596  *
4597  * This operation is decently fast but nonetheless costs a little bit of time.
4598  * Try to avoid it whenever possible (typically when seed==0).
4599  *
4600  * It's important for performance that XXH3_hashLong is not inlined. Not sure
4601  * why (uop cache maybe?), but the difference is large and easily measurable.
4602  */
4603 XXH_FORCE_INLINE XXH64_hash_t
4604 XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
4605                                     XXH64_hash_t seed,
4606                                     XXH3_f_accumulate_512 f_acc512,
4607                                     XXH3_f_scrambleAcc f_scramble,
4608                                     XXH3_f_initCustomSecret f_initSec)
4609 {
4610     if (seed == 0)
4611         return XXH3_hashLong_64b_internal(input, len,
4612                                           XXH3_kSecret, sizeof(XXH3_kSecret),
4613                                           f_acc512, f_scramble);
4614     {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
4615         f_initSec(secret, seed);
4616         return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
4617                                           f_acc512, f_scramble);
4618     }
4619 }
4620 
4621 /*
4622  * It's important for performance that XXH3_hashLong is not inlined.
4623  */
4624 XXH_NO_INLINE XXH64_hash_t
4625 XXH3_hashLong_64b_withSeed(const void* input, size_t len,
4626                            XXH64_hash_t seed, const xxh_u8* secret, size_t secretLen)
4627 {
4628     (void)secret; (void)secretLen;
4629     return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
4630                 XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
4631 }
4632 
4633 
4634 typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
4635                                           XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
4636 
4637 XXH_FORCE_INLINE XXH64_hash_t
4638 XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
4639                      XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
4640                      XXH3_hashLong64_f f_hashLong)
4641 {
4642     XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
4643     /*
4644      * If an action is to be taken if `secretLen` condition is not respected,
4645      * it should be done here.
4646      * For now, it's a contract pre-condition.
4647      * Adding a check and a branch here would cost performance at every hash.
4648      * Also, note that function signature doesn't offer room to return an error.
4649      */
4650     if (len <= 16)
4651         return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
4652     if (len <= 128)
4653         return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
4654     if (len <= XXH3_MIDSIZE_MAX)
4655         return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
4656     return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
4657 }
4658 
4659 
4660 /* ===   Public entry point   === */
4661 
4662 /*! @ingroup xxh3_family */
4663 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len)
4664 {
4665     return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
4666 }
4667 
4668 /*! @ingroup xxh3_family */
4669 XXH_PUBLIC_API XXH64_hash_t
4670 XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
4671 {
4672     return XXH3_64bits_internal(input, len, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
4673 }
4674 
4675 /*! @ingroup xxh3_family */
4676 XXH_PUBLIC_API XXH64_hash_t
4677 XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
4678 {
4679     return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
4680 }
4681 
/*! @ingroup xxh3_family */
4682 XXH_PUBLIC_API XXH64_hash_t
4683 XXH3_64bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed)
4684 {
4685     if (len <= XXH3_MIDSIZE_MAX)
4686         return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
4687     return XXH3_hashLong_64b_withSecret(input, len, seed, (const xxh_u8*)secret, secretSize);
4688 }
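/*
 * Example usage of the one-shot variants above (a minimal sketch):
 *
 *     const char msg[] = "xxhash";
 *     XXH64_hash_t const h1 = XXH3_64bits(msg, sizeof(msg)-1);
 *     XXH64_hash_t const h2 = XXH3_64bits_withSeed(msg, sizeof(msg)-1, 42);
 */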
4689 
4690 
4691 /* ===   XXH3 streaming   === */
4692 
4693 /*
4694  * Allocates memory whose address is always aligned to align.
4695  *
4696  * This must be freed with `XXH_alignedFree()`.
4697  *
4698  * malloc typically guarantees 16-byte alignment on 64-bit systems and 8-byte
4699  * alignment on 32-bit. This isn't enough for the 32-byte aligned loads in AVX2,
4700  * nor, on 32-bit, for the 16-byte aligned loads in SSE2 and NEON.
4701  *
4702  * This underalignment previously caused a rather obvious crash which went
4703  * completely unnoticed due to XXH3_createState() not actually being tested.
4704  * Credit to RedSpah for noticing this bug.
4705  *
4706  * The alignment is done manually: Functions like posix_memalign or _mm_malloc
4707  * are avoided: To maintain portability, we would have to write a fallback
4708  * like this anyways, and besides, testing for the existence of library
4709  * functions without relying on external build tools is impossible.
4710  *
4711  * The method is simple: Overallocate, manually align, and store the offset
4712  * to the original behind the returned pointer.
4713  *
4714  * Align must be a power of 2 and 8 <= align <= 128.
4715  */
4716 static void* XXH_alignedMalloc(size_t s, size_t align)
4717 {
4718     XXH_ASSERT(align <= 128 && align >= 8); /* range check */
4719     XXH_ASSERT((align & (align-1)) == 0);   /* power of 2 */
4720     XXH_ASSERT(s != 0 && s < (s + align));  /* empty/overflow */
4721     {   /* Overallocate to make room for manual realignment and an offset byte */
4722         xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
4723         if (base != NULL) {
4724             /*
4725              * Get the offset needed to align this pointer.
4726              *
4727              * Even if the returned pointer is aligned, there will always be
4728              * at least one byte to store the offset to the original pointer.
4729              */
4730             size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
4731             /* Add the offset for the now-aligned pointer */
4732             xxh_u8* ptr = base + offset;
4733 
4734             XXH_ASSERT((size_t)ptr % align == 0);
4735 
4736             /* Store the offset immediately before the returned pointer. */
4737             ptr[-1] = (xxh_u8)offset;
4738             return ptr;
4739         }
4740         return NULL;
4741     }
4742 }
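/*
 * Worked example with hypothetical addresses: for align == 64 and
 * base == 0x1008, offset = 64 - (0x1008 & 63) = 56, so ptr == 0x1040
 * (64-byte aligned) and ptr[-1] stores 56. If base is already aligned,
 * offset == align, leaving a full align-sized gap, so there is always
 * room for the offset byte.
 */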
4743 /*
4744  * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
4745  * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
4746  */
4747 static void XXH_alignedFree(void* p)
4748 {
4749     if (p != NULL) {
4750         xxh_u8* ptr = (xxh_u8*)p;
4751         /* Get the offset byte we added in XXH_alignedMalloc. */
4752         xxh_u8 offset = ptr[-1];
4753         /* Free the original malloc'd pointer */
4754         xxh_u8* base = ptr - offset;
4755         XXH_free(base);
4756     }
4757 }
4758 /*! @ingroup xxh3_family */
4759 XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
4760 {
4761     XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
4762     if (state==NULL) return NULL;
4763     XXH3_INITSTATE(state);
4764     return state;
4765 }
4766 
4767 /*! @ingroup xxh3_family */
4768 XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
4769 {
4770     XXH_alignedFree(statePtr);
4771     return XXH_OK;
4772 }
4773 
4774 /*! @ingroup xxh3_family */
4775 XXH_PUBLIC_API void
4776 XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state)
4777 {
4778     XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
4779 }
4780 
4781 static void
4782 XXH3_reset_internal(XXH3_state_t* statePtr,
4783                     XXH64_hash_t seed,
4784                     const void* secret, size_t secretSize)
4785 {
4786     size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
4787     size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
4788     XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
4789     XXH_ASSERT(statePtr != NULL);
4790     /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
4791     memset((char*)statePtr + initStart, 0, initLength);
4792     statePtr->acc[0] = XXH_PRIME32_3;
4793     statePtr->acc[1] = XXH_PRIME64_1;
4794     statePtr->acc[2] = XXH_PRIME64_2;
4795     statePtr->acc[3] = XXH_PRIME64_3;
4796     statePtr->acc[4] = XXH_PRIME64_4;
4797     statePtr->acc[5] = XXH_PRIME32_2;
4798     statePtr->acc[6] = XXH_PRIME64_5;
4799     statePtr->acc[7] = XXH_PRIME32_1;
4800     statePtr->seed = seed;
4801     statePtr->useSeed = (seed != 0);
4802     statePtr->extSecret = (const unsigned char*)secret;
4803     XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
4804     statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
4805     statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
4806 }
4807 
4808 /*! @ingroup xxh3_family */
4809 XXH_PUBLIC_API XXH_errorcode
4810 XXH3_64bits_reset(XXH3_state_t* statePtr)
4811 {
4812     if (statePtr == NULL) return XXH_ERROR;
4813     XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
4814     return XXH_OK;
4815 }
4816 
4817 /*! @ingroup xxh3_family */
4818 XXH_PUBLIC_API XXH_errorcode
4819 XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
4820 {
4821     if (statePtr == NULL) return XXH_ERROR;
4822     if (secret == NULL) return XXH_ERROR;
4823     if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
4824     XXH3_reset_internal(statePtr, 0, secret, secretSize);
4825     return XXH_OK;
4826 }
4827 
4828 /*! @ingroup xxh3_family */
4829 XXH_PUBLIC_API XXH_errorcode
4830 XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
4831 {
4832     if (statePtr == NULL) return XXH_ERROR;
4833     if (seed==0) return XXH3_64bits_reset(statePtr);
4834     if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
4835         XXH3_initCustomSecret(statePtr->customSecret, seed);
4836     XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
4837     return XXH_OK;
4838 }
4839 
4840 /*! @ingroup xxh3_family */
4841 XXH_PUBLIC_API XXH_errorcode
4842 XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed64)
4843 {
4844     if (statePtr == NULL) return XXH_ERROR;
4845     if (secret == NULL) return XXH_ERROR;
4846     if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
4847     XXH3_reset_internal(statePtr, seed64, secret, secretSize);
4848     statePtr->useSeed = 1; /* always, even if seed64==0 */
4849     return XXH_OK;
4850 }
4851 
4852 /* Note : when XXH3_consumeStripes() is invoked,
4853  * the caller must guarantee that at least one more byte will be consumed from input afterwards,
4854  * so that the function can blindly consume all stripes using the "normal" secret segment */
4855 XXH_FORCE_INLINE void
4856 XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
4857                     size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
4858                     const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
4859                     const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
4860                     XXH3_f_accumulate_512 f_acc512,
4861                     XXH3_f_scrambleAcc f_scramble)
4862 {
4863     XXH_ASSERT(nbStripes <= nbStripesPerBlock);  /* can handle max 1 scramble per invocation */
4864     XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock);
4865     if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) {
4866         /* need a scrambling operation */
4867         size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr;
4868         size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock;
4869         XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock, f_acc512);
4870         f_scramble(acc, secret + secretLimit);
4871         XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock, f_acc512);
4872         *nbStripesSoFarPtr = nbStripesAfterBlock;
4873     } else {
4874         XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, f_acc512);
4875         *nbStripesSoFarPtr += nbStripes;
4876     }
4877 }
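/*
 * Worked example of the scrambling branch: with nbStripesPerBlock == 16,
 * *nbStripesSoFarPtr == 14 and nbStripes == 4, the first 2 stripes finish the
 * current block (reading the secret from offset 14 * XXH_SECRET_CONSUME_RATE),
 * the accumulators are scrambled, and the remaining 2 stripes restart from the
 * beginning of the secret, leaving *nbStripesSoFarPtr == 2.
 */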
4878 
4879 #ifndef XXH3_STREAM_USE_STACK
4880 # ifndef __clang__ /* clang doesn't need additional stack space */
4881 #   define XXH3_STREAM_USE_STACK 1
4882 # endif
4883 #endif
4884 /*
4885  * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
4886  */
4887 XXH_FORCE_INLINE XXH_errorcode
4888 XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
4889             const xxh_u8* XXH_RESTRICT input, size_t len,
4890             XXH3_f_accumulate_512 f_acc512,
4891             XXH3_f_scrambleAcc f_scramble)
4892 {
4893     if (input==NULL) {
4894         XXH_ASSERT(len == 0);
4895         return XXH_OK;
4896     }
4897 
4898     XXH_ASSERT(state != NULL);
4899     {   const xxh_u8* const bEnd = input + len;
4900         const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
4901 #if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
4902         /* For some reason, gcc and MSVC seem to suffer greatly
4903          * when operating on accumulators directly inside the state.
4904          * Operating on stack space instead seems to enable proper optimization.
4905          * clang, on the other hand, doesn't seem to need this trick */
4906         XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; memcpy(acc, state->acc, sizeof(acc));
4907 #else
4908         xxh_u64* XXH_RESTRICT const acc = state->acc;
4909 #endif
4910         state->totalLen += len;
4911         XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);
4912 
4913         /* small input : just fill in tmp buffer */
4914         if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) {
4915             XXH_memcpy(state->buffer + state->bufferedSize, input, len);
4916             state->bufferedSize += (XXH32_hash_t)len;
4917             return XXH_OK;
4918         }
4919 
4920         /* total input is now > XXH3_INTERNALBUFFER_SIZE */
4921         #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
4922         XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0);   /* clean multiple */
4923 
4924         /*
4925          * Internal buffer is partially filled (always, except at beginning)
4926          * Complete it, then consume it.
4927          */
4928         if (state->bufferedSize) {
4929             size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
4930             XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
4931             input += loadSize;
4932             XXH3_consumeStripes(acc,
4933                                &state->nbStripesSoFar, state->nbStripesPerBlock,
4934                                 state->buffer, XXH3_INTERNALBUFFER_STRIPES,
4935                                 secret, state->secretLimit,
4936                                 f_acc512, f_scramble);
4937             state->bufferedSize = 0;
4938         }
4939         XXH_ASSERT(input < bEnd);
4940 
4941         /* large input to consume : ingest per full block */
4942         if ((size_t)(bEnd - input) > state->nbStripesPerBlock * XXH_STRIPE_LEN) {
4943             size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
4944             XXH_ASSERT(state->nbStripesPerBlock >= state->nbStripesSoFar);
4945             /* join to current block's end */
4946             {   size_t const nbStripesToEnd = state->nbStripesPerBlock - state->nbStripesSoFar;
4947                 XXH_ASSERT(nbStripesToEnd <= nbStripes);
4948                 XXH3_accumulate(acc, input, secret + state->nbStripesSoFar * XXH_SECRET_CONSUME_RATE, nbStripesToEnd, f_acc512);
4949                 f_scramble(acc, secret + state->secretLimit);
4950                 state->nbStripesSoFar = 0;
4951                 input += nbStripesToEnd * XXH_STRIPE_LEN;
4952                 nbStripes -= nbStripesToEnd;
4953             }
4954             /* consume per entire blocks */
4955             while(nbStripes >= state->nbStripesPerBlock) {
4956                 XXH3_accumulate(acc, input, secret, state->nbStripesPerBlock, f_acc512);
4957                 f_scramble(acc, secret + state->secretLimit);
4958                 input += state->nbStripesPerBlock * XXH_STRIPE_LEN;
4959                 nbStripes -= state->nbStripesPerBlock;
4960             }
4961             /* consume last partial block */
4962             XXH3_accumulate(acc, input, secret, nbStripes, f_acc512);
4963             input += nbStripes * XXH_STRIPE_LEN;
4964             XXH_ASSERT(input < bEnd);  /* at least some bytes left */
4965             state->nbStripesSoFar = nbStripes;
4966             /* buffer predecessor of last partial stripe */
4967             XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
4968             XXH_ASSERT(bEnd - input <= XXH_STRIPE_LEN);
4969         } else {
4970             /* content to consume <= block size */
4971             /* Consume input by a multiple of internal buffer size */
4972             if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
4973                 const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
4974                 do {
4975                     XXH3_consumeStripes(acc,
4976                                        &state->nbStripesSoFar, state->nbStripesPerBlock,
4977                                         input, XXH3_INTERNALBUFFER_STRIPES,
4978                                         secret, state->secretLimit,
4979                                         f_acc512, f_scramble);
4980                     input += XXH3_INTERNALBUFFER_SIZE;
4981                 } while (input<limit);
4982                 /* buffer predecessor of last partial stripe */
4983                 XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
4984             }
4985         }
4986 
4987         /* Some remaining input (always) : buffer it */
4988         XXH_ASSERT(input < bEnd);
4989         XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
4990         XXH_ASSERT(state->bufferedSize == 0);
4991         XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
4992         state->bufferedSize = (XXH32_hash_t)(bEnd-input);
4993 #if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
4994         /* save stack accumulators into state */
4995         memcpy(state->acc, acc, sizeof(acc));
4996 #endif
4997     }
4998 
4999     return XXH_OK;
5000 }
5001 
5002 /*! @ingroup xxh3_family */
5003 XXH_PUBLIC_API XXH_errorcode
5004 XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len)
5005 {
5006     return XXH3_update(state, (const xxh_u8*)input, len,
5007                        XXH3_accumulate_512, XXH3_scrambleAcc);
5008 }
5009 
5010 
5011 XXH_FORCE_INLINE void
5012 XXH3_digest_long (XXH64_hash_t* acc,
5013                   const XXH3_state_t* state,
5014                   const unsigned char* secret)
5015 {
5016     /*
5017      * Digest on a local copy. This way, the state remains unaltered, and it can
5018      * continue ingesting more input afterwards.
5019      */
5020     XXH_memcpy(acc, state->acc, sizeof(state->acc));
5021     if (state->bufferedSize >= XXH_STRIPE_LEN) {
5022         size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
5023         size_t nbStripesSoFar = state->nbStripesSoFar;
5024         XXH3_consumeStripes(acc,
5025                            &nbStripesSoFar, state->nbStripesPerBlock,
5026                             state->buffer, nbStripes,
5027                             secret, state->secretLimit,
5028                             XXH3_accumulate_512, XXH3_scrambleAcc);
5029         /* last stripe */
5030         XXH3_accumulate_512(acc,
5031                             state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
5032                             secret + state->secretLimit - XXH_SECRET_LASTACC_START);
5033     } else {  /* bufferedSize < XXH_STRIPE_LEN */
5034         xxh_u8 lastStripe[XXH_STRIPE_LEN];
5035         size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
5036         XXH_ASSERT(state->bufferedSize > 0);  /* there is always some input buffered */
5037         XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
5038         XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
5039         XXH3_accumulate_512(acc,
5040                             lastStripe,
5041                             secret + state->secretLimit - XXH_SECRET_LASTACC_START);
5042     }
5043 }
5044 
5045 /*! @ingroup xxh3_family */
5046 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state)
5047 {
5048     const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
5049     if (state->totalLen > XXH3_MIDSIZE_MAX) {
5050         XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
5051         XXH3_digest_long(acc, state, secret);
5052         return XXH3_mergeAccs(acc,
5053                               secret + XXH_SECRET_MERGEACCS_START,
5054                               (xxh_u64)state->totalLen * XXH_PRIME64_1);
5055     }
5056     /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
5057     if (state->useSeed)
5058         return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
5059     return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
5060                                   secret, state->secretLimit + XXH_STRIPE_LEN);
5061 }
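/*
 * Typical streaming session (a minimal sketch; `buffer` and `bufferSize`
 * stand for the caller's data):
 *
 *     XXH3_state_t* const state = XXH3_createState();
 *     if (state != NULL
 *      && XXH3_64bits_reset(state) == XXH_OK
 *      && XXH3_64bits_update(state, buffer, bufferSize) == XXH_OK) {
 *         XXH64_hash_t const hash = XXH3_64bits_digest(state);
 *     }
 *     XXH3_freeState(state);
 *
 * update() can be called any number of times before digest(), and digest()
 * leaves the state intact, so ingestion can continue afterwards.
 */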
5062 
5063 
5064 
5065 /* ==========================================
5066  * XXH3 128 bits (a.k.a XXH128)
5067  * ==========================================
5068  * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
5069  * even without counting the significantly larger output size.
5070  *
5071  * For example, extra steps are taken to avoid the seed-dependent collisions
5072  * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
5073  *
5074  * This strength naturally comes at the cost of some speed, especially on short
5075  * lengths. Note that long inputs hash about as fast as with the 64-bit version,
5076  * since the long-input path is only a slight modification of the 64-bit loop.
5077  *
5078  * XXH128 is also more oriented towards 64-bit machines. It is still extremely
5079  * fast for a _128-bit_ hash on 32-bit (it usually beats XXH64).
5080  */
5081 
5082 XXH_FORCE_INLINE XXH128_hash_t
5083 XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
5084 {
5085     /* A doubled version of 1to3_64b with different constants. */
5086     XXH_ASSERT(input != NULL);
5087     XXH_ASSERT(1 <= len && len <= 3);
5088     XXH_ASSERT(secret != NULL);
5089     /*
5090      * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
5091      * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
5092      * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
5093      */
5094     {   xxh_u8 const c1 = input[0];
5095         xxh_u8 const c2 = input[len >> 1];
5096         xxh_u8 const c3 = input[len - 1];
5097         xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
5098                                 | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
5099         xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
5100         xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
5101         xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
5102         xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
5103         xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
5104         XXH128_hash_t h128;
5105         h128.low64  = XXH64_avalanche(keyed_lo);
5106         h128.high64 = XXH64_avalanche(keyed_hi);
5107         return h128;
5108     }
5109 }
5110 
5111 XXH_FORCE_INLINE XXH128_hash_t
5112 XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
5113 {
5114     XXH_ASSERT(input != NULL);
5115     XXH_ASSERT(secret != NULL);
5116     XXH_ASSERT(4 <= len && len <= 8);
5117     seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
5118     {   xxh_u32 const input_lo = XXH_readLE32(input);
5119         xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
5120         xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
5121         xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
5122         xxh_u64 const keyed = input_64 ^ bitflip;
5123 
5124         /* Shift len left so the addend is even: added to the odd prime, it keeps the multiplier odd, avoiding even multiplies. */
5125         XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
5126 
5127         m128.high64 += (m128.low64 << 1);
5128         m128.low64  ^= (m128.high64 >> 3);
5129 
5130         m128.low64   = XXH_xorshift64(m128.low64, 35);
5131         m128.low64  *= 0x9FB21C651E98DF25ULL;
5132         m128.low64   = XXH_xorshift64(m128.low64, 28);
5133         m128.high64  = XXH3_avalanche(m128.high64);
5134         return m128;
5135     }
5136 }
5137 
5138 XXH_FORCE_INLINE XXH128_hash_t
5139 XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
5140 {
5141     XXH_ASSERT(input != NULL);
5142     XXH_ASSERT(secret != NULL);
5143     XXH_ASSERT(9 <= len && len <= 16);
5144     {   xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
5145         xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
5146         xxh_u64 const input_lo = XXH_readLE64(input);
5147         xxh_u64       input_hi = XXH_readLE64(input + len - 8);
5148         XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
5149         /*
5150          * Put len in the middle of m128 to ensure that the length gets mixed to
5151          * both the low and high bits in the 128x64 multiply below.
5152          */
5153         m128.low64 += (xxh_u64)(len - 1) << 54;
5154         input_hi   ^= bitfliph;
5155         /*
5156          * Add the high 32 bits of input_hi to the high 32 bits of m128, then
5157          * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
5158          * the high 64 bits of m128.
5159          *
5160          * The best approach to this operation is different on 32-bit and 64-bit.
5161          */
5162         if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
5163             /*
5164              * 32-bit optimized version, which is more readable.
5165              *
5166              * On 32-bit, it removes an ADC and delays a dependency between the two
5167              * halves of m128.high64, but it generates an extra mask on 64-bit.
5168              */
5169             m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
5170         } else {
5171             /*
5172              * 64-bit optimized (albeit more confusing) version.
5173              *
5174              * Uses some properties of addition and multiplication to remove the mask:
5175              *
5176              * Let:
5177              *    a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
5178              *    b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
5179              *    c = XXH_PRIME32_2
5180              *
5181              *    a + (b * c)
5182              * Inverse Property: x + y - x == y
5183              *    a + (b * (1 + c - 1))
5184              * Distributive Property: x * (y + z) == (x * y) + (x * z)
5185              *    a + (b * 1) + (b * (c - 1))
5186              * Identity Property: x * 1 == x
5187              *    a + b + (b * (c - 1))
5188              *
5189              * Substitute a, b, and c:
5190              *    input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
5191              *
5192              * Since input_hi.hi + input_hi.lo == input_hi, we get this:
5193              *    input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
5194              */
5195             m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
5196         }
5197         /* m128 ^= XXH_swap64(m128 >> 64); */
5198         m128.low64  ^= XXH_swap64(m128.high64);
5199 
5200         {   /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
5201             XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
5202             h128.high64 += m128.high64 * XXH_PRIME64_2;
5203 
5204             h128.low64   = XXH3_avalanche(h128.low64);
5205             h128.high64  = XXH3_avalanche(h128.high64);
5206             return h128;
5207     }   }
5208 }
5209 
5210 /*
5211  * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
5212  */
5213 XXH_FORCE_INLINE XXH128_hash_t
5214 XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
5215 {
5216     XXH_ASSERT(len <= 16);
5217     {   if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
5218         if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
5219         if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
5220         {   XXH128_hash_t h128;
5221             xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
5222             xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
5223             h128.low64 = XXH64_avalanche(seed ^ bitflipl);
5224             h128.high64 = XXH64_avalanche( seed ^ bitfliph);
5225             return h128;
5226     }   }
5227 }
5228 
5229 /*
5230  * A bit slower than XXH3_mix16B, but handles multiply by zero better.
5231  */
5232 XXH_FORCE_INLINE XXH128_hash_t
5233 XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
5234               const xxh_u8* secret, XXH64_hash_t seed)
5235 {
5236     acc.low64  += XXH3_mix16B (input_1, secret+0, seed);
5237     acc.low64  ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
5238     acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
5239     acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
5240     return acc;
5241 }
5242 
5243 
5244 XXH_FORCE_INLINE XXH128_hash_t
5245 XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
5246                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
5247                       XXH64_hash_t seed)
5248 {
5249     XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
5250     XXH_ASSERT(16 < len && len <= 128);
5251 
5252     {   XXH128_hash_t acc;
5253         acc.low64 = len * XXH_PRIME64_1;
5254         acc.high64 = 0;
5255         if (len > 32) {
5256             if (len > 64) {
5257                 if (len > 96) {
5258                     acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
5259                 }
5260                 acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
5261             }
5262             acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
5263         }
5264         acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
5265         {   XXH128_hash_t h128;
5266             h128.low64  = acc.low64 + acc.high64;
5267             h128.high64 = (acc.low64    * XXH_PRIME64_1)
5268                         + (acc.high64   * XXH_PRIME64_4)
5269                         + ((len - seed) * XXH_PRIME64_2);
5270             h128.low64  = XXH3_avalanche(h128.low64);
5271             h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
5272             return h128;
5273         }
5274     }
5275 }
5276 
5277 XXH_NO_INLINE XXH128_hash_t
5278 XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
5279                        const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
5280                        XXH64_hash_t seed)
5281 {
5282     XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
5283     XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
5284 
5285     {   XXH128_hash_t acc;
5286         int const nbRounds = (int)len / 32;
5287         int i;
5288         acc.low64 = len * XXH_PRIME64_1;
5289         acc.high64 = 0;
5290         for (i=0; i<4; i++) {
5291             acc = XXH128_mix32B(acc,
5292                                 input  + (32 * i),
5293                                 input  + (32 * i) + 16,
5294                                 secret + (32 * i),
5295                                 seed);
5296         }
5297         acc.low64 = XXH3_avalanche(acc.low64);
5298         acc.high64 = XXH3_avalanche(acc.high64);
        XXH_ASSERT(nbRounds >= 4);
        for (i=4 ; i < nbRounds; i++) {
            acc = XXH128_mix32B(acc,
                                input + (32 * i),
                                input + (32 * i) + 16,
                                secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)),
                                seed);
        }
        /* last bytes */
        acc = XXH128_mix32B(acc,
                            input + len - 16,
                            input + len - 32,
                            secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
                            0ULL - seed);

        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
        }
    }
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
                            const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                            XXH3_f_accumulate_512 f_acc512,
                            XXH3_f_scrambleAcc f_scramble)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;

    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc512, f_scramble);

    /* converge into final hash */
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
    {   XXH128_hash_t h128;
        h128.low64  = XXH3_mergeAccs(acc,
                                     secret + XXH_SECRET_MERGEACCS_START,
                                     (xxh_u64)len * XXH_PRIME64_1);
        h128.high64 = XXH3_mergeAccs(acc,
                                     secret + secretSize
                                            - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                     ~((xxh_u64)len * XXH_PRIME64_2));
        return h128;
    }
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
                           XXH64_hash_t seed64,
                           const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*
 * It's important for performance to pass @secretLen (when it's static)
 * to the compiler, so that it can properly optimize the vectorized loop.
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
                              XXH64_hash_t seed64,
                              const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;
    return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
                                XXH64_hash_t seed64,
                                XXH3_f_accumulate_512 f_acc512,
                                XXH3_f_scrambleAcc f_scramble,
                                XXH3_f_initCustomSecret f_initSec)
{
    if (seed64 == 0)
        return XXH3_hashLong_128b_internal(input, len,
                                           XXH3_kSecret, sizeof(XXH3_kSecret),
                                           f_acc512, f_scramble);
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed64);
        return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
                                           f_acc512, f_scramble);
    }
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed(const void* input, size_t len,
                            XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
                XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
}

typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
                                            XXH64_hash_t, const void* XXH_RESTRICT, size_t);

XXH_FORCE_INLINE XXH128_hash_t
XXH3_128bits_internal(const void* input, size_t len,
                      XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                      XXH3_hashLong128_f f_hl128)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * If a check for invalid `secret` conditions were to be added,
     * this is the place to do it.
     * For now, secret validity is a contract pre-condition:
     * adding a check and a branch here would cost performance on every hash.
     */
    if (len <= 16)
        return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hl128(input, len, seed64, secret, secretLen);
}


/* ===   Public XXH128 API   === */

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len)
{
    return XXH3_128bits_internal(input, len, 0,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_default);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
{
    return XXH3_128bits_internal(input, len, 0,
                                 (const xxh_u8*)secret, secretSize,
                                 XXH3_hashLong_128b_withSecret);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_internal(input, len, seed,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_withSeed);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
    return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_withSeed(input, len, seed);
}
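
/*!
 * Illustrative sketch (not part of the library): calling the one-shot
 * 128-bit entry points above. The input buffer and seed value are
 * hypothetical; XXH128() is simply XXH3_128bits_withSeed() under another name.
 *
 * @code
 *     #include <stdio.h>
 *
 *     static void example_oneshot(void)
 *     {
 *         const char data[] = "hello world";   // hypothetical input
 *         XXH128_hash_t const h1 = XXH3_128bits(data, sizeof(data));  // seed 0
 *         XXH128_hash_t const h2 = XXH128(data, sizeof(data), 42);    // custom seed
 *         printf("%016llx%016llx\n",
 *                (unsigned long long)h1.high64, (unsigned long long)h1.low64);
 *         (void)h2;
 *     }
 * @endcode
 */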


/* ===   XXH3 128-bit streaming   === */

/*
 * All initialization and update functions are identical to the 64-bit streaming variant.
 * The only difference is the finalization routine.
 */

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset(XXH3_state_t* statePtr)
{
    return XXH3_64bits_reset(statePtr);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
{
    return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    return XXH3_64bits_reset_withSeed(statePtr, seed);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
        {   XXH128_hash_t h128;
            h128.low64  = XXH3_mergeAccs(acc,
                                         secret + XXH_SECRET_MERGEACCS_START,
                                         (xxh_u64)state->totalLen * XXH_PRIME64_1);
            h128.high64 = XXH3_mergeAccs(acc,
                                         secret + state->secretLimit + XXH_STRIPE_LEN
                                                - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
            return h128;
        }
    }
    /* len <= XXH3_MIDSIZE_MAX : short code */
    if (state->seed)
        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                   secret, state->secretLimit + XXH_STRIPE_LEN);
}
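
/*!
 * Illustrative sketch (not part of the library): 128-bit streaming usage.
 * Two hypothetical chunks are fed through a heap-allocated state; the
 * digest equals hashing the concatenated input in one shot.
 *
 * @code
 *     static XXH128_hash_t example_streaming(const void* part1, size_t len1,
 *                                            const void* part2, size_t len2)
 *     {
 *         XXH128_hash_t h128;
 *         XXH3_state_t* const state = XXH3_createState();
 *         // NULL check and XXH_errorcode handling elided for brevity
 *         XXH3_128bits_reset(state);          // or _withSeed() / _withSecret()
 *         XXH3_128bits_update(state, part1, len1);
 *         XXH3_128bits_update(state, part2, len2);
 *         h128 = XXH3_128bits_digest(state);  // state stays valid for more updates
 *         XXH3_freeState(state);
 *         return h128;
 *     }
 * @endcode
 */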

/* 128-bit utility functions */

#include <string.h>   /* memcmp, memcpy */

/* return : 1 if equal, 0 if different */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
{
    /* note : XXH128_hash_t is compact, it has no padding bytes */
    return !(memcmp(&h1, &h2, sizeof(h1)));
}

/* This prototype is compatible with stdlib's qsort().
 * return : >0 if *h128_1  > *h128_2
 *          <0 if *h128_1  < *h128_2
 *          =0 if *h128_1 == *h128_2  */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2)
{
    XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
    XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
    int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
    /* note : assumes that, in most cases, hash values are different */
    if (hcmp) return hcmp;
    return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
}
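
/*!
 * Illustrative sketch (not part of the library): since XXH128_cmp() matches
 * qsort()'s comparator signature, an array of hashes can be sorted directly,
 * then scanned for duplicates with XXH128_isEqual(). The array is hypothetical.
 *
 * @code
 *     #include <stdlib.h>   // qsort
 *
 *     static void example_sort(XXH128_hash_t* hashes, size_t count)
 *     {
 *         size_t i;
 *         qsort(hashes, count, sizeof(XXH128_hash_t), XXH128_cmp);
 *         for (i = 1; i < count; i++) {
 *             if (XXH128_isEqual(hashes[i-1], hashes[i])) {
 *                 // duplicate hash value detected
 *             }
 *         }
 *     }
 * @endcode
 */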


/*======   Canonical representation   ======*/
/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) {
        hash.high64 = XXH_swap64(hash.high64);
        hash.low64  = XXH_swap64(hash.low64);
    }
    XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
    XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128_hashFromCanonical(const XXH128_canonical_t* src)
{
    XXH128_hash_t h;
    h.high64 = XXH_readBE64(src);
    h.low64  = XXH_readBE64(src->digest + 8);
    return h;
}
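
/*!
 * Illustrative sketch (not part of the library): round-tripping a hash
 * through its canonical (big-endian) byte representation, which is the
 * portable form for storing digests on disk or sending them between
 * machines of different endianness.
 *
 * @code
 *     static void example_canonical(XXH128_hash_t h128, unsigned char out[16])
 *     {
 *         XXH128_canonical_t canon;
 *         XXH128_canonicalFromHash(&canon, h128);
 *         memcpy(out, canon.digest, sizeof(canon.digest));  // portable bytes
 *
 *         // later, on any platform:
 *         {   XXH128_hash_t const back = XXH128_hashFromCanonical(&canon);
 *             (void)back;   // back.low64 / back.high64 equal h128's fields
 *         }
 *     }
 * @endcode
 */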



/* ==========================================
 * Secret generators
 * ==========================================
 */
#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))

XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
{
    XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
    XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize)
{
#if (XXH_DEBUGLEVEL >= 1)
    XXH_ASSERT(secretBuffer != NULL);
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
#else
    /* production mode, asserts are disabled */
    if (secretBuffer == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
#endif

    if (customSeedSize == 0) {
        customSeed = XXH3_kSecret;
        customSeedSize = XXH_SECRET_DEFAULT_SIZE;
    }
#if (XXH_DEBUGLEVEL >= 1)
    XXH_ASSERT(customSeed != NULL);
#else
    if (customSeed == NULL) return XXH_ERROR;
#endif

    /* Fill secretBuffer with a copy of customSeed - repeat as needed */
    {   size_t pos = 0;
        while (pos < secretSize) {
            size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
            memcpy((char*)secretBuffer + pos, customSeed, toCopy);
            pos += toCopy;
    }   }

    {   size_t const nbSeg16 = secretSize / 16;
        size_t n;
        XXH128_canonical_t scrambler;
        XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
        for (n=0; n<nbSeg16; n++) {
            XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
            XXH3_combine16((char*)secretBuffer + n*16, h128);
        }
        /* last segment */
        XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
    }
    return XXH_OK;
}
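
/*!
 * Illustrative sketch (not part of the library): deriving a custom secret
 * from low-entropy seed material, then hashing with it. The seed string is
 * hypothetical; the secret buffer must be at least XXH3_SECRET_SIZE_MIN bytes.
 *
 * @code
 *     static XXH128_hash_t example_customSecret(const void* input, size_t len)
 *     {
 *         unsigned char secret[XXH3_SECRET_SIZE_MIN];
 *         const char seedMaterial[] = "not very random";   // hypothetical
 *         if (XXH3_generateSecret(secret, sizeof(secret),
 *                                 seedMaterial, sizeof(seedMaterial)) != XXH_OK) {
 *             // handle error
 *         }
 *         return XXH3_128bits_withSecret(input, len, secret, sizeof(secret));
 *     }
 * @endcode
 */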

/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed)
{
    XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
    XXH3_initCustomSecret(secret, seed);
    XXH_ASSERT(secretBuffer != NULL);
    memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
}
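
/*!
 * Illustrative sketch (not part of the library): materializing the secret
 * that a given numeric seed produces internally, so the same hashing can
 * later be reproduced through the _withSecret() / _withSecretandSeed()
 * variants. The buffer must hold XXH_SECRET_DEFAULT_SIZE (192) bytes.
 *
 * @code
 *     static void example_secretFromSeed(XXH64_hash_t seed)
 *     {
 *         unsigned char secret[XXH_SECRET_DEFAULT_SIZE];
 *         XXH3_generateSecret_fromSeed(secret, seed);
 *         // `secret` may now be passed to *_withSecret() or *_withSecretandSeed()
 *     }
 * @endcode
 */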



/* Pop our optimization override from above */
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
#  pragma GCC pop_options
#endif

#endif  /* XXH_NO_LONG_LONG */

#endif  /* XXH_NO_XXH3 */

/*!
 * @}
 */
#endif  /* XXH_IMPLEMENTATION */


#if defined (__cplusplus)
}
#endif