/*
 * xxHash - Fast Hash algorithm
 * Copyright (c) Yann Collet, Facebook, Inc.
 *
 * You can contact the author at:
 * - xxHash homepage: http://www.xxhash.com
 * - xxHash source repository: https://github.com/Cyan4973/xxHash
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


#ifndef XXH_NO_XXH3
# define XXH_NO_XXH3
#endif

#ifndef XXH_NAMESPACE
# define XXH_NAMESPACE ZSTD_
#endif

/*!
 * @mainpage xxHash
 *
 * @file xxhash.h
 * xxHash prototypes and implementation
 */
/* TODO: update */
/* Notice extracted from xxHash homepage:

xxHash is an extremely fast hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.

Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)

Name            Speed       Q.Score   Author
xxHash          5.4 GB/s     10
CrapWow         3.2 GB/s      2       Andrew
MurmurHash 3a   2.7 GB/s     10       Austin Appleby
SpookyHash      2.0 GB/s     10       Bob Jenkins
SBox            1.4 GB/s      9       Bret Mulvey
Lookup3         1.2 GB/s      9       Bob Jenkins
SuperFastHash   1.2 GB/s      1       Paul Hsieh
CityHash64      1.05 GB/s    10      Pike & Alakuijala
FNV             0.55 GB/s     5       Fowler, Noll, Vo
CRC32           0.43 GB/s     9
MD5-32          0.33 GB/s    10      Ronald L. Rivest
SHA1-32         0.28 GB/s    10

Q.Score is a measure of quality of the hash function.
It depends on successfully passing SMHasher test set.
10 is a perfect score.

Note: SMHasher's CRC32 implementation is not the fastest one.
Other speed-oriented implementations can be faster,
especially in combination with PCLMUL instruction:
https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735

A 64-bit version, named XXH64, is available since r35.
It offers much better speed, but for 64-bit applications only.
Name     Speed on 64 bits    Speed on 32 bits
XXH64       13.8 GB/s            1.9 GB/s
XXH32        6.8 GB/s            6.0 GB/s
*/

#if defined (__cplusplus)
extern "C" {
#endif

/* ****************************
 *  INLINE mode
 ******************************/
/*!
 * XXH_INLINE_ALL (and XXH_PRIVATE_API)
 * Use these build macros to inline xxhash into the target unit.
 * Inlining improves performance on small inputs, especially when the length is
 * expressed as a compile-time constant:
 *
 *  https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
 *
 * It also keeps xxHash symbols private to the unit, so they are not exported.
 *
 * Usage:
 *     #define XXH_INLINE_ALL
 *     #include "xxhash.h"
 *
 * Do not compile and link xxhash.o as a separate object, as it is not useful.
 */
#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
    && !defined(XXH_INLINE_ALL_31684351384)
   /* this section should be traversed only once */
#  define XXH_INLINE_ALL_31684351384
   /* give access to the advanced API, required to compile implementations */
#  undef XXH_STATIC_LINKING_ONLY   /* avoid macro redef */
#  define XXH_STATIC_LINKING_ONLY
   /* make all functions private */
#  undef XXH_PUBLIC_API
#  if defined(__GNUC__)
#    define XXH_PUBLIC_API static __inline __attribute__((unused))
#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#    define XXH_PUBLIC_API static inline
#  elif defined(_MSC_VER)
#    define XXH_PUBLIC_API static __inline
#  else
     /* note: this version may generate warnings for unused static functions */
#    define XXH_PUBLIC_API static
#  endif

   /*
    * This part deals with the special case where a unit wants to inline xxHash,
    * but "xxhash.h" has previously been included without XXH_INLINE_ALL,
    * such as part of some previously included *.h header file.
    * Without further action, the new include would just be ignored,
    * and functions would effectively _not_ be inlined (silent failure).
    * The following macros solve this situation by prefixing all inlined names,
    * avoiding naming collision with previous inclusions.
    */
   /* Before that, we unconditionally #undef all symbols,
    * in case they were already defined with XXH_NAMESPACE.
    * They will then be redefined for XXH_INLINE_ALL
    */
#  undef XXH_versionNumber
   /* XXH32 */
#  undef XXH32
#  undef XXH32_createState
#  undef XXH32_freeState
#  undef XXH32_reset
#  undef XXH32_update
#  undef XXH32_digest
#  undef XXH32_copyState
#  undef XXH32_canonicalFromHash
#  undef XXH32_hashFromCanonical
   /* XXH64 */
#  undef XXH64
#  undef XXH64_createState
#  undef XXH64_freeState
#  undef XXH64_reset
#  undef XXH64_update
#  undef XXH64_digest
#  undef XXH64_copyState
#  undef XXH64_canonicalFromHash
#  undef XXH64_hashFromCanonical
   /* XXH3_64bits */
#  undef XXH3_64bits
#  undef XXH3_64bits_withSecret
#  undef XXH3_64bits_withSeed
#  undef XXH3_64bits_withSecretandSeed
#  undef XXH3_createState
#  undef XXH3_freeState
#  undef XXH3_copyState
#  undef XXH3_64bits_reset
#  undef XXH3_64bits_reset_withSeed
#  undef XXH3_64bits_reset_withSecret
#  undef XXH3_64bits_update
#  undef XXH3_64bits_digest
#  undef XXH3_generateSecret
   /* XXH3_128bits */
#  undef XXH128
#  undef XXH3_128bits
#  undef XXH3_128bits_withSeed
#  undef XXH3_128bits_withSecret
#  undef XXH3_128bits_reset
#  undef XXH3_128bits_reset_withSeed
#  undef XXH3_128bits_reset_withSecret
#  undef XXH3_128bits_reset_withSecretandSeed
#  undef XXH3_128bits_update
#  undef XXH3_128bits_digest
#  undef XXH128_isEqual
#  undef XXH128_cmp
#  undef XXH128_canonicalFromHash
#  undef XXH128_hashFromCanonical
   /* Finally, free the namespace itself */
#  undef XXH_NAMESPACE

   /* employ the namespace for XXH_INLINE_ALL */
#  define XXH_NAMESPACE XXH_INLINE_
   /*
    * Some identifiers (enums, type names) are not symbols,
    * but they must nonetheless be renamed to avoid redeclaration.
    * Alternative solution: do not redeclare them.
    * However, this requires some #ifdefs, and has a more dispersed impact.
    * Meanwhile, renaming can be achieved in a single place.
    */
#  define XXH_IPREF(Id)   XXH_NAMESPACE ## Id
#  define XXH_OK XXH_IPREF(XXH_OK)
#  define XXH_ERROR XXH_IPREF(XXH_ERROR)
#  define XXH_errorcode XXH_IPREF(XXH_errorcode)
#  define XXH32_canonical_t  XXH_IPREF(XXH32_canonical_t)
#  define XXH64_canonical_t  XXH_IPREF(XXH64_canonical_t)
#  define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
#  define XXH32_state_s XXH_IPREF(XXH32_state_s)
#  define XXH32_state_t XXH_IPREF(XXH32_state_t)
#  define XXH64_state_s XXH_IPREF(XXH64_state_s)
#  define XXH64_state_t XXH_IPREF(XXH64_state_t)
#  define XXH3_state_s  XXH_IPREF(XXH3_state_s)
#  define XXH3_state_t  XXH_IPREF(XXH3_state_t)
#  define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
   /* Ensure the header is parsed again, even if it was previously included */
#  undef XXHASH_H_5627135585666179
#  undef XXHASH_H_STATIC_13879238742
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */



/* ****************************************************************
 *  Stable API
 *****************************************************************/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1


/*!
 * @defgroup public Public API
 * Contains details on the public xxHash functions.
 * @{
 */
/* specific declaration modes for Windows */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#    ifdef XXH_EXPORT
#      define XXH_PUBLIC_API __declspec(dllexport)
#    elif XXH_IMPORT
#      define XXH_PUBLIC_API __declspec(dllimport)
#    endif
#  else
#    define XXH_PUBLIC_API   /* do nothing */
#  endif
#endif

#ifdef XXH_DOXYGEN
/*!
 * @brief Emulate a namespace by transparently prefixing all symbols.
 *
 * If you want to include _and expose_ xxHash functions from within your own
 * library, but also want to avoid symbol collisions with other libraries which
 * may also include xxHash, you can use XXH_NAMESPACE to automatically prefix
 * any public symbol from xxhash library with the value of XXH_NAMESPACE
 * (therefore, avoid empty or numeric values).
 *
 * Note that no change is required within the calling program as long as it
 * includes `xxhash.h`: Regular symbol names will be automatically translated
 * by this header.
 */
#  define XXH_NAMESPACE /* YOUR NAME HERE */
#  undef XXH_NAMESPACE
#endif
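
/*!
 * Example (illustrative sketch, not part of the library): a wrapper library
 * that exposes xxHash under its own prefix. The `MYLIB_` prefix and the file
 * name below are hypothetical.
 * @code{.c}
 * // mylib_hash.c
 * #define XXH_NAMESPACE MYLIB_   // must be defined before the include
 * #include "xxhash.h"
 *
 * // The header maps XXH64 to MYLIB_XXH64 transparently, so callers keep
 * // writing XXH64(); the emitted symbol is MYLIB_XXH64.
 * XXH64_hash_t mylib_checksum(const void* p, size_t n)
 * {
 *     return XXH64(p, n, 0);
 * }
 * @endcode
 */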

#ifdef XXH_NAMESPACE
#  define XXH_CAT(A,B) A##B
#  define XXH_NAME2(A,B) XXH_CAT(A,B)
#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
/* XXH32 */
#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
/* XXH64 */
#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
/* XXH3_64bits */
#  define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
#  define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
#  define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
#  define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
#  define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
#  define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
#  define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
#  define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
#  define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
#  define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
#  define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
#  define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
#  define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
#  define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
#  define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
/* XXH3_128bits */
#  define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
#  define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
#  define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
#  define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
#  define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
#  define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
#  define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
#  define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
#  define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
#  define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
#  define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
#  define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
#  define XXH128_cmp     XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
#  define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
#  define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#endif


/* *************************************
 *  Version
 ***************************************/
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    8
#define XXH_VERSION_RELEASE  1
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)

/*!
 * @brief Obtains the xxHash version.
 *
 * This is mostly useful when xxHash is compiled as a shared library,
 * since the returned value comes from the library, as opposed to the header file.
 *
 * @return `XXH_VERSION_NUMBER` of the invoked library.
 */
XXH_PUBLIC_API unsigned XXH_versionNumber (void);
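
/*!
 * Example (illustrative sketch, not part of the library): verifying at run
 * time that a dynamically linked libxxhash matches the header this unit was
 * compiled against. The function name is a hypothetical helper.
 * @code{.c}
 * #include <assert.h>
 * #include "xxhash.h"
 *
 * static void checkXXHashVersion(void)
 * {
 *     // XXH_VERSION_NUMBER comes from the header at compile time;
 *     // XXH_versionNumber() comes from the linked library at run time.
 *     assert(XXH_versionNumber() == XXH_VERSION_NUMBER);
 * }
 * @endcode
 */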


/* ****************************
 *  Common basic types
 ******************************/
#include <stddef.h>   /* size_t */
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;


/*-**********************************************************************
 *  32-bit hash
 ************************************************************************/
#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
/*!
 * @brief An unsigned 32-bit integer.
 *
 * Not necessarily defined to `uint32_t` but functionally equivalent.
 */
typedef uint32_t XXH32_hash_t;

#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   include <stdint.h>
    typedef uint32_t XXH32_hash_t;

#else
#   include <limits.h>
#   if UINT_MAX == 0xFFFFFFFFUL
      typedef unsigned int XXH32_hash_t;
#   else
#     if ULONG_MAX == 0xFFFFFFFFUL
        typedef unsigned long XXH32_hash_t;
#     else
#       error "unsupported platform: need a 32-bit type"
#     endif
#   endif
#endif

/*!
 * @}
 *
 * @defgroup xxh32_family XXH32 family
 * @ingroup public
 * Contains functions used in the classic 32-bit xxHash algorithm.
 *
 * @note
 *   XXH32 is useful for older platforms, with no or poor 64-bit performance.
 *   Note that @ref xxh3_family provides competitive speed
 *   for both 32-bit and 64-bit systems, and offers true 64/128 bit hash results.
 *
 * @see @ref xxh64_family, @ref xxh3_family : Other xxHash families
 * @see @ref xxh32_impl for implementation details
 * @{
 */

/*!
 * @brief Calculates the 32-bit hash of @p input using xxHash32.
 *
 * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 32-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 32-bit hash value.
 *
 * @see
 *    XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *    Direct equivalents for the other variants of xxHash.
 * @see
 *    XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
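
/*!
 * Example (illustrative sketch, not part of the library): one-shot hashing
 * of a NUL-terminated string. The function name and the seed value are
 * arbitrary choices for the example.
 * @code{.c}
 * #include <string.h>
 * #include "xxhash.h"
 *
 * static XXH32_hash_t hashString(const char* str)
 * {
 *     XXH32_hash_t const seed = 0;   // any fixed 32-bit value works
 *     return XXH32(str, strlen(str), seed);
 * }
 * @endcode
 */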

/*!
 * Streaming functions generate the xxHash value from an incremental input.
 * This method is slower than single-call functions, due to state management.
 * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
 *
 * An XXH state must first be allocated using `XXH*_createState()`.
 *
 * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
 *
 * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
 *
 * The function returns an error code, with 0 meaning OK, and any other value
 * meaning there is an error.
 *
 * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
 * This function returns the nn-bit hash as an int or long long.
 *
 * It's still possible to continue inserting input into the hash state after a
 * digest, and generate new hash values later on by invoking `XXH*_digest()`.
 *
 * When done, release the state using `XXH*_freeState()`.
 *
 * Example code for incrementally hashing a file:
 * @code{.c}
 *    #include <assert.h>
 *    #include <stdio.h>
 *    #include <xxhash.h>
 *    #define BUFFER_SIZE 256
 *
 *    // Note: XXH64 and XXH3 use the same interface.
 *    XXH32_hash_t
 *    hashFile(FILE* stream)
 *    {
 *        XXH32_state_t* state;
 *        unsigned char buf[BUFFER_SIZE];
 *        size_t amt;
 *        XXH32_hash_t hash;
 *
 *        state = XXH32_createState();       // Create a state
 *        assert(state != NULL);             // Error check here
 *        XXH32_reset(state, 0xbaad5eed);    // Reset state with our seed
 *        while ((amt = fread(buf, 1, sizeof(buf), stream)) != 0) {
 *            XXH32_update(state, buf, amt); // Hash the file in chunks
 *        }
 *        hash = XXH32_digest(state);        // Finalize the hash
 *        XXH32_freeState(state);            // Clean up
 *        return hash;
 *    }
 * @endcode
 */

/*!
 * @typedef struct XXH32_state_s XXH32_state_t
 * @brief The opaque state struct for the XXH32 streaming API.
 *
 * @see XXH32_state_s for details.
 */
typedef struct XXH32_state_s XXH32_state_t;

/*!
 * @brief Allocates an @ref XXH32_state_t.
 *
 * Must be freed with XXH32_freeState().
 * @return An allocated XXH32_state_t on success, `NULL` on failure.
 */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
/*!
 * @brief Frees an @ref XXH32_state_t.
 *
 * Must be allocated with XXH32_createState().
 * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
 * @return XXH_OK.
 */
XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
/*!
 * @brief Copies one @ref XXH32_state_t to another.
 *
 * @param dst_state The state to copy to.
 * @param src_state The state to copy from.
 * @pre
 *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
 */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);

/*!
 * @brief Resets an @ref XXH32_state_t to begin a new hash.
 *
 * This function resets and seeds a state. Call it before @ref XXH32_update().
 *
 * @param statePtr The state struct to reset.
 * @param seed The 32-bit seed to alter the hash result predictably.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, XXH32_hash_t seed);

/*!
 * @brief Consumes a block of @p input to an @ref XXH32_state_t.
 *
 * Call this to incrementally consume blocks of data.
 *
 * @param statePtr The state struct to update.
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);

/*!
 * @brief Returns the calculated hash value from an @ref XXH32_state_t.
 *
 * @note
 *   Calling XXH32_digest() will not affect @p statePtr, so you can update,
 *   digest, and update again.
 *
 * @param statePtr The state struct to calculate the hash from.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return The calculated xxHash32 value from that state.
 */
XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);

/*******   Canonical representation   *******/

/*
 * The default return values from XXH functions are unsigned 32 and 64 bit
 * integers.
 * This is the simplest and fastest format for further post-processing.
 *
 * However, this leaves open the question of what is the order on the byte level,
 * since little and big endian conventions will store the same number differently.
 *
 * The canonical representation settles this issue by mandating big-endian
 * convention, the same convention as human-readable numbers (large digits first).
 *
 * When writing hash values to storage, sending them over a network, or printing
 * them, it's highly recommended to use the canonical representation to ensure
 * portability across a wider range of systems, present and future.
 *
 * The following functions allow transformation of hash values to and from
 * canonical format.
 */

/*!
 * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
 */
typedef struct {
    unsigned char digest[4]; /*!< Hash bytes, big endian */
} XXH32_canonical_t;

/*!
 * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
 *
 * @param dst The @ref XXH32_canonical_t pointer to be stored to.
 * @param hash The @ref XXH32_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);

/*!
 * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
 *
 * @param src The @ref XXH32_canonical_t to convert.
 *
 * @pre
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
 */
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
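
/*!
 * Example (illustrative sketch, not part of the library): writing a hash to
 * a file in canonical (big endian) form and reading it back, so the stored
 * bytes are identical on little and big endian machines. The helper names
 * are hypothetical.
 * @code{.c}
 * #include <stdio.h>
 * #include "xxhash.h"
 *
 * static void writeHash(FILE* out, XXH32_hash_t hash)
 * {
 *     XXH32_canonical_t canon;
 *     XXH32_canonicalFromHash(&canon, hash);   // native -> big endian bytes
 *     fwrite(canon.digest, 1, sizeof(canon.digest), out);
 * }
 *
 * static XXH32_hash_t readHash(FILE* in)
 * {
 *     XXH32_canonical_t canon;
 *     fread(canon.digest, 1, sizeof(canon.digest), in);
 *     return XXH32_hashFromCanonical(&canon);  // big endian bytes -> native
 * }
 * @endcode
 */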


#ifdef __has_attribute
# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
#else
# define XXH_HAS_ATTRIBUTE(x) 0
#endif

/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
# define XXH_HAS_C_ATTRIBUTE(x) 0
#endif

#if defined(__cplusplus) && defined(__has_cpp_attribute)
# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
# define XXH_HAS_CPP_ATTRIBUTE(x) 0
#endif

/*
   Define XXH_FALLTHROUGH macro for annotating switch cases with the 'fallthrough'
   attribute introduced in C++17 and C23.
   C++17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
   C23   : https://en.cppreference.com/w/c/language/attributes/fallthrough
*/
#if XXH_HAS_C_ATTRIBUTE(fallthrough)
# define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_CPP_ATTRIBUTE(fallthrough)
# define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
# define XXH_FALLTHROUGH __attribute__ ((fallthrough))
#else
# define XXH_FALLTHROUGH
#endif
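
/*!
 * Example (illustrative sketch, not part of the library): using
 * XXH_FALLTHROUGH to annotate an intentional fall-through between switch
 * cases, silencing -Wimplicit-fallthrough on compilers that support it.
 * The function is a hypothetical helper.
 * @code{.c}
 * static unsigned decodeFlags(int level)
 * {
 *     unsigned flags = 0;
 *     switch (level) {
 *     case 2: flags |= 2u;
 *             XXH_FALLTHROUGH;  // intentional: level 2 implies level 1
 *     case 1: flags |= 1u;
 *             XXH_FALLTHROUGH;
 *     case 0: break;
 *     default: break;
 *     }
 *     return flags;
 * }
 * @endcode
 */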

/*!
 * @}
 * @ingroup public
 * @{
 */

#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
 *  64-bit hash
 ************************************************************************/
#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
/*!
 * @brief An unsigned 64-bit integer.
 *
 * Not necessarily defined to `uint64_t` but functionally equivalent.
 */
typedef uint64_t XXH64_hash_t;
#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
   typedef uint64_t XXH64_hash_t;
#else
#  include <limits.h>
#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
     /* LP64 ABI says uint64_t is unsigned long */
     typedef unsigned long XXH64_hash_t;
#  else
     /* the following type must have a width of 64-bit */
     typedef unsigned long long XXH64_hash_t;
#  endif
#endif

/*!
 * @}
 *
 * @defgroup xxh64_family XXH64 family
 * @ingroup public
 * @{
 * Contains functions used in the classic 64-bit xxHash algorithm.
 *
 * @note
 *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
 *   and offers true 64/128 bit hash results.
 *   It provides better speed for systems with vector processing capabilities.
 */


/*!
 * @brief Calculates the 64-bit hash of @p input using xxHash64.
 *
 * This function usually runs faster on 64-bit systems, but slower on 32-bit
 * systems (see benchmark).
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 64-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 64-bit hash.
 *
 * @see
 *    XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *    Direct equivalents for the other variants of xxHash.
 * @see
 *    XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version.
 */
/* Begin FreeBSD - This symbol is needed by dll-linked CLI zstd(1). */
__attribute__((visibility ("default")))
/* End FreeBSD */
XXH_PUBLIC_API XXH64_hash_t XXH64(const void* input, size_t length, XXH64_hash_t seed);
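
/*!
 * Example (illustrative sketch, not part of the library): hashing a binary
 * buffer with a caller-chosen 64-bit seed. The function name and seed
 * constant are arbitrary.
 * @code{.c}
 * #include "xxhash.h"
 *
 * static XXH64_hash_t hashBuffer(const void* buf, size_t len)
 * {
 *     XXH64_hash_t const seed = 0x27d4eb2f165667c5ULL;  // arbitrary example seed
 *     return XXH64(buf, len, seed);
 * }
 * @endcode
 */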

/*******   Streaming   *******/
/*!
 * @brief The opaque state struct for the XXH64 streaming API.
 *
 * @see XXH64_state_s for details.
 */
typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);

XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);

/*******   Canonical representation   *******/
typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);

#ifndef XXH_NO_XXH3
/*!
 * @}
 * ************************************************************************
 * @defgroup xxh3_family XXH3 family
 * @ingroup public
 * @{
 *
 * XXH3 is a more recent hash algorithm featuring:
 *  - Improved speed for both small and large inputs
 *  - True 64-bit and 128-bit outputs
 *  - SIMD acceleration
 *  - Improved 32-bit viability
 *
 * Speed analysis methodology is explained here:
 *
 *    https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
 *
 * Compared to XXH64, expect XXH3 to run approximately
 * 2x faster on large inputs and >3x faster on small ones;
 * exact differences vary depending on platform.
 *
 * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
 * but does not require it.
 * Any 32-bit and 64-bit targets that can run XXH32 smoothly
 * can run XXH3 at competitive speeds, even without vector support.
 * Further details are explained in the implementation.
 *
 * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8,
 * ZVector and scalar targets. This can be controlled via the XXH_VECTOR macro.
 *
 * XXH3 implementation is portable:
 * it has a generic C90 formulation that can be compiled on any platform,
 * and all implementations generate exactly the same hash value on all platforms.
 * Starting from v0.8.0, it's also labelled "stable", meaning that
 * any future version will also generate the same hash value.
 *
 * XXH3 offers 2 variants, _64bits and _128bits.
 *
 * When only 64 bits are needed, prefer invoking the _64bits variant, as it
 * reduces the amount of mixing, resulting in faster speed on small inputs.
 * It's also generally simpler to manipulate a scalar return type than a struct.
 *
 * The API supports one-shot hashing, streaming mode, and custom secrets.
 */

/*-**********************************************************************
 * XXH3 64-bit variant
 ************************************************************************/

/* XXH3_64bits():
 * default 64-bit variant, using default secret and default seed of 0.
 * It's the fastest variant. */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len);

/*
 * XXH3_64bits_withSeed():
 * This variant generates a custom secret on the fly
 * based on default secret altered using the `seed` value.
 * While this operation is decently fast, note that it's not completely free.
 * Note: seed==0 produces the same results as XXH3_64bits().
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);

/*!
 * The bare minimum size for a custom secret.
 *
 * @see
 *  XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
 *  XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
 */
#define XXH3_SECRET_SIZE_MIN 136

/*
 * XXH3_64bits_withSecret():
 * It's possible to provide any blob of bytes as a "secret" to generate the hash.
 * This makes it more difficult for an external actor to prepare an intentional collision.
 * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
 * However, the quality of the secret impacts the dispersion of the hash algorithm.
 * Therefore, the secret _must_ look like a bunch of random bytes.
 * Avoid "trivial" or structured data such as repeated sequences or a text document.
 * Whenever in doubt about the "randomness" of the blob of bytes,
 * consider employing "XXH3_generateSecret()" instead (see below).
 * It will generate a proper high entropy secret derived from the blob of bytes.
 * Another advantage of using XXH3_generateSecret() is that
 * it guarantees that all bits within the initial blob of bytes
 * will impact every bit of the output.
 * This is not necessarily the case when using the blob of bytes directly
 * because, when hashing _small_ inputs, only a portion of the secret is employed.
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
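
/*!
 * Example (illustrative sketch, not part of the library): hashing with a
 * custom secret. The secret bytes below are placeholders; a real secret must
 * be >= XXH3_SECRET_SIZE_MIN bytes and look like random data (or be produced
 * by XXH3_generateSecret()).
 * @code{.c}
 * #include "xxhash.h"
 *
 * // Hypothetical application-wide secret, e.g. generated once offline.
 * static const unsigned char kSecret[XXH3_SECRET_SIZE_MIN] = {
 *     0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, // ...fill all 136 bytes...
 * };
 *
 * static XXH64_hash_t hashWithSecret(const void* data, size_t len)
 * {
 *     return XXH3_64bits_withSecret(data, len, kSecret, sizeof(kSecret));
 * }
 * @endcode
 */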


/*******   Streaming   *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 */

/*!
 * @brief The state struct for the XXH3 streaming API.
 *
 * @see XXH3_state_s for details.
 */
typedef struct XXH3_state_s XXH3_state_t;
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state);

/*
 * XXH3_64bits_reset():
 * Initialize with default parameters.
 * digest will be equivalent to `XXH3_64bits()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr);
/*
 * XXH3_64bits_reset_withSeed():
 * Generate a custom secret from `seed`, and store it into `statePtr`.
 * digest will be equivalent to `XXH3_64bits_withSeed()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
/*
 * XXH3_64bits_reset_withSecret():
 * `secret` is referenced, it _must outlive_ the hash streaming session.
 * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
 * and the quality of produced hash values depends on secret's entropy
 * (secret's content should look like a bunch of random bytes).
 * When in doubt about the randomness of a candidate `secret`,
 * consider employing `XXH3_generateSecret()` instead (see below).
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH3_64bits_digest (const XXH3_state_t* statePtr);

/* note : canonical representation of XXH3 is the same as XXH64
 * since they both produce XXH64_hash_t values */


/*-**********************************************************************
 * XXH3 128-bit variant
 ************************************************************************/

/*!
 * @brief The return value from 128-bit hashes.
 *
 * Stored in little endian order, although the fields themselves are in native
 * endianness.
 */
typedef struct {
    XXH64_hash_t low64;   /*!< `value & 0xFFFFFFFFFFFFFFFF` */
    XXH64_hash_t high64;  /*!< `value >> 64` */
} XXH128_hash_t;

XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);

/*******   Streaming   *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 *
 * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
 * Use already declared XXH3_createState() and XXH3_freeState().
 *
 * All reset and streaming functions have same meaning as their 64-bit counterpart.
 */

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr);

/* Following helper functions make it possible to compare XXH128_hash_t values.
 * Since XXH128_hash_t is a structure, this capability is not offered by the language.
 * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */

/*!
 * XXH128_isEqual():
 * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
 */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);

/*!
 * XXH128_cmp():
 *
 * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
 *
 * return: >0 if *h128_1  > *h128_2
 *         =0 if *h128_1 == *h128_2
 *         <0 if *h128_1  < *h128_2
 */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2);
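
/*!
 * Example (illustrative sketch, not part of the library): sorting an array
 * of XXH128_hash_t values with the standard library's qsort(), using
 * XXH128_cmp() as the comparator. The helper name is hypothetical.
 * @code{.c}
 * #include <stdlib.h>
 * #include "xxhash.h"
 *
 * static void sortHashes(XXH128_hash_t* hashes, size_t count)
 * {
 *     // XXH128_cmp() already has the (const void*, const void*) signature
 *     // that qsort() expects.
 *     qsort(hashes, count, sizeof(hashes[0]), XXH128_cmp);
 * }
 * @endcode
 */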


/*******   Canonical representation   *******/
typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash);
XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src);


#endif  /* !XXH_NO_XXH3 */
#endif  /* XXH_NO_LONG_LONG */

/*!
 * @}
 */
#endif /* XXHASH_H_5627135585666179 */



#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
#define XXHASH_H_STATIC_13879238742
/* ****************************************************************************
 * This section contains declarations which are not guaranteed to remain stable.
 * They may change in future versions, becoming incompatible with a different
 * version of the library.
 * These declarations should only be used with static linking.
 * Never use them in association with dynamic linking!
 ***************************************************************************** */

/*
 * These definitions are only present to allow static allocation
 * of XXH states, on stack or in a struct, for example.
 * Never **ever** access their members directly.
 */
9560c16b537SWarner Losh
957*5ff13fbcSAllan Jude /*!
958*5ff13fbcSAllan Jude * @internal
959*5ff13fbcSAllan Jude * @brief Structure for XXH32 streaming API.
960*5ff13fbcSAllan Jude *
961*5ff13fbcSAllan Jude * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
962*5ff13fbcSAllan Jude * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
963*5ff13fbcSAllan Jude * an opaque type. This allows fields to safely be changed.
964*5ff13fbcSAllan Jude *
965*5ff13fbcSAllan Jude * Typedef'd to @ref XXH32_state_t.
966*5ff13fbcSAllan Jude * Do not access the members of this struct directly.
967*5ff13fbcSAllan Jude * @see XXH64_state_s, XXH3_state_s
968*5ff13fbcSAllan Jude */
9690c16b537SWarner Losh struct XXH32_state_s {
970*5ff13fbcSAllan Jude XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
971*5ff13fbcSAllan Jude XXH32_hash_t large_len; /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
972*5ff13fbcSAllan Jude XXH32_hash_t v[4]; /*!< Accumulator lanes */
973*5ff13fbcSAllan Jude XXH32_hash_t mem32[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
974*5ff13fbcSAllan Jude XXH32_hash_t memsize; /*!< Amount of data in @ref mem32 */
975*5ff13fbcSAllan Jude XXH32_hash_t reserved; /*!< Reserved field. Do not read nor write to it. */
9760c16b537SWarner Losh }; /* typedef'd to XXH32_state_t */
9770c16b537SWarner Losh
978*5ff13fbcSAllan Jude
979*5ff13fbcSAllan Jude #ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */
980*5ff13fbcSAllan Jude
981*5ff13fbcSAllan Jude /*!
982*5ff13fbcSAllan Jude * @internal
983*5ff13fbcSAllan Jude * @brief Structure for XXH64 streaming API.
984*5ff13fbcSAllan Jude *
985*5ff13fbcSAllan Jude * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
986*5ff13fbcSAllan Jude * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
987*5ff13fbcSAllan Jude * an opaque type. This allows fields to safely be changed.
988*5ff13fbcSAllan Jude *
989*5ff13fbcSAllan Jude * Typedef'd to @ref XXH64_state_t.
990*5ff13fbcSAllan Jude * Do not access the members of this struct directly.
991*5ff13fbcSAllan Jude * @see XXH32_state_s, XXH3_state_s
992*5ff13fbcSAllan Jude */
9930c16b537SWarner Losh struct XXH64_state_s {
994*5ff13fbcSAllan Jude XXH64_hash_t total_len; /*!< Total length hashed. This is always 64-bit. */
995*5ff13fbcSAllan Jude XXH64_hash_t v[4]; /*!< Accumulator lanes */
996*5ff13fbcSAllan Jude XXH64_hash_t mem64[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
997*5ff13fbcSAllan Jude XXH32_hash_t memsize; /*!< Amount of data in @ref mem64 */
998*5ff13fbcSAllan Jude XXH32_hash_t reserved32; /*!< Reserved field, needed for padding anyways*/
999*5ff13fbcSAllan Jude XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it. */
10000c16b537SWarner Losh }; /* typedef'd to XXH64_state_t */


#ifndef XXH_NO_XXH3

#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)   /* >= C11 */
#  include <stdalign.h>
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__cplusplus) && (__cplusplus >= 201103L)   /* >= C++11 */
/* In C++ alignas() is a keyword */
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__GNUC__)
#  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
#elif defined(_MSC_VER)
#  define XXH_ALIGN(n)      __declspec(align(n))
#else
#  define XXH_ALIGN(n)   /* disabled */
#endif

/* Old GCC versions only accept the attribute after the type in structures. */
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
    && !(defined(__cplusplus) && (__cplusplus >= 201103L))          /* >= C++11 */ \
    && defined(__GNUC__)
#  define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
#else
#  define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
#endif

/*!
 * @brief The size of the internal XXH3 buffer.
 *
 * This is the optimal update size for incremental hashing.
 *
 * @see XXH3_64bits_update(), XXH3_128bits_update().
 */
#define XXH3_INTERNALBUFFER_SIZE 256

/*!
 * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
 *
 * This is the size used in @ref XXH3_kSecret and the seeded functions.
 *
 * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
 */
#define XXH3_SECRET_DEFAULT_SIZE 192

/*!
 * @internal
 * @brief Structure for XXH3 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
 * Otherwise it is an opaque type.
 * Never use this definition in combination with a dynamic library.
 * This allows fields to safely be changed in the future.
 *
 * @note **This structure has a strict alignment requirement of 64 bytes!**
 * Do not allocate this with `malloc()` or `new`,
 * it will not be sufficiently aligned.
 * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
 *
 * Typedef'd to @ref XXH3_state_t.
 * Never access the members of this struct directly.
 *
 * @see XXH3_INITSTATE() for stack initialization.
 * @see XXH3_createState(), XXH3_freeState().
 * @see XXH32_state_s, XXH64_state_s
 */
struct XXH3_state_s {
    XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
        /*!< The 8 accumulators. Similar to `v` in @ref XXH32_state_s and @ref XXH64_state_s */
    XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
        /*!< Used to store a custom secret generated from a seed. */
    XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
        /*!< The internal buffer. @see XXH32_state_s::mem32 */
    XXH32_hash_t bufferedSize;
        /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
    XXH32_hash_t useSeed;
        /*!< Reserved field. Needed for padding on 64-bit. */
    size_t nbStripesSoFar;
        /*!< Number of stripes processed. */
    XXH64_hash_t totalLen;
        /*!< Total length hashed. 64-bit even on 32-bit targets. */
    size_t nbStripesPerBlock;
        /*!< Number of stripes per block. */
    size_t secretLimit;
        /*!< Size of @ref customSecret or @ref extSecret */
    XXH64_hash_t seed;
        /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
    XXH64_hash_t reserved64;
        /*!< Reserved field. */
    const unsigned char* extSecret;
        /*!< Reference to an external secret for the _withSecret variants, NULL
         *   for other variants. */
    /* note: there may be some padding at the end due to alignment on 64 bytes */
}; /* typedef'd to XXH3_state_t */

#undef XXH_ALIGN_MEMBER

/*!
 * @brief Initializes a stack-allocated `XXH3_state_s`.
 *
 * When the @ref XXH3_state_t structure is merely emplaced on the stack,
 * it should be initialized with XXH3_INITSTATE() or a memset()
 * in case its first reset uses XXH3_NNbits_reset_withSeed().
 * This init can be omitted if the first reset uses default or _withSecret mode.
 * This operation isn't necessary when the state is created with XXH3_createState().
 * Note that this doesn't prepare the state for a streaming operation,
 * it's still necessary to use XXH3_NNbits_reset*() afterwards.
 */
#define XXH3_INITSTATE(XXH3_state_ptr)   { (XXH3_state_ptr)->seed = 0; }
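
/*
 * Example (illustrative sketch, not part of the library; the helper name is
 * hypothetical): stack allocation with a seeded first reset. XXH3_INITSTATE()
 * is required here precisely because the first reset is _withSeed().
 *
 *   XXH64_hash_t hash_two_parts(const void* p1, size_t n1,
 *                               const void* p2, size_t n2,
 *                               XXH64_hash_t seed)
 *   {
 *       XXH3_state_t state;   // stack storage satisfies the 64-byte alignment
 *       XXH3_INITSTATE(&state);
 *       (void)XXH3_64bits_reset_withSeed(&state, seed);
 *       (void)XXH3_64bits_update(&state, p1, n1);
 *       (void)XXH3_64bits_update(&state, p2, n2);
 *       return XXH3_64bits_digest(&state);
 *   }
 */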


/* XXH128() :
 * simple alias to pre-selected XXH3_128bits variant
 */
XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed);
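
/*
 * Example (illustrative): one-shot 128-bit hashing of a buffer, with seed 0.
 *
 *   XXH128_hash_t h = XXH128(buf, bufSize, 0);
 *   // h.low64 and h.high64 hold the two 64-bit halves of the digest.
 */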


/* ===   Experimental API   === */
/* Symbols defined below must be considered tied to a specific library version. */

/*
 * XXH3_generateSecret():
 *
 * Derive a high-entropy secret from any user-defined content, named customSeed.
 * The generated secret can be used in combination with `*_withSecret()` functions.
 * The `_withSecret()` variants are useful to provide a higher level of protection than a 64-bit seed,
 * as it becomes much more difficult for an external actor to guess how to impact the calculation logic.
 *
 * The function accepts as input a custom seed of any length and any content,
 * and derives from it a high-entropy secret of length @p secretSize
 * into an already allocated buffer @p secretBuffer.
 * @p secretSize must be >= XXH3_SECRET_SIZE_MIN.
 *
 * The generated secret can then be used with any `*_withSecret()` variant.
 * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`,
 * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()`
 * are part of this list. They all accept a `secret` parameter
 * which must be large enough for implementation reasons (>= XXH3_SECRET_SIZE_MIN)
 * _and_ feature very high entropy (consist of random-looking bytes).
 * These conditions can be a high bar to meet, so
 * XXH3_generateSecret() can be employed to ensure proper quality.
 *
 * customSeed can be anything. It can have any size, even small ones,
 * and its content can be anything, even "poor entropy" sources such as a bunch of zeroes.
 * The resulting `secret` will nonetheless provide all required qualities.
 *
 * When customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize);
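
/*
 * Example (illustrative sketch, not part of the library; the helper name and
 * seed content are hypothetical): derive a secret, then hash with it.
 *
 *   XXH64_hash_t hash_with_derived_secret(const void* data, size_t len)
 *   {
 *       unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];  // any size >= XXH3_SECRET_SIZE_MIN
 *       const char customSeed[] = "my-application-id";
 *       (void)XXH3_generateSecret(secret, sizeof(secret),
 *                                 customSeed, sizeof(customSeed));
 *       return XXH3_64bits_withSecret(data, len, secret, sizeof(secret));
 *   }
 *
 * In production code the secret would be generated once and cached,
 * rather than re-derived on every call.
 */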


/*
 * XXH3_generateSecret_fromSeed():
 *
 * Generate the same secret as the _withSeed() variants.
 *
 * The resulting secret always has a length of XXH3_SECRET_DEFAULT_SIZE.
 * @p secretBuffer must be already allocated, of size at least XXH3_SECRET_DEFAULT_SIZE bytes.
 *
 * The generated secret can be used in combination with
 * `*_withSecret()` and `_withSecretandSeed()` variants.
 * This generator is notably useful in combination with `_withSecretandSeed()`,
 * as a way to emulate a faster `_withSeed()` variant.
 */
XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed);
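
/*
 * Example (illustrative): precompute the seed-derived secret once,
 * so that large inputs don't pay for secret regeneration.
 *
 *   unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
 *   XXH3_generateSecret_fromSeed(secret, 42);   // 42 is an arbitrary example seed
 */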

/*
 * *_withSecretandSeed() :
 * These variants generate hash values using either
 * @p seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes)
 * or @p secret for "large" keys (>= XXH3_MIDSIZE_MAX).
 *
 * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
 * `_withSeed()` has to generate the secret on the fly for "large" keys.
 * It's fast, but can be perceptible for "not so large" keys (< 1 KB).
 * `_withSecret()` has to generate the masks on the fly for "small" keys,
 * which requires more instructions than the _withSeed() variants.
 * Therefore, the _withSecretandSeed() variant combines the best of both worlds.
 *
 * When @p secret has been generated by XXH3_generateSecret_fromSeed(),
 * this variant produces *exactly* the same results as the `_withSeed()` variant,
 * hence offering only a pure speed benefit on "large" input,
 * by skipping the need to regenerate the secret for every large input.
 *
 * Another usage scenario is to hash the secret to a 64-bit hash value,
 * for example with XXH3_64bits(), which then becomes the seed,
 * and then employ both the seed and the secret in _withSecretandSeed().
 * On top of speed, an added benefit is that each bit in the secret
 * has a 50% chance of flipping each bit in the output,
 * via its impact on the seed.
 * This is not guaranteed when using the secret directly in "small data" scenarios,
 * because only portions of the secret are employed for small data.
 * See the sketch after these declarations for a typical setup.
 */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecretandSeed(const void* data, size_t len,
                              const void* secret, size_t secretSize,
                              XXH64_hash_t seed);

XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(const void* data, size_t len,
                               const void* secret, size_t secretSize,
                               XXH64_hash_t seed64);

XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
                                    const void* secret, size_t secretSize,
                                    XXH64_hash_t seed64);

XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
                                     const void* secret, size_t secretSize,
                                     XXH64_hash_t seed64);
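
/*
 * Example (illustrative sketch, not part of the library; the helper name is
 * hypothetical): pairing a seed-derived secret with _withSecretandSeed() to
 * emulate a faster _withSeed(). Per the note above, the return value equals
 * XXH3_64bits_withSeed(data, len, seed) for every input size.
 *
 *   XXH64_hash_t fast_seeded_hash(const void* data, size_t len, XXH64_hash_t seed)
 *   {
 *       unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];  // generate once and cache in real code
 *       XXH3_generateSecret_fromSeed(secret, seed);
 *       return XXH3_64bits_withSecretandSeed(data, len,
 *                                            secret, sizeof(secret), seed);
 *   }
 */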


#endif  /* XXH_NO_XXH3 */
#endif  /* XXH_NO_LONG_LONG */
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
#  define XXH_IMPLEMENTATION
#endif

#endif  /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */


/* ======================================================================== */
/* ======================================================================== */
/* ======================================================================== */


/*-**********************************************************************
 * xxHash implementation
 *-**********************************************************************
 * xxHash's implementation used to be hosted inside xxhash.c.
 *
 * However, inlining requires the implementation to be visible to the compiler,
 * hence be included alongside the header.
 * Previously, the implementation was hosted inside xxhash.c,
 * which was then #included when inlining was activated.
 * This construction created issues with a few build and install systems,
 * as it required xxhash.c to be stored in the /include directory.
 *
 * The xxHash implementation is now directly integrated within xxhash.h.
 * As a consequence, xxhash.c is no longer needed in /include.
 *
 * xxhash.c is still available and is still useful.
 * In a "normal" setup, when xxhash is not inlined,
 * xxhash.h only exposes the prototypes and public symbols,
 * while xxhash.c can be built into an object file xxhash.o
 * which can then be linked into the final binary.
 ************************************************************************/

#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
#  define XXH_IMPLEM_13a8737387

/* *************************************
 *  Tuning parameters
 ***************************************/

/*!
 * @defgroup tuning Tuning parameters
 * @{
 *
 * Various macros to control xxHash's behavior.
 */
#ifdef XXH_DOXYGEN
/*!
 * @brief Define this to disable 64-bit code.
 *
 * Useful if only using the @ref xxh32_family and you have a strict C90 compiler.
 */
#  define XXH_NO_LONG_LONG
#  undef XXH_NO_LONG_LONG /* don't actually */
/*!
 * @brief Controls how unaligned memory is accessed.
 *
 * By default, access to unaligned memory is controlled by `memcpy()`, which is
 * safe and portable.
 *
 * Unfortunately, on some target/compiler combinations, the generated assembly
 * is sub-optimal.
 *
 * The switch below allows selection of a different access method
 * in the search for improved performance.
 *
 * @par Possible options:
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
 *   @par
 *     Use `memcpy()`. Safe and portable. Note that most modern compilers will
 *     eliminate the function call and treat it as an unaligned access.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((packed))`
 *   @par
 *     Depends on compiler extensions and is therefore not portable.
 *     This method is safe _if_ your compiler supports it,
 *     and *generally* as fast or faster than `memcpy`.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
 *   @par
 *     Casts directly and dereferences. This method doesn't depend on the
 *     compiler, but it violates the C standard as it directly dereferences an
 *     unaligned pointer. It can generate buggy code on targets which do not
 *     support unaligned memory accesses, but in some circumstances, it's the
 *     only known way to get the most performance.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
 *   @par
 *     Also portable. This can generate the best code on old compilers which don't
 *     inline small `memcpy()` calls, and it might also be faster on big-endian
 *     systems which lack a native byteswap instruction. However, some compilers
 *     will emit literal byteshifts even if the target supports unaligned access.
 *  .
 *
 * @warning
 *   Methods 1 and 2 rely on implementation-defined behavior. Use these with
 *   care, as what works on one compiler/platform/optimization level may cause
 *   another to read garbage data or even crash.
 *
 * See http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
 *
 * Prefer these methods in priority order (0 > 3 > 1 > 2).
 */
#  define XXH_FORCE_MEMORY_ACCESS 0
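
/*
 * Example (illustrative): tuning macros like this one are normally set on the
 * compiler command line, e.g. with a POSIX-style driver:
 *
 *   cc -O3 -DXXH_FORCE_MEMORY_ACCESS=3 -c xxhash.c
 *
 * or defined in source before including this header:
 *
 *   #define XXH_FORCE_MEMORY_ACCESS 1
 *   #include "xxhash.h"
 */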

/*!
 * @def XXH_FORCE_ALIGN_CHECK
 * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
 * and XXH64() only).
 *
 * This is an important performance trick for architectures without decent
 * unaligned memory access performance.
 *
 * It checks for input alignment, and when conditions are met, uses a "fast
 * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
 * faster_ read speed.
 *
 * The check costs one initial branch per hash, which is generally negligible,
 * but not zero.
 *
 * Moreover, it's not useful to generate an additional code path if memory
 * access uses the same instruction for both aligned and unaligned
 * addresses (e.g. x86 and aarch64).
 *
 * In these cases, the alignment check can be removed by setting this macro to 0.
 * Then the code will always use unaligned memory access.
 * The alignment check is automatically disabled on x86, x64, and arm64,
 * which are platforms known to offer good unaligned memory access performance.
 *
 * This option does not affect XXH3 (only XXH32 and XXH64).
 */
#  define XXH_FORCE_ALIGN_CHECK 0

/*!
 * @def XXH_NO_INLINE_HINTS
 * @brief When non-zero, sets all functions to `static`.
 *
 * By default, xxHash tries to force the compiler to inline almost all internal
 * functions.
 *
 * This can usually improve performance due to reduced jumping and improved
 * constant folding, but significantly increases the size of the binary, which
 * might not be favorable.
 *
 * Additionally, sometimes the forced inlining can be detrimental to performance,
 * depending on the architecture.
 *
 * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
 * compiler full control on whether to inline or not.
 *
 * When not optimizing (-O0), optimizing for size (-Os, -Oz), or using
 * -fno-inline with GCC or Clang, this will automatically be defined.
 */
#  define XXH_NO_INLINE_HINTS 0

/*!
 * @def XXH32_ENDJMP
 * @brief Whether to use a jump for `XXH32_finalize`.
 *
 * For performance, `XXH32_finalize` uses multiple branches in the finalizer.
 * This is generally preferable for performance,
 * but depending on the exact architecture, a jmp may be preferable.
 *
 * This setting can only possibly make a difference for very small inputs.
 */
#  define XXH32_ENDJMP 0

/*!
 * @internal
 * @brief Redefines old internal names.
 *
 * For compatibility with code that uses xxHash's internals before the names
 * were changed to improve namespacing. There is no other reason to use this.
 */
#  define XXH_OLD_NAMES
#  undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
#endif /* XXH_DOXYGEN */
/*!
 * @}
 */

#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
   /* prefer __packed__ structures (method 1) for GCC on armv7+ and mips */
#  if !defined(__clang__) && \
    ( \
        (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
        ( \
            defined(__GNUC__) && ( \
                (defined(__ARM_ARCH) && __ARM_ARCH >= 7) || \
                ( \
                    defined(__mips__) && \
                    (__mips <= 5 || __mips_isa_rev < 6) && \
                    (!defined(__mips16) || defined(__mips_mips16e2)) \
                ) \
            ) \
        ) \
    )
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif

#ifndef XXH_FORCE_ALIGN_CHECK   /* can be defined externally */
#  if defined(__i386)  || defined(__x86_64__) || defined(__aarch64__) \
   || defined(_M_IX86) || defined(_M_X64)     || defined(_M_ARM64) /* visual */
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif

#ifndef XXH_NO_INLINE_HINTS
#  if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \
   || defined(__NO_INLINE__)     /* -O0, -fno-inline */
#    define XXH_NO_INLINE_HINTS 1
#  else
#    define XXH_NO_INLINE_HINTS 0
#  endif
#endif

#ifndef XXH32_ENDJMP
/* generally preferable for performance */
#  define XXH32_ENDJMP 0
#endif

/*!
 * @defgroup impl Implementation
 * @{
 */


/* *************************************
 *  Includes & Memory related functions
 ***************************************/
/* Modify the local functions below should you wish to use some other memory routines */
/* for ZSTD_malloc(), ZSTD_free() */
#define ZSTD_DEPS_NEED_MALLOC
#include "zstd_deps.h"   /* size_t, ZSTD_malloc, ZSTD_free, ZSTD_memcpy */
static void* XXH_malloc(size_t s) { return ZSTD_malloc(s); }
static void  XXH_free(void* p) { ZSTD_free(p); }
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_memcpy(dest,src,size); }


/* *************************************
 *  Compiler Specific Options
 ***************************************/
#ifdef _MSC_VER /* Visual Studio warning fix */
#  pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
#endif

#if XXH_NO_INLINE_HINTS   /* disable inlining hints */
#  if defined(__GNUC__) || defined(__clang__)
#    define XXH_FORCE_INLINE static __attribute__((unused))
#  else
#    define XXH_FORCE_INLINE static
#  endif
#  define XXH_NO_INLINE static
/* enable inlining hints */
#elif defined(__GNUC__) || defined(__clang__)
#  define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
#  define XXH_NO_INLINE static __attribute__((noinline))
#elif defined(_MSC_VER)   /* Visual Studio */
#  define XXH_FORCE_INLINE static __forceinline
#  define XXH_NO_INLINE static __declspec(noinline)
#elif defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* C99 */
#  define XXH_FORCE_INLINE static inline
#  define XXH_NO_INLINE static
#else
#  define XXH_FORCE_INLINE static
#  define XXH_NO_INLINE static
#endif



/* *************************************
 *  Debug
 ***************************************/
/*!
 * @ingroup tuning
 * @def XXH_DEBUGLEVEL
 * @brief Sets the debugging level.
 *
 * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
 * compiler's command line options. The value must be a number.
 */
#ifndef XXH_DEBUGLEVEL
#  ifdef DEBUGLEVEL /* backwards compat */
#    define XXH_DEBUGLEVEL DEBUGLEVEL
#  else
#    define XXH_DEBUGLEVEL 0
#  endif
#endif

#if (XXH_DEBUGLEVEL>=1)
#  include <assert.h>   /* note: can still be disabled with NDEBUG */
#  define XXH_ASSERT(c)   assert(c)
#else
#  define XXH_ASSERT(c)   ((void)0)
#endif

/* note: use after variable declarations */
#ifndef XXH_STATIC_ASSERT
#  if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)   /* C11 */
#    include <assert.h>
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
#  elif defined(__cplusplus) && (__cplusplus >= 201103L)           /* C++11 */
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
#  else
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
#  endif
#  define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
#endif
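
/*
 * Example (illustrative): XXH_STATIC_ASSERT() expands to a statement, and the
 * pre-C11 fallback declares a struct, so in C90 code it must appear after
 * variable declarations, as the note above says:
 *
 *   void check(void)
 *   {
 *       int unused = 0;                        // declarations first
 *       XXH_STATIC_ASSERT(sizeof(char) == 1);  // then the assertion
 *       (void)unused;
 *   }
 */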

/*!
 * @internal
 * @def XXH_COMPILER_GUARD(var)
 * @brief Used to prevent unwanted optimizations for @p var.
 *
 * It uses an empty GCC inline assembly statement with a register constraint
 * which forces @p var into a general purpose register (e.g. eax, ebx, ecx
 * on x86) and marks it as modified.
 *
 * This is used in a few places to avoid unwanted autovectorization (e.g.
 * XXH32_round()). All vectorization we want is explicit via intrinsics,
 * and _usually_ isn't wanted elsewhere.
 *
 * We also use it to prevent unwanted constant folding for AArch64 in
 * XXH3_initCustomSecret_scalar().
 */
#if defined(__GNUC__) || defined(__clang__)
#  define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var))
#else
#  define XXH_COMPILER_GUARD(var) ((void)0)
#endif

/* *************************************
 *  Basic Types
 ***************************************/
#if !defined (__VMS) \
 && (defined (__cplusplus) \
 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
   typedef uint8_t xxh_u8;
#else
   typedef unsigned char xxh_u8;
#endif
typedef XXH32_hash_t xxh_u32;

#ifdef XXH_OLD_NAMES
#  define BYTE xxh_u8
#  define U8   xxh_u8
#  define U32  xxh_u32
#endif

/* ***   Memory access   *** */

/*!
 * @internal
 * @fn xxh_u32 XXH_read32(const void* ptr)
 * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit native endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readLE32(const void* ptr)
 * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit little endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readBE32(const void* ptr)
 * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit big endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
 * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
 * always @ref XXH_alignment::XXH_unaligned.
 *
 * @param ptr The pointer to read from.
 * @param align Whether @p ptr is aligned.
 * @pre
 *   If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
 *   aligned.
 * @return The 32-bit little endian integer from the bytes at @p ptr.
 */

#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
 * Manual byteshift. Best for old compilers which don't inline memcpy.
 * We actually directly use XXH_readLE32 and XXH_readBE32.
 */
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/*
 * Force direct memory access. Only works on CPUs that support unaligned memory
 * access in hardware.
 */
static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/*
 * __pack instructions are safer but compiler specific, hence potentially
 * problematic for some compilers.
 *
 * Currently only defined for GCC and ICC.
 */
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
#endif
static xxh_u32 XXH_read32(const void* ptr)
{
    typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign;
    return ((const xxh_unalign*)ptr)->u32;
}

#else

/*
 * Portable and safe solution. Generally efficient.
 * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
 */
static xxh_u32 XXH_read32(const void* memPtr)
{
    xxh_u32 val;
    XXH_memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif /* XXH_FORCE_MEMORY_ACCESS */


/* ***   Endianness   *** */

/*!
 * @ingroup tuning
 * @def XXH_CPU_LITTLE_ENDIAN
 * @brief Whether the target is little endian.
 *
 * Defined to 1 if the target is little endian, or 0 if it is big endian.
 * It can be defined externally, for example on the compiler command line.
 *
 * If it is not defined,
 * a runtime check (which is usually constant folded) is used instead.
 *
 * @note
 *   This is not necessarily defined to an integer constant.
 *
 * @see XXH_isLittleEndian() for the runtime check.
 */
#ifndef XXH_CPU_LITTLE_ENDIAN
/*
 * Try to detect endianness automatically, to avoid the nonstandard behavior
 * in `XXH_isLittleEndian()`
 */
#  if defined(_WIN32) /* Windows is always little endian */ \
   || defined(__LITTLE_ENDIAN__) \
   || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 1
#  elif defined(__BIG_ENDIAN__) \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 0
#  else
/*!
 * @internal
 * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
 *
 * Most compilers will constant fold this.
 */
static int XXH_isLittleEndian(void)
{
    /*
     * Portable and well-defined behavior.
     * Don't use static: it is detrimental to performance.
     */
    const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
    return one.c[0];
}
#    define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
#  endif
#endif




/* ****************************************
 *  Compiler-specific Functions and Macros
 ******************************************/
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#ifdef __has_builtin
#  define XXH_HAS_BUILTIN(x) __has_builtin(x)
#else
#  define XXH_HAS_BUILTIN(x) 0
#endif

/*!
 * @internal
 * @def XXH_rotl32(x,r)
 * @brief 32-bit rotate left.
 *
 * @param x The 32-bit integer to be rotated.
 * @param r The number of bits to rotate.
 * @pre
 *   @p r > 0 && @p r < 32
 * @note
 *   @p x and @p r may be evaluated multiple times.
 * @return The rotated result.
 */
#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
                               && XXH_HAS_BUILTIN(__builtin_rotateleft64)
#  define XXH_rotl32 __builtin_rotateleft32
#  define XXH_rotl64 __builtin_rotateleft64
/* Note: although _rotl exists for MinGW (GCC under Windows), performance seems poor */
#elif defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif
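
/*
 * Example (illustrative): with the fallback macro, `x` is expanded twice,
 * so arguments must be free of side effects:
 *
 *   h = XXH_rotl32(*p++, 13);                      // wrong: may increment p twice
 *   { xxh_u32 v = *p++; h = XXH_rotl32(v, 13); }   // safe
 */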

/*!
 * @internal
 * @fn xxh_u32 XXH_swap32(xxh_u32 x)
 * @brief A 32-bit byteswap.
 *
 * @param x The 32-bit integer to byteswap.
 * @return @p x, byteswapped.
 */
#if defined(_MSC_VER)   /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#else
static xxh_u32 XXH_swap32 (xxh_u32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
#endif
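
/* Worked example (illustrative): XXH_swap32(0x12345678) == 0x78563412;
 * each byte moves to the mirrored position. */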


/* ***************************
 *  Memory reads
 *****************************/

/*!
 * @internal
 * @brief Enum to indicate whether a pointer is aligned.
 */
typedef enum {
    XXH_aligned,  /*!< Aligned */
    XXH_unaligned /*!< Possibly unaligned */
} XXH_alignment;

/*
 * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
 *
 * This is ideal for older compilers which don't inline memcpy.
 */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))

XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[0]
         | ((xxh_u32)bytePtr[1] << 8)
         | ((xxh_u32)bytePtr[2] << 16)
         | ((xxh_u32)bytePtr[3] << 24);
}

XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[3]
         | ((xxh_u32)bytePtr[2] << 8)
         | ((xxh_u32)bytePtr[1] << 16)
         | ((xxh_u32)bytePtr[0] << 24);
}

#else
XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
}

static xxh_u32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
#endif

XXH_FORCE_INLINE xxh_u32
XXH_readLE32_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned) {
        return XXH_readLE32(ptr);
    } else {
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
    }
}
1846*5ff13fbcSAllan Jude
1847*5ff13fbcSAllan Jude /* *************************************
1848*5ff13fbcSAllan Jude * Misc
1849*5ff13fbcSAllan Jude ***************************************/
1850*5ff13fbcSAllan Jude /*! @ingroup public */
XXH_versionNumber(void)1851*5ff13fbcSAllan Jude XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
1852*5ff13fbcSAllan Jude
1853*5ff13fbcSAllan Jude
1854*5ff13fbcSAllan Jude /* *******************************************************************
1855*5ff13fbcSAllan Jude * 32-bit hash functions
1856*5ff13fbcSAllan Jude *********************************************************************/
1857*5ff13fbcSAllan Jude /*!
1858*5ff13fbcSAllan Jude * @}
1859*5ff13fbcSAllan Jude * @defgroup xxh32_impl XXH32 implementation
1860*5ff13fbcSAllan Jude * @ingroup impl
1861*5ff13fbcSAllan Jude * @{
1862*5ff13fbcSAllan Jude */
1863*5ff13fbcSAllan Jude /* #define instead of static const, to be used as initializers */
1864*5ff13fbcSAllan Jude #define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */
1865*5ff13fbcSAllan Jude #define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */
1866*5ff13fbcSAllan Jude #define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */
1867*5ff13fbcSAllan Jude #define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */
1868*5ff13fbcSAllan Jude #define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */
1869*5ff13fbcSAllan Jude
1870*5ff13fbcSAllan Jude #ifdef XXH_OLD_NAMES
1871*5ff13fbcSAllan Jude # define PRIME32_1 XXH_PRIME32_1
1872*5ff13fbcSAllan Jude # define PRIME32_2 XXH_PRIME32_2
1873*5ff13fbcSAllan Jude # define PRIME32_3 XXH_PRIME32_3
1874*5ff13fbcSAllan Jude # define PRIME32_4 XXH_PRIME32_4
1875*5ff13fbcSAllan Jude # define PRIME32_5 XXH_PRIME32_5
1876*5ff13fbcSAllan Jude #endif
1877*5ff13fbcSAllan Jude
1878*5ff13fbcSAllan Jude /*!
1879*5ff13fbcSAllan Jude * @internal
1880*5ff13fbcSAllan Jude * @brief Normal stripe processing routine.
1881*5ff13fbcSAllan Jude *
1882*5ff13fbcSAllan Jude * This shuffles the bits so that any bit from @p input impacts several bits in
1883*5ff13fbcSAllan Jude * @p acc.
1884*5ff13fbcSAllan Jude *
1885*5ff13fbcSAllan Jude * @param acc The accumulator lane.
1886*5ff13fbcSAllan Jude * @param input The stripe of input to mix.
1887*5ff13fbcSAllan Jude * @return The mixed accumulator lane.
1888*5ff13fbcSAllan Jude */
XXH32_round(xxh_u32 acc,xxh_u32 input)1889*5ff13fbcSAllan Jude static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
1890*5ff13fbcSAllan Jude {
1891*5ff13fbcSAllan Jude acc += input * XXH_PRIME32_2;
1892*5ff13fbcSAllan Jude acc = XXH_rotl32(acc, 13);
1893*5ff13fbcSAllan Jude acc *= XXH_PRIME32_1;
1894*5ff13fbcSAllan Jude #if (defined(__SSE4_1__) || defined(__aarch64__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
1895*5ff13fbcSAllan Jude /*
1896*5ff13fbcSAllan Jude * UGLY HACK:
1897*5ff13fbcSAllan Jude * A compiler fence is the only thing that prevents GCC and Clang from
1898*5ff13fbcSAllan Jude * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
1899*5ff13fbcSAllan Jude * reason) without globally disabling SSE4.1.
1900*5ff13fbcSAllan Jude *
1901*5ff13fbcSAllan Jude * The reason we want to avoid vectorization is because despite working on
1902*5ff13fbcSAllan Jude * 4 integers at a time, there are multiple factors slowing XXH32 down on
1903*5ff13fbcSAllan Jude * SSE4:
1904*5ff13fbcSAllan Jude * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
1905*5ff13fbcSAllan Jude * newer chips!) making it slightly slower to multiply four integers at
1906*5ff13fbcSAllan Jude * once compared to four integers independently. Even when pmulld was
1907*5ff13fbcSAllan Jude * fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE
1908*5ff13fbcSAllan Jude * just to multiply unless doing a long operation.
1909*5ff13fbcSAllan Jude *
1910*5ff13fbcSAllan Jude * - Four instructions are required to rotate,
1911*5ff13fbcSAllan Jude * movqda tmp, v // not required with VEX encoding
1912*5ff13fbcSAllan Jude * pslld tmp, 13 // tmp <<= 13
1913*5ff13fbcSAllan Jude * psrld v, 19 // x >>= 19
1914*5ff13fbcSAllan Jude * por v, tmp // x |= tmp
1915*5ff13fbcSAllan Jude * compared to one for scalar:
1916*5ff13fbcSAllan Jude * roll v, 13 // reliably fast across the board
1917*5ff13fbcSAllan Jude * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason
1918*5ff13fbcSAllan Jude *
1919*5ff13fbcSAllan Jude * - Instruction level parallelism is actually more beneficial here because
1920*5ff13fbcSAllan Jude * the SIMD actually serializes this operation: While v1 is rotating, v2
1921*5ff13fbcSAllan Jude * can load data, while v3 can multiply. SSE forces them to operate
1922*5ff13fbcSAllan Jude * together.
1923*5ff13fbcSAllan Jude *
1924*5ff13fbcSAllan Jude * This is also enabled on AArch64, as Clang autovectorizes it incorrectly
1925*5ff13fbcSAllan Jude * and it is pointless writing a NEON implementation that is basically the
1926*5ff13fbcSAllan Jude * same speed as scalar for XXH32.
1927*5ff13fbcSAllan Jude */
1928*5ff13fbcSAllan Jude XXH_COMPILER_GUARD(acc);
1929*5ff13fbcSAllan Jude #endif
1930*5ff13fbcSAllan Jude return acc;
1931*5ff13fbcSAllan Jude }
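/*
 * For reference, XXH_COMPILER_GUARD (defined earlier in this header) is
 * typically an empty asm statement that ties the variable to a scalar
 * register. A minimal sketch of the idea, assuming a GCC/Clang target:
 *
 *     #if defined(__GNUC__) || defined(__clang__)
 *     #  define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var))
 *     #else
 *     #  define XXH_COMPILER_GUARD(var) ((void)(var))
 *     #endif
 *
 * The empty asm both consumes and produces `var`, so the optimizer must
 * keep it in a scalar register; this breaks the vectorizer's dependence
 * analysis without emitting any actual instruction.
 */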
1932*5ff13fbcSAllan Jude
1933*5ff13fbcSAllan Jude /*!
1934*5ff13fbcSAllan Jude * @internal
1935*5ff13fbcSAllan Jude * @brief Mixes all bits to finalize the hash.
1936*5ff13fbcSAllan Jude *
1937*5ff13fbcSAllan Jude * The final mix ensures that all input bits have a chance to impact any bit in
1938*5ff13fbcSAllan Jude * the output digest, resulting in an unbiased distribution.
1939*5ff13fbcSAllan Jude *
1940*5ff13fbcSAllan Jude * @param h32 The hash to avalanche.
1941*5ff13fbcSAllan Jude * @return The avalanched hash.
1942*5ff13fbcSAllan Jude */
1943*5ff13fbcSAllan Jude static xxh_u32 XXH32_avalanche(xxh_u32 h32)
1944*5ff13fbcSAllan Jude {
1945*5ff13fbcSAllan Jude h32 ^= h32 >> 15;
1946*5ff13fbcSAllan Jude h32 *= XXH_PRIME32_2;
1947*5ff13fbcSAllan Jude h32 ^= h32 >> 13;
1948*5ff13fbcSAllan Jude h32 *= XXH_PRIME32_3;
1949*5ff13fbcSAllan Jude h32 ^= h32 >> 16;
1950*5ff13fbcSAllan Jude     return h32;
1951*5ff13fbcSAllan Jude }
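/*
 * The avalanche property can be spot-checked empirically. An illustrative
 * sketch (not part of the library; __builtin_popcount is a GCC/Clang
 * builtin, and x is an arbitrary value):
 *
 *     xxh_u32 const x = 0x12345678;
 *     int const diff = __builtin_popcount(XXH32_avalanche(x)
 *                                         ^ XXH32_avalanche(x ^ 1));
 *     // flipping one input bit flips ~16 of the 32 output bits on average
 */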
1952*5ff13fbcSAllan Jude
1953*5ff13fbcSAllan Jude #define XXH_get32bits(p) XXH_readLE32_align(p, align)
1954*5ff13fbcSAllan Jude
1955*5ff13fbcSAllan Jude /*!
1956*5ff13fbcSAllan Jude * @internal
1957*5ff13fbcSAllan Jude * @brief Processes the last 0-15 bytes of @p ptr.
1958*5ff13fbcSAllan Jude *
1959*5ff13fbcSAllan Jude * There may be up to 15 bytes remaining to consume from the input.
1960*5ff13fbcSAllan Jude * This final stage will digest them to ensure that all input bytes are present
1961*5ff13fbcSAllan Jude * in the final mix.
1962*5ff13fbcSAllan Jude *
1963*5ff13fbcSAllan Jude * @param h32 The hash to finalize.
1964*5ff13fbcSAllan Jude * @param ptr The pointer to the remaining input.
1965*5ff13fbcSAllan Jude * @param len The remaining length, modulo 16.
1966*5ff13fbcSAllan Jude * @param align Whether @p ptr is aligned.
1967*5ff13fbcSAllan Jude * @return The finalized hash.
1968*5ff13fbcSAllan Jude */
1969*5ff13fbcSAllan Jude static xxh_u32
1970*5ff13fbcSAllan Jude XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
1971*5ff13fbcSAllan Jude {
1972*5ff13fbcSAllan Jude #define XXH_PROCESS1 do { \
1973*5ff13fbcSAllan Jude h32 += (*ptr++) * XXH_PRIME32_5; \
1974*5ff13fbcSAllan Jude h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1; \
1975*5ff13fbcSAllan Jude } while (0)
1976*5ff13fbcSAllan Jude
1977*5ff13fbcSAllan Jude #define XXH_PROCESS4 do { \
1978*5ff13fbcSAllan Jude h32 += XXH_get32bits(ptr) * XXH_PRIME32_3; \
1979*5ff13fbcSAllan Jude ptr += 4; \
1980*5ff13fbcSAllan Jude h32 = XXH_rotl32(h32, 17) * XXH_PRIME32_4; \
1981*5ff13fbcSAllan Jude } while (0)
1982*5ff13fbcSAllan Jude
1983*5ff13fbcSAllan Jude if (ptr==NULL) XXH_ASSERT(len == 0);
1984*5ff13fbcSAllan Jude
1985*5ff13fbcSAllan Jude /* Compact rerolled version; generally faster */
1986*5ff13fbcSAllan Jude if (!XXH32_ENDJMP) {
1987*5ff13fbcSAllan Jude len &= 15;
1988*5ff13fbcSAllan Jude while (len >= 4) {
1989*5ff13fbcSAllan Jude XXH_PROCESS4;
1990*5ff13fbcSAllan Jude len -= 4;
1991*5ff13fbcSAllan Jude }
1992*5ff13fbcSAllan Jude while (len > 0) {
1993*5ff13fbcSAllan Jude XXH_PROCESS1;
1994*5ff13fbcSAllan Jude --len;
1995*5ff13fbcSAllan Jude }
1996*5ff13fbcSAllan Jude return XXH32_avalanche(h32);
1997*5ff13fbcSAllan Jude } else {
1998*5ff13fbcSAllan Jude switch(len&15) /* or switch(bEnd - p) */ {
1999*5ff13fbcSAllan Jude case 12: XXH_PROCESS4;
2000*5ff13fbcSAllan Jude XXH_FALLTHROUGH;
2001*5ff13fbcSAllan Jude case 8: XXH_PROCESS4;
2002*5ff13fbcSAllan Jude XXH_FALLTHROUGH;
2003*5ff13fbcSAllan Jude case 4: XXH_PROCESS4;
2004*5ff13fbcSAllan Jude return XXH32_avalanche(h32);
2005*5ff13fbcSAllan Jude
2006*5ff13fbcSAllan Jude case 13: XXH_PROCESS4;
2007*5ff13fbcSAllan Jude XXH_FALLTHROUGH;
2008*5ff13fbcSAllan Jude case 9: XXH_PROCESS4;
2009*5ff13fbcSAllan Jude XXH_FALLTHROUGH;
2010*5ff13fbcSAllan Jude case 5: XXH_PROCESS4;
2011*5ff13fbcSAllan Jude XXH_PROCESS1;
2012*5ff13fbcSAllan Jude return XXH32_avalanche(h32);
2013*5ff13fbcSAllan Jude
2014*5ff13fbcSAllan Jude case 14: XXH_PROCESS4;
2015*5ff13fbcSAllan Jude XXH_FALLTHROUGH;
2016*5ff13fbcSAllan Jude case 10: XXH_PROCESS4;
2017*5ff13fbcSAllan Jude XXH_FALLTHROUGH;
2018*5ff13fbcSAllan Jude case 6: XXH_PROCESS4;
2019*5ff13fbcSAllan Jude XXH_PROCESS1;
2020*5ff13fbcSAllan Jude XXH_PROCESS1;
2021*5ff13fbcSAllan Jude return XXH32_avalanche(h32);
2022*5ff13fbcSAllan Jude
2023*5ff13fbcSAllan Jude case 15: XXH_PROCESS4;
2024*5ff13fbcSAllan Jude XXH_FALLTHROUGH;
2025*5ff13fbcSAllan Jude case 11: XXH_PROCESS4;
2026*5ff13fbcSAllan Jude XXH_FALLTHROUGH;
2027*5ff13fbcSAllan Jude case 7: XXH_PROCESS4;
2028*5ff13fbcSAllan Jude XXH_FALLTHROUGH;
2029*5ff13fbcSAllan Jude case 3: XXH_PROCESS1;
2030*5ff13fbcSAllan Jude XXH_FALLTHROUGH;
2031*5ff13fbcSAllan Jude case 2: XXH_PROCESS1;
2032*5ff13fbcSAllan Jude XXH_FALLTHROUGH;
2033*5ff13fbcSAllan Jude case 1: XXH_PROCESS1;
2034*5ff13fbcSAllan Jude XXH_FALLTHROUGH;
2035*5ff13fbcSAllan Jude case 0: return XXH32_avalanche(h32);
2036*5ff13fbcSAllan Jude }
2037*5ff13fbcSAllan Jude XXH_ASSERT(0);
2038*5ff13fbcSAllan Jude return h32; /* reaching this point is deemed impossible */
2039*5ff13fbcSAllan Jude }
2040*5ff13fbcSAllan Jude }
2041*5ff13fbcSAllan Jude
2042*5ff13fbcSAllan Jude #ifdef XXH_OLD_NAMES
2043*5ff13fbcSAllan Jude # define PROCESS1 XXH_PROCESS1
2044*5ff13fbcSAllan Jude # define PROCESS4 XXH_PROCESS4
2045*5ff13fbcSAllan Jude #else
2046*5ff13fbcSAllan Jude # undef XXH_PROCESS1
2047*5ff13fbcSAllan Jude # undef XXH_PROCESS4
2048*5ff13fbcSAllan Jude #endif
2049*5ff13fbcSAllan Jude
2050*5ff13fbcSAllan Jude /*!
2051*5ff13fbcSAllan Jude * @internal
2052*5ff13fbcSAllan Jude * @brief The implementation for @ref XXH32().
2053*5ff13fbcSAllan Jude *
2054*5ff13fbcSAllan Jude  * @param input, len, seed Directly passed from @ref XXH32().
2055*5ff13fbcSAllan Jude * @param align Whether @p input is aligned.
2056*5ff13fbcSAllan Jude * @return The calculated hash.
2057*5ff13fbcSAllan Jude */
2058*5ff13fbcSAllan Jude XXH_FORCE_INLINE xxh_u32
2059*5ff13fbcSAllan Jude XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
2060*5ff13fbcSAllan Jude {
2061*5ff13fbcSAllan Jude xxh_u32 h32;
2062*5ff13fbcSAllan Jude
2063*5ff13fbcSAllan Jude if (input==NULL) XXH_ASSERT(len == 0);
2064*5ff13fbcSAllan Jude
2065*5ff13fbcSAllan Jude if (len>=16) {
2066*5ff13fbcSAllan Jude const xxh_u8* const bEnd = input + len;
2067*5ff13fbcSAllan Jude const xxh_u8* const limit = bEnd - 15;
2068*5ff13fbcSAllan Jude xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
2069*5ff13fbcSAllan Jude xxh_u32 v2 = seed + XXH_PRIME32_2;
2070*5ff13fbcSAllan Jude xxh_u32 v3 = seed + 0;
2071*5ff13fbcSAllan Jude xxh_u32 v4 = seed - XXH_PRIME32_1;
2072*5ff13fbcSAllan Jude
2073*5ff13fbcSAllan Jude do {
2074*5ff13fbcSAllan Jude v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
2075*5ff13fbcSAllan Jude v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
2076*5ff13fbcSAllan Jude v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
2077*5ff13fbcSAllan Jude v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
2078*5ff13fbcSAllan Jude } while (input < limit);
2079*5ff13fbcSAllan Jude
2080*5ff13fbcSAllan Jude h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7)
2081*5ff13fbcSAllan Jude + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
2082*5ff13fbcSAllan Jude } else {
2083*5ff13fbcSAllan Jude h32 = seed + XXH_PRIME32_5;
2084*5ff13fbcSAllan Jude }
2085*5ff13fbcSAllan Jude
2086*5ff13fbcSAllan Jude h32 += (xxh_u32)len;
2087*5ff13fbcSAllan Jude
2088*5ff13fbcSAllan Jude return XXH32_finalize(h32, input, len&15, align);
2089*5ff13fbcSAllan Jude }
2090*5ff13fbcSAllan Jude
2091*5ff13fbcSAllan Jude /*! @ingroup xxh32_family */
2092*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
2093*5ff13fbcSAllan Jude {
2094*5ff13fbcSAllan Jude #if 0
2095*5ff13fbcSAllan Jude /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
2096*5ff13fbcSAllan Jude XXH32_state_t state;
2097*5ff13fbcSAllan Jude XXH32_reset(&state, seed);
2098*5ff13fbcSAllan Jude XXH32_update(&state, (const xxh_u8*)input, len);
2099*5ff13fbcSAllan Jude return XXH32_digest(&state);
2100*5ff13fbcSAllan Jude #else
2101*5ff13fbcSAllan Jude if (XXH_FORCE_ALIGN_CHECK) {
2102*5ff13fbcSAllan Jude if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
2103*5ff13fbcSAllan Jude return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
2104*5ff13fbcSAllan Jude } }
2105*5ff13fbcSAllan Jude
2106*5ff13fbcSAllan Jude return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
2107*5ff13fbcSAllan Jude #endif
2108*5ff13fbcSAllan Jude }
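/*
 * Example of the single-shot API above; the buffer and seed are arbitrary
 * illustrations:
 *
 *     const char data[] = "hello";
 *     XXH32_hash_t const h = XXH32(data, sizeof(data)-1, 0);  // seed = 0
 *
 * The same input, length, and seed produce the same hash on every
 * platform, regardless of endianness or alignment.
 */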
2109*5ff13fbcSAllan Jude
2110*5ff13fbcSAllan Jude
2111*5ff13fbcSAllan Jude
2112*5ff13fbcSAllan Jude /******* Hash streaming *******/
2113*5ff13fbcSAllan Jude /*!
2114*5ff13fbcSAllan Jude * @ingroup xxh32_family
2115*5ff13fbcSAllan Jude */
2116*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
2117*5ff13fbcSAllan Jude {
2118*5ff13fbcSAllan Jude return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
2119*5ff13fbcSAllan Jude }
2120*5ff13fbcSAllan Jude /*! @ingroup xxh32_family */
2121*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
2122*5ff13fbcSAllan Jude {
2123*5ff13fbcSAllan Jude XXH_free(statePtr);
2124*5ff13fbcSAllan Jude return XXH_OK;
2125*5ff13fbcSAllan Jude }
2126*5ff13fbcSAllan Jude
2127*5ff13fbcSAllan Jude /*! @ingroup xxh32_family */
2128*5ff13fbcSAllan Jude XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
2129*5ff13fbcSAllan Jude {
2130*5ff13fbcSAllan Jude XXH_memcpy(dstState, srcState, sizeof(*dstState));
2131*5ff13fbcSAllan Jude }
2132*5ff13fbcSAllan Jude
2133*5ff13fbcSAllan Jude /*! @ingroup xxh32_family */
2134*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
2135*5ff13fbcSAllan Jude {
2136*5ff13fbcSAllan Jude XXH_ASSERT(statePtr != NULL);
2137*5ff13fbcSAllan Jude memset(statePtr, 0, sizeof(*statePtr));
2138*5ff13fbcSAllan Jude statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
2139*5ff13fbcSAllan Jude statePtr->v[1] = seed + XXH_PRIME32_2;
2140*5ff13fbcSAllan Jude statePtr->v[2] = seed + 0;
2141*5ff13fbcSAllan Jude statePtr->v[3] = seed - XXH_PRIME32_1;
2142*5ff13fbcSAllan Jude return XXH_OK;
2143*5ff13fbcSAllan Jude }
2144*5ff13fbcSAllan Jude
2145*5ff13fbcSAllan Jude
2146*5ff13fbcSAllan Jude /*! @ingroup xxh32_family */
2147*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH_errorcode
2148*5ff13fbcSAllan Jude XXH32_update(XXH32_state_t* state, const void* input, size_t len)
2149*5ff13fbcSAllan Jude {
2150*5ff13fbcSAllan Jude if (input==NULL) {
2151*5ff13fbcSAllan Jude XXH_ASSERT(len == 0);
2152*5ff13fbcSAllan Jude return XXH_OK;
2153*5ff13fbcSAllan Jude }
2154*5ff13fbcSAllan Jude
2155*5ff13fbcSAllan Jude { const xxh_u8* p = (const xxh_u8*)input;
2156*5ff13fbcSAllan Jude const xxh_u8* const bEnd = p + len;
2157*5ff13fbcSAllan Jude
2158*5ff13fbcSAllan Jude state->total_len_32 += (XXH32_hash_t)len;
2159*5ff13fbcSAllan Jude state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
2160*5ff13fbcSAllan Jude
2161*5ff13fbcSAllan Jude if (state->memsize + len < 16) { /* fill in tmp buffer */
2162*5ff13fbcSAllan Jude XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
2163*5ff13fbcSAllan Jude state->memsize += (XXH32_hash_t)len;
2164*5ff13fbcSAllan Jude return XXH_OK;
2165*5ff13fbcSAllan Jude }
2166*5ff13fbcSAllan Jude
2167*5ff13fbcSAllan Jude if (state->memsize) { /* some data left from previous update */
2168*5ff13fbcSAllan Jude XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
2169*5ff13fbcSAllan Jude { const xxh_u32* p32 = state->mem32;
2170*5ff13fbcSAllan Jude state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
2171*5ff13fbcSAllan Jude state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
2172*5ff13fbcSAllan Jude state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
2173*5ff13fbcSAllan Jude state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
2174*5ff13fbcSAllan Jude }
2175*5ff13fbcSAllan Jude p += 16-state->memsize;
2176*5ff13fbcSAllan Jude state->memsize = 0;
2177*5ff13fbcSAllan Jude }
2178*5ff13fbcSAllan Jude
2179*5ff13fbcSAllan Jude if (p <= bEnd-16) {
2180*5ff13fbcSAllan Jude const xxh_u8* const limit = bEnd - 16;
2181*5ff13fbcSAllan Jude
2182*5ff13fbcSAllan Jude do {
2183*5ff13fbcSAllan Jude state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
2184*5ff13fbcSAllan Jude state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
2185*5ff13fbcSAllan Jude state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
2186*5ff13fbcSAllan Jude state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
2187*5ff13fbcSAllan Jude } while (p<=limit);
2188*5ff13fbcSAllan Jude
2189*5ff13fbcSAllan Jude }
2190*5ff13fbcSAllan Jude
2191*5ff13fbcSAllan Jude if (p < bEnd) {
2192*5ff13fbcSAllan Jude XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
2193*5ff13fbcSAllan Jude state->memsize = (unsigned)(bEnd-p);
2194*5ff13fbcSAllan Jude }
2195*5ff13fbcSAllan Jude }
2196*5ff13fbcSAllan Jude
2197*5ff13fbcSAllan Jude return XXH_OK;
2198*5ff13fbcSAllan Jude }
2199*5ff13fbcSAllan Jude
2200*5ff13fbcSAllan Jude
2201*5ff13fbcSAllan Jude /*! @ingroup xxh32_family */
2202*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
2203*5ff13fbcSAllan Jude {
2204*5ff13fbcSAllan Jude xxh_u32 h32;
2205*5ff13fbcSAllan Jude
2206*5ff13fbcSAllan Jude if (state->large_len) {
2207*5ff13fbcSAllan Jude h32 = XXH_rotl32(state->v[0], 1)
2208*5ff13fbcSAllan Jude + XXH_rotl32(state->v[1], 7)
2209*5ff13fbcSAllan Jude + XXH_rotl32(state->v[2], 12)
2210*5ff13fbcSAllan Jude + XXH_rotl32(state->v[3], 18);
2211*5ff13fbcSAllan Jude } else {
2212*5ff13fbcSAllan Jude h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
2213*5ff13fbcSAllan Jude }
2214*5ff13fbcSAllan Jude
2215*5ff13fbcSAllan Jude h32 += state->total_len_32;
2216*5ff13fbcSAllan Jude
2217*5ff13fbcSAllan Jude return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
2218*5ff13fbcSAllan Jude }
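/*
 * Example of the streaming API defined above, hashing input in chunks.
 * Chunk boundaries are arbitrary and do not affect the result:
 *
 *     XXH32_state_t* const st = XXH32_createState();
 *     if (st != NULL) {
 *         XXH32_reset(st, 0);                        // seed = 0
 *         XXH32_update(st, "hel", 3);
 *         XXH32_update(st, "lo", 2);
 *         {   XXH32_hash_t const h = XXH32_digest(st);
 *             // h == XXH32("hello", 5, 0)
 *         }
 *         XXH32_freeState(st);
 *     }
 */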
2219*5ff13fbcSAllan Jude
2220*5ff13fbcSAllan Jude
2221*5ff13fbcSAllan Jude /******* Canonical representation *******/
2222*5ff13fbcSAllan Jude
2223*5ff13fbcSAllan Jude /*!
2224*5ff13fbcSAllan Jude * @ingroup xxh32_family
2225*5ff13fbcSAllan Jude  * The default return values from XXH functions are unsigned 32-bit and
2226*5ff13fbcSAllan Jude  * 64-bit integers.
2227*5ff13fbcSAllan Jude *
2228*5ff13fbcSAllan Jude * The canonical representation uses big endian convention, the same convention
2229*5ff13fbcSAllan Jude * as human-readable numbers (large digits first).
2230*5ff13fbcSAllan Jude *
2231*5ff13fbcSAllan Jude * This way, hash values can be written into a file or buffer, remaining
2232*5ff13fbcSAllan Jude * comparable across different systems.
2233*5ff13fbcSAllan Jude *
2234*5ff13fbcSAllan Jude * The following functions allow transformation of hash values to and from their
2235*5ff13fbcSAllan Jude * canonical format.
2236*5ff13fbcSAllan Jude */
2237*5ff13fbcSAllan Jude XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
2238*5ff13fbcSAllan Jude {
2239*5ff13fbcSAllan Jude /* XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); */
2240*5ff13fbcSAllan Jude if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
2241*5ff13fbcSAllan Jude XXH_memcpy(dst, &hash, sizeof(*dst));
2242*5ff13fbcSAllan Jude }
2243*5ff13fbcSAllan Jude /*! @ingroup xxh32_family */
2244*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
2245*5ff13fbcSAllan Jude {
2246*5ff13fbcSAllan Jude return XXH_readBE32(src);
2247*5ff13fbcSAllan Jude }
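/*
 * Example round-trip through the canonical form, e.g. for storing a hash
 * in a file; `h` is a previously computed XXH32_hash_t:
 *
 *     XXH32_canonical_t canon;
 *     XXH32_canonicalFromHash(&canon, h);   // canon.digest[0] holds the MSB
 *     // ... canon.digest (4 bytes) can be written out as-is ...
 *     XXH32_hash_t const h2 = XXH32_hashFromCanonical(&canon);
 *     // h2 == h on any platform
 */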
2248*5ff13fbcSAllan Jude
2249*5ff13fbcSAllan Jude
2250*5ff13fbcSAllan Jude #ifndef XXH_NO_LONG_LONG
2251*5ff13fbcSAllan Jude
2252*5ff13fbcSAllan Jude /* *******************************************************************
2253*5ff13fbcSAllan Jude * 64-bit hash functions
2254*5ff13fbcSAllan Jude *********************************************************************/
2255*5ff13fbcSAllan Jude /*!
2256*5ff13fbcSAllan Jude * @}
2257*5ff13fbcSAllan Jude * @ingroup impl
2258*5ff13fbcSAllan Jude * @{
2259*5ff13fbcSAllan Jude */
2260*5ff13fbcSAllan Jude /******* Memory access *******/
2261*5ff13fbcSAllan Jude
2262*5ff13fbcSAllan Jude typedef XXH64_hash_t xxh_u64;
2263*5ff13fbcSAllan Jude
2264*5ff13fbcSAllan Jude #ifdef XXH_OLD_NAMES
2265*5ff13fbcSAllan Jude # define U64 xxh_u64
2266*5ff13fbcSAllan Jude #endif
2267*5ff13fbcSAllan Jude
2268*5ff13fbcSAllan Jude #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2269*5ff13fbcSAllan Jude /*
2270*5ff13fbcSAllan Jude * Manual byteshift. Best for old compilers which don't inline memcpy.
2271*5ff13fbcSAllan Jude * We actually directly use XXH_readLE64 and XXH_readBE64.
2272*5ff13fbcSAllan Jude */
2273*5ff13fbcSAllan Jude #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
2274*5ff13fbcSAllan Jude
2275*5ff13fbcSAllan Jude /* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
2276*5ff13fbcSAllan Jude static xxh_u64 XXH_read64(const void* memPtr)
2277*5ff13fbcSAllan Jude {
2278*5ff13fbcSAllan Jude return *(const xxh_u64*) memPtr;
2279*5ff13fbcSAllan Jude }
2280*5ff13fbcSAllan Jude
2281*5ff13fbcSAllan Jude #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
2282*5ff13fbcSAllan Jude
2283*5ff13fbcSAllan Jude /*
2284*5ff13fbcSAllan Jude * __pack instructions are safer, but compiler specific, hence potentially
2285*5ff13fbcSAllan Jude * problematic for some compilers.
2286*5ff13fbcSAllan Jude *
2287*5ff13fbcSAllan Jude * Currently only defined for GCC and ICC.
2288*5ff13fbcSAllan Jude */
2289*5ff13fbcSAllan Jude #ifdef XXH_OLD_NAMES
2290*5ff13fbcSAllan Jude typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
2291*5ff13fbcSAllan Jude #endif
2292*5ff13fbcSAllan Jude static xxh_u64 XXH_read64(const void* ptr)
2293*5ff13fbcSAllan Jude {
2294*5ff13fbcSAllan Jude typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64;
2295*5ff13fbcSAllan Jude return ((const xxh_unalign64*)ptr)->u64;
2296*5ff13fbcSAllan Jude }
2297*5ff13fbcSAllan Jude
2298*5ff13fbcSAllan Jude #else
2299*5ff13fbcSAllan Jude
2300*5ff13fbcSAllan Jude /*
2301*5ff13fbcSAllan Jude * Portable and safe solution. Generally efficient.
2302*5ff13fbcSAllan Jude * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
2303*5ff13fbcSAllan Jude */
2304*5ff13fbcSAllan Jude static xxh_u64 XXH_read64(const void* memPtr)
2305*5ff13fbcSAllan Jude {
2306*5ff13fbcSAllan Jude xxh_u64 val;
2307*5ff13fbcSAllan Jude XXH_memcpy(&val, memPtr, sizeof(val));
2308*5ff13fbcSAllan Jude return val;
2309*5ff13fbcSAllan Jude }
2310*5ff13fbcSAllan Jude
2311*5ff13fbcSAllan Jude #endif   /* XXH_FORCE_MEMORY_ACCESS */
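/*
 * XXH_FORCE_MEMORY_ACCESS is normally left undefined, selecting the safe
 * XXH_memcpy() path above. If benchmarks on a specific target show a win,
 * a strategy can be forced at build time, e.g. (illustrative command):
 *
 *     cc -O3 -DXXH_FORCE_MEMORY_ACCESS=1 ...   (packed-union loads)
 *
 * Value 2 selects direct dereference (requires hardware support for
 * unaligned access), and 3 selects the endian-independent byteshift loads.
 */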
2312*5ff13fbcSAllan Jude
2313*5ff13fbcSAllan Jude #if defined(_MSC_VER) /* Visual Studio */
2314*5ff13fbcSAllan Jude # define XXH_swap64 _byteswap_uint64
2315*5ff13fbcSAllan Jude #elif XXH_GCC_VERSION >= 403
2316*5ff13fbcSAllan Jude # define XXH_swap64 __builtin_bswap64
2317*5ff13fbcSAllan Jude #else
2318*5ff13fbcSAllan Jude static xxh_u64 XXH_swap64(xxh_u64 x)
2319*5ff13fbcSAllan Jude {
2320*5ff13fbcSAllan Jude return ((x << 56) & 0xff00000000000000ULL) |
2321*5ff13fbcSAllan Jude ((x << 40) & 0x00ff000000000000ULL) |
2322*5ff13fbcSAllan Jude ((x << 24) & 0x0000ff0000000000ULL) |
2323*5ff13fbcSAllan Jude ((x << 8) & 0x000000ff00000000ULL) |
2324*5ff13fbcSAllan Jude ((x >> 8) & 0x00000000ff000000ULL) |
2325*5ff13fbcSAllan Jude ((x >> 24) & 0x0000000000ff0000ULL) |
2326*5ff13fbcSAllan Jude ((x >> 40) & 0x000000000000ff00ULL) |
2327*5ff13fbcSAllan Jude ((x >> 56) & 0x00000000000000ffULL);
2328*5ff13fbcSAllan Jude }
2329*5ff13fbcSAllan Jude #endif
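/*
 * XXH_swap64 reverses byte order, for example:
 *
 *     XXH_swap64(0x1122334455667788ULL) == 0x8877665544332211ULL
 *
 * It is used below to convert between the native byte order and the
 * little-endian order in which XXH64 consumes its input.
 */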
2330*5ff13fbcSAllan Jude
2331*5ff13fbcSAllan Jude
2332*5ff13fbcSAllan Jude /* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
2333*5ff13fbcSAllan Jude #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2334*5ff13fbcSAllan Jude
2335*5ff13fbcSAllan Jude XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
2336*5ff13fbcSAllan Jude {
2337*5ff13fbcSAllan Jude const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2338*5ff13fbcSAllan Jude return bytePtr[0]
2339*5ff13fbcSAllan Jude | ((xxh_u64)bytePtr[1] << 8)
2340*5ff13fbcSAllan Jude | ((xxh_u64)bytePtr[2] << 16)
2341*5ff13fbcSAllan Jude | ((xxh_u64)bytePtr[3] << 24)
2342*5ff13fbcSAllan Jude | ((xxh_u64)bytePtr[4] << 32)
2343*5ff13fbcSAllan Jude | ((xxh_u64)bytePtr[5] << 40)
2344*5ff13fbcSAllan Jude | ((xxh_u64)bytePtr[6] << 48)
2345*5ff13fbcSAllan Jude | ((xxh_u64)bytePtr[7] << 56);
2346*5ff13fbcSAllan Jude }
2347*5ff13fbcSAllan Jude
2348*5ff13fbcSAllan Jude XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
2349*5ff13fbcSAllan Jude {
2350*5ff13fbcSAllan Jude const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2351*5ff13fbcSAllan Jude return bytePtr[7]
2352*5ff13fbcSAllan Jude | ((xxh_u64)bytePtr[6] << 8)
2353*5ff13fbcSAllan Jude | ((xxh_u64)bytePtr[5] << 16)
2354*5ff13fbcSAllan Jude | ((xxh_u64)bytePtr[4] << 24)
2355*5ff13fbcSAllan Jude | ((xxh_u64)bytePtr[3] << 32)
2356*5ff13fbcSAllan Jude | ((xxh_u64)bytePtr[2] << 40)
2357*5ff13fbcSAllan Jude | ((xxh_u64)bytePtr[1] << 48)
2358*5ff13fbcSAllan Jude | ((xxh_u64)bytePtr[0] << 56);
2359*5ff13fbcSAllan Jude }
2360*5ff13fbcSAllan Jude
2361*5ff13fbcSAllan Jude #else
2362*5ff13fbcSAllan Jude XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
2363*5ff13fbcSAllan Jude {
2364*5ff13fbcSAllan Jude return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
2365*5ff13fbcSAllan Jude }
2366*5ff13fbcSAllan Jude
2367*5ff13fbcSAllan Jude static xxh_u64 XXH_readBE64(const void* ptr)
2368*5ff13fbcSAllan Jude {
2369*5ff13fbcSAllan Jude return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
2370*5ff13fbcSAllan Jude }
2371*5ff13fbcSAllan Jude #endif
2372*5ff13fbcSAllan Jude
2373*5ff13fbcSAllan Jude XXH_FORCE_INLINE xxh_u64
2374*5ff13fbcSAllan Jude XXH_readLE64_align(const void* ptr, XXH_alignment align)
2375*5ff13fbcSAllan Jude {
2376*5ff13fbcSAllan Jude if (align==XXH_unaligned)
2377*5ff13fbcSAllan Jude return XXH_readLE64(ptr);
2378*5ff13fbcSAllan Jude else
2379*5ff13fbcSAllan Jude return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
2380*5ff13fbcSAllan Jude }
2381*5ff13fbcSAllan Jude
2382*5ff13fbcSAllan Jude
2383*5ff13fbcSAllan Jude /******* xxh64 *******/
2384*5ff13fbcSAllan Jude /*!
2385*5ff13fbcSAllan Jude * @}
2386*5ff13fbcSAllan Jude * @defgroup xxh64_impl XXH64 implementation
2387*5ff13fbcSAllan Jude * @ingroup impl
2388*5ff13fbcSAllan Jude * @{
2389*5ff13fbcSAllan Jude */
2390*5ff13fbcSAllan Jude /* #define rather than static const, to be used as initializers */
2391*5ff13fbcSAllan Jude #define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
2392*5ff13fbcSAllan Jude #define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
2393*5ff13fbcSAllan Jude #define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
2394*5ff13fbcSAllan Jude #define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
2395*5ff13fbcSAllan Jude #define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
2396*5ff13fbcSAllan Jude
2397*5ff13fbcSAllan Jude #ifdef XXH_OLD_NAMES
2398*5ff13fbcSAllan Jude # define PRIME64_1 XXH_PRIME64_1
2399*5ff13fbcSAllan Jude # define PRIME64_2 XXH_PRIME64_2
2400*5ff13fbcSAllan Jude # define PRIME64_3 XXH_PRIME64_3
2401*5ff13fbcSAllan Jude # define PRIME64_4 XXH_PRIME64_4
2402*5ff13fbcSAllan Jude # define PRIME64_5 XXH_PRIME64_5
2403*5ff13fbcSAllan Jude #endif
2404*5ff13fbcSAllan Jude
2405*5ff13fbcSAllan Jude static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
2406*5ff13fbcSAllan Jude {
2407*5ff13fbcSAllan Jude acc += input * XXH_PRIME64_2;
2408*5ff13fbcSAllan Jude acc = XXH_rotl64(acc, 31);
2409*5ff13fbcSAllan Jude acc *= XXH_PRIME64_1;
2410*5ff13fbcSAllan Jude return acc;
2411*5ff13fbcSAllan Jude }
2412*5ff13fbcSAllan Jude
2413*5ff13fbcSAllan Jude static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
2414*5ff13fbcSAllan Jude {
2415*5ff13fbcSAllan Jude val = XXH64_round(0, val);
2416*5ff13fbcSAllan Jude acc ^= val;
2417*5ff13fbcSAllan Jude acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
2418*5ff13fbcSAllan Jude return acc;
2419*5ff13fbcSAllan Jude }
2420*5ff13fbcSAllan Jude
2421*5ff13fbcSAllan Jude static xxh_u64 XXH64_avalanche(xxh_u64 h64)
2422*5ff13fbcSAllan Jude {
2423*5ff13fbcSAllan Jude h64 ^= h64 >> 33;
2424*5ff13fbcSAllan Jude h64 *= XXH_PRIME64_2;
2425*5ff13fbcSAllan Jude h64 ^= h64 >> 29;
2426*5ff13fbcSAllan Jude h64 *= XXH_PRIME64_3;
2427*5ff13fbcSAllan Jude h64 ^= h64 >> 32;
2428*5ff13fbcSAllan Jude return h64;
2429*5ff13fbcSAllan Jude }
2430*5ff13fbcSAllan Jude
2431*5ff13fbcSAllan Jude
2432*5ff13fbcSAllan Jude #define XXH_get64bits(p) XXH_readLE64_align(p, align)
2433*5ff13fbcSAllan Jude
2434*5ff13fbcSAllan Jude static xxh_u64
2435*5ff13fbcSAllan Jude XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
2436*5ff13fbcSAllan Jude {
2437*5ff13fbcSAllan Jude if (ptr==NULL) XXH_ASSERT(len == 0);
2438*5ff13fbcSAllan Jude len &= 31;
2439*5ff13fbcSAllan Jude while (len >= 8) {
2440*5ff13fbcSAllan Jude xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
2441*5ff13fbcSAllan Jude ptr += 8;
2442*5ff13fbcSAllan Jude h64 ^= k1;
2443*5ff13fbcSAllan Jude h64 = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
2444*5ff13fbcSAllan Jude len -= 8;
2445*5ff13fbcSAllan Jude }
2446*5ff13fbcSAllan Jude if (len >= 4) {
2447*5ff13fbcSAllan Jude h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
2448*5ff13fbcSAllan Jude ptr += 4;
2449*5ff13fbcSAllan Jude h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
2450*5ff13fbcSAllan Jude len -= 4;
2451*5ff13fbcSAllan Jude }
2452*5ff13fbcSAllan Jude while (len > 0) {
2453*5ff13fbcSAllan Jude h64 ^= (*ptr++) * XXH_PRIME64_5;
2454*5ff13fbcSAllan Jude h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1;
2455*5ff13fbcSAllan Jude --len;
2456*5ff13fbcSAllan Jude }
2457*5ff13fbcSAllan Jude return XXH64_avalanche(h64);
2458*5ff13fbcSAllan Jude }
2459*5ff13fbcSAllan Jude
2460*5ff13fbcSAllan Jude #ifdef XXH_OLD_NAMES
2461*5ff13fbcSAllan Jude # define PROCESS1_64 XXH_PROCESS1_64
2462*5ff13fbcSAllan Jude # define PROCESS4_64 XXH_PROCESS4_64
2463*5ff13fbcSAllan Jude # define PROCESS8_64 XXH_PROCESS8_64
2464*5ff13fbcSAllan Jude #else
2465*5ff13fbcSAllan Jude # undef XXH_PROCESS1_64
2466*5ff13fbcSAllan Jude # undef XXH_PROCESS4_64
2467*5ff13fbcSAllan Jude # undef XXH_PROCESS8_64
2468*5ff13fbcSAllan Jude #endif
2469*5ff13fbcSAllan Jude
2470*5ff13fbcSAllan Jude XXH_FORCE_INLINE xxh_u64
2471*5ff13fbcSAllan Jude XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
2472*5ff13fbcSAllan Jude {
2473*5ff13fbcSAllan Jude xxh_u64 h64;
2474*5ff13fbcSAllan Jude if (input==NULL) XXH_ASSERT(len == 0);
2475*5ff13fbcSAllan Jude
2476*5ff13fbcSAllan Jude if (len>=32) {
2477*5ff13fbcSAllan Jude const xxh_u8* const bEnd = input + len;
2478*5ff13fbcSAllan Jude const xxh_u8* const limit = bEnd - 31;
2479*5ff13fbcSAllan Jude xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
2480*5ff13fbcSAllan Jude xxh_u64 v2 = seed + XXH_PRIME64_2;
2481*5ff13fbcSAllan Jude xxh_u64 v3 = seed + 0;
2482*5ff13fbcSAllan Jude xxh_u64 v4 = seed - XXH_PRIME64_1;
2483*5ff13fbcSAllan Jude
2484*5ff13fbcSAllan Jude do {
2485*5ff13fbcSAllan Jude v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
2486*5ff13fbcSAllan Jude v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
2487*5ff13fbcSAllan Jude v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
2488*5ff13fbcSAllan Jude v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
2489*5ff13fbcSAllan Jude } while (input<limit);
2490*5ff13fbcSAllan Jude
2491*5ff13fbcSAllan Jude h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
2492*5ff13fbcSAllan Jude h64 = XXH64_mergeRound(h64, v1);
2493*5ff13fbcSAllan Jude h64 = XXH64_mergeRound(h64, v2);
2494*5ff13fbcSAllan Jude h64 = XXH64_mergeRound(h64, v3);
2495*5ff13fbcSAllan Jude h64 = XXH64_mergeRound(h64, v4);
2496*5ff13fbcSAllan Jude
2497*5ff13fbcSAllan Jude } else {
2498*5ff13fbcSAllan Jude h64 = seed + XXH_PRIME64_5;
2499*5ff13fbcSAllan Jude }
2500*5ff13fbcSAllan Jude
2501*5ff13fbcSAllan Jude h64 += (xxh_u64) len;
2502*5ff13fbcSAllan Jude
2503*5ff13fbcSAllan Jude return XXH64_finalize(h64, input, len, align);
2504*5ff13fbcSAllan Jude }
2505*5ff13fbcSAllan Jude
2506*5ff13fbcSAllan Jude
2507*5ff13fbcSAllan Jude /*! @ingroup xxh64_family */
2508*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed)
2509*5ff13fbcSAllan Jude {
2510*5ff13fbcSAllan Jude #if 0
2511*5ff13fbcSAllan Jude /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
2512*5ff13fbcSAllan Jude XXH64_state_t state;
2513*5ff13fbcSAllan Jude XXH64_reset(&state, seed);
2514*5ff13fbcSAllan Jude XXH64_update(&state, (const xxh_u8*)input, len);
2515*5ff13fbcSAllan Jude return XXH64_digest(&state);
2516*5ff13fbcSAllan Jude #else
2517*5ff13fbcSAllan Jude if (XXH_FORCE_ALIGN_CHECK) {
2518*5ff13fbcSAllan Jude if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
2519*5ff13fbcSAllan Jude return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
2520*5ff13fbcSAllan Jude } }
2521*5ff13fbcSAllan Jude
2522*5ff13fbcSAllan Jude return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
2523*5ff13fbcSAllan Jude
2524*5ff13fbcSAllan Jude #endif
2525*5ff13fbcSAllan Jude }
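/*
 * Example usage; the seed selects an independent hash function, so the
 * same data hashed with two seeds yields unrelated results (`buf` and
 * `bufSize` are hypothetical):
 *
 *     XXH64_hash_t const h1 = XXH64(buf, bufSize, 0);
 *     XXH64_hash_t const h2 = XXH64(buf, bufSize, 0xdeadbeefULL);
 *     // h1 != h2 (except by coincidence) for the same input
 */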
2526*5ff13fbcSAllan Jude
2527*5ff13fbcSAllan Jude /******* Hash Streaming *******/
2528*5ff13fbcSAllan Jude
2529*5ff13fbcSAllan Jude /*! @ingroup xxh64_family*/
2530*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
2531*5ff13fbcSAllan Jude {
2532*5ff13fbcSAllan Jude return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
2533*5ff13fbcSAllan Jude }
2534*5ff13fbcSAllan Jude /*! @ingroup xxh64_family */
2535*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
2536*5ff13fbcSAllan Jude {
2537*5ff13fbcSAllan Jude XXH_free(statePtr);
2538*5ff13fbcSAllan Jude return XXH_OK;
2539*5ff13fbcSAllan Jude }
2540*5ff13fbcSAllan Jude
2541*5ff13fbcSAllan Jude /*! @ingroup xxh64_family */
2542*5ff13fbcSAllan Jude XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
2543*5ff13fbcSAllan Jude {
2544*5ff13fbcSAllan Jude XXH_memcpy(dstState, srcState, sizeof(*dstState));
2545*5ff13fbcSAllan Jude }
2546*5ff13fbcSAllan Jude
2547*5ff13fbcSAllan Jude /*! @ingroup xxh64_family */
2548*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed)
2549*5ff13fbcSAllan Jude {
2550*5ff13fbcSAllan Jude XXH_ASSERT(statePtr != NULL);
2551*5ff13fbcSAllan Jude memset(statePtr, 0, sizeof(*statePtr));
2552*5ff13fbcSAllan Jude statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
2553*5ff13fbcSAllan Jude statePtr->v[1] = seed + XXH_PRIME64_2;
2554*5ff13fbcSAllan Jude statePtr->v[2] = seed + 0;
2555*5ff13fbcSAllan Jude statePtr->v[3] = seed - XXH_PRIME64_1;
2556*5ff13fbcSAllan Jude return XXH_OK;
2557*5ff13fbcSAllan Jude }
2558*5ff13fbcSAllan Jude
2559*5ff13fbcSAllan Jude /*! @ingroup xxh64_family */
2560*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH_errorcode
2561*5ff13fbcSAllan Jude XXH64_update (XXH64_state_t* state, const void* input, size_t len)
2562*5ff13fbcSAllan Jude {
2563*5ff13fbcSAllan Jude if (input==NULL) {
2564*5ff13fbcSAllan Jude XXH_ASSERT(len == 0);
2565*5ff13fbcSAllan Jude return XXH_OK;
2566*5ff13fbcSAllan Jude }
2567*5ff13fbcSAllan Jude
2568*5ff13fbcSAllan Jude { const xxh_u8* p = (const xxh_u8*)input;
2569*5ff13fbcSAllan Jude const xxh_u8* const bEnd = p + len;
2570*5ff13fbcSAllan Jude
2571*5ff13fbcSAllan Jude state->total_len += len;
2572*5ff13fbcSAllan Jude
2573*5ff13fbcSAllan Jude if (state->memsize + len < 32) { /* fill in tmp buffer */
2574*5ff13fbcSAllan Jude XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
2575*5ff13fbcSAllan Jude state->memsize += (xxh_u32)len;
2576*5ff13fbcSAllan Jude return XXH_OK;
2577*5ff13fbcSAllan Jude }
2578*5ff13fbcSAllan Jude
2579*5ff13fbcSAllan Jude if (state->memsize) { /* tmp buffer is full */
2580*5ff13fbcSAllan Jude XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
2581*5ff13fbcSAllan Jude state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
2582*5ff13fbcSAllan Jude state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
2583*5ff13fbcSAllan Jude state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
2584*5ff13fbcSAllan Jude state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
2585*5ff13fbcSAllan Jude p += 32 - state->memsize;
2586*5ff13fbcSAllan Jude state->memsize = 0;
2587*5ff13fbcSAllan Jude }
2588*5ff13fbcSAllan Jude
2589*5ff13fbcSAllan Jude if (p+32 <= bEnd) {
2590*5ff13fbcSAllan Jude const xxh_u8* const limit = bEnd - 32;
2591*5ff13fbcSAllan Jude
2592*5ff13fbcSAllan Jude do {
2593*5ff13fbcSAllan Jude state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
2594*5ff13fbcSAllan Jude state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
2595*5ff13fbcSAllan Jude state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
2596*5ff13fbcSAllan Jude state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
2597*5ff13fbcSAllan Jude } while (p<=limit);
2598*5ff13fbcSAllan Jude
2599*5ff13fbcSAllan Jude }
2600*5ff13fbcSAllan Jude
2601*5ff13fbcSAllan Jude if (p < bEnd) {
2602*5ff13fbcSAllan Jude XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
2603*5ff13fbcSAllan Jude state->memsize = (unsigned)(bEnd-p);
2604*5ff13fbcSAllan Jude }
2605*5ff13fbcSAllan Jude }
2606*5ff13fbcSAllan Jude
2607*5ff13fbcSAllan Jude return XXH_OK;
2608*5ff13fbcSAllan Jude }
2609*5ff13fbcSAllan Jude
2610*5ff13fbcSAllan Jude
2611*5ff13fbcSAllan Jude /*! @ingroup xxh64_family */
2612*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t* state)
2613*5ff13fbcSAllan Jude {
2614*5ff13fbcSAllan Jude xxh_u64 h64;
2615*5ff13fbcSAllan Jude
2616*5ff13fbcSAllan Jude if (state->total_len >= 32) {
2617*5ff13fbcSAllan Jude h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
2618*5ff13fbcSAllan Jude h64 = XXH64_mergeRound(h64, state->v[0]);
2619*5ff13fbcSAllan Jude h64 = XXH64_mergeRound(h64, state->v[1]);
2620*5ff13fbcSAllan Jude h64 = XXH64_mergeRound(h64, state->v[2]);
2621*5ff13fbcSAllan Jude h64 = XXH64_mergeRound(h64, state->v[3]);
2622*5ff13fbcSAllan Jude } else {
2623*5ff13fbcSAllan Jude h64 = state->v[2] /*seed*/ + XXH_PRIME64_5;
2624*5ff13fbcSAllan Jude }
2625*5ff13fbcSAllan Jude
2626*5ff13fbcSAllan Jude h64 += (xxh_u64) state->total_len;
2627*5ff13fbcSAllan Jude
2628*5ff13fbcSAllan Jude return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
2629*5ff13fbcSAllan Jude }
2630*5ff13fbcSAllan Jude
2631*5ff13fbcSAllan Jude
2632*5ff13fbcSAllan Jude /******* Canonical representation *******/
2633*5ff13fbcSAllan Jude
2634*5ff13fbcSAllan Jude /*! @ingroup xxh64_family */
2635*5ff13fbcSAllan Jude XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
2636*5ff13fbcSAllan Jude {
2637*5ff13fbcSAllan Jude /* XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); */
2638*5ff13fbcSAllan Jude if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
2639*5ff13fbcSAllan Jude XXH_memcpy(dst, &hash, sizeof(*dst));
2640*5ff13fbcSAllan Jude }
2641*5ff13fbcSAllan Jude
2642*5ff13fbcSAllan Jude /*! @ingroup xxh64_family */
2643*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
2644*5ff13fbcSAllan Jude {
2645*5ff13fbcSAllan Jude return XXH_readBE64(src);
2646*5ff13fbcSAllan Jude }
2647*5ff13fbcSAllan Jude
2648*5ff13fbcSAllan Jude #ifndef XXH_NO_XXH3
2649*5ff13fbcSAllan Jude
2650*5ff13fbcSAllan Jude /* *********************************************************************
2651*5ff13fbcSAllan Jude * XXH3
2652*5ff13fbcSAllan Jude * New generation hash designed for speed on small keys and vectorization
2653*5ff13fbcSAllan Jude ************************************************************************ */
2654*5ff13fbcSAllan Jude /*!
2655*5ff13fbcSAllan Jude * @}
2656*5ff13fbcSAllan Jude * @defgroup xxh3_impl XXH3 implementation
2657*5ff13fbcSAllan Jude * @ingroup impl
2658*5ff13fbcSAllan Jude * @{
2659*5ff13fbcSAllan Jude */
2660*5ff13fbcSAllan Jude
2661*5ff13fbcSAllan Jude /* === Compiler specifics === */
2662*5ff13fbcSAllan Jude
2663*5ff13fbcSAllan Jude #if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
2664*5ff13fbcSAllan Jude # define XXH_RESTRICT /* disable */
2665*5ff13fbcSAllan Jude #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */
2666*5ff13fbcSAllan Jude # define XXH_RESTRICT restrict
2667*5ff13fbcSAllan Jude #else
2668*5ff13fbcSAllan Jude /* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */
2669*5ff13fbcSAllan Jude # define XXH_RESTRICT /* disable */
2670*5ff13fbcSAllan Jude #endif
2671*5ff13fbcSAllan Jude
2672*5ff13fbcSAllan Jude #if (defined(__GNUC__) && (__GNUC__ >= 3)) \
2673*5ff13fbcSAllan Jude || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
2674*5ff13fbcSAllan Jude || defined(__clang__)
2675*5ff13fbcSAllan Jude # define XXH_likely(x) __builtin_expect(x, 1)
2676*5ff13fbcSAllan Jude # define XXH_unlikely(x) __builtin_expect(x, 0)
2677*5ff13fbcSAllan Jude #else
2678*5ff13fbcSAllan Jude # define XXH_likely(x) (x)
2679*5ff13fbcSAllan Jude # define XXH_unlikely(x) (x)
2680*5ff13fbcSAllan Jude #endif
2681*5ff13fbcSAllan Jude
2682*5ff13fbcSAllan Jude #if defined(__GNUC__) || defined(__clang__)
2683*5ff13fbcSAllan Jude # if defined(__ARM_NEON__) || defined(__ARM_NEON) \
2684*5ff13fbcSAllan Jude || defined(__aarch64__) || defined(_M_ARM) \
2685*5ff13fbcSAllan Jude || defined(_M_ARM64) || defined(_M_ARM64EC)
2686*5ff13fbcSAllan Jude # define inline __inline__ /* circumvent a clang bug */
2687*5ff13fbcSAllan Jude # include <arm_neon.h>
2688*5ff13fbcSAllan Jude # undef inline
2689*5ff13fbcSAllan Jude # elif defined(__AVX2__)
2690*5ff13fbcSAllan Jude # include <immintrin.h>
2691*5ff13fbcSAllan Jude # elif defined(__SSE2__)
2692*5ff13fbcSAllan Jude # include <emmintrin.h>
2693*5ff13fbcSAllan Jude # endif
2694*5ff13fbcSAllan Jude #endif
2695*5ff13fbcSAllan Jude
2696*5ff13fbcSAllan Jude #if defined(_MSC_VER)
2697*5ff13fbcSAllan Jude # include <intrin.h>
2698*5ff13fbcSAllan Jude #endif
2699*5ff13fbcSAllan Jude
2700*5ff13fbcSAllan Jude /*
2701*5ff13fbcSAllan Jude * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
2702*5ff13fbcSAllan Jude * remaining a true 64-bit/128-bit hash function.
2703*5ff13fbcSAllan Jude *
2704*5ff13fbcSAllan Jude * This is done by prioritizing a subset of 64-bit operations that can be
2705*5ff13fbcSAllan Jude * emulated without too many steps on the average 32-bit machine.
2706*5ff13fbcSAllan Jude *
2707*5ff13fbcSAllan Jude * For example, these two lines seem similar, and run equally fast on 64-bit:
2708*5ff13fbcSAllan Jude *
2709*5ff13fbcSAllan Jude * xxh_u64 x;
2710*5ff13fbcSAllan Jude * x ^= (x >> 47); // good
2711*5ff13fbcSAllan Jude * x ^= (x >> 13); // bad
2712*5ff13fbcSAllan Jude *
2713*5ff13fbcSAllan Jude * However, to a 32-bit machine, there is a major difference.
2714*5ff13fbcSAllan Jude *
2715*5ff13fbcSAllan Jude * x ^= (x >> 47) looks like this:
2716*5ff13fbcSAllan Jude *
2717*5ff13fbcSAllan Jude * x.lo ^= (x.hi >> (47 - 32));
2718*5ff13fbcSAllan Jude *
2719*5ff13fbcSAllan Jude * while x ^= (x >> 13) looks like this:
2720*5ff13fbcSAllan Jude *
2721*5ff13fbcSAllan Jude * // note: funnel shifts are not usually cheap.
2722*5ff13fbcSAllan Jude * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
2723*5ff13fbcSAllan Jude * x.hi ^= (x.hi >> 13);
2724*5ff13fbcSAllan Jude *
2725*5ff13fbcSAllan Jude * The first one is significantly faster than the second, simply because the
2726*5ff13fbcSAllan Jude * shift is larger than 32. This means:
2727*5ff13fbcSAllan Jude * - All the bits we need are in the upper 32 bits, so we can ignore the lower
2728*5ff13fbcSAllan Jude * 32 bits in the shift.
2729*5ff13fbcSAllan Jude * - The shift result will always fit in the lower 32 bits, and therefore,
2730*5ff13fbcSAllan Jude * we can ignore the upper 32 bits in the xor.
2731*5ff13fbcSAllan Jude *
2732*5ff13fbcSAllan Jude * Thanks to this optimization, XXH3 only requires these features to be efficient:
2733*5ff13fbcSAllan Jude *
2734*5ff13fbcSAllan Jude * - Usable unaligned access
2735*5ff13fbcSAllan Jude * - A 32-bit or 64-bit ALU
2736*5ff13fbcSAllan Jude * - If 32-bit, a decent ADC instruction
2737*5ff13fbcSAllan Jude * - A 32 or 64-bit multiply with a 64-bit result
2738*5ff13fbcSAllan Jude * - For the 128-bit variant, a decent byteswap helps short inputs.
2739*5ff13fbcSAllan Jude *
2740*5ff13fbcSAllan Jude * The first two are already required by XXH32, and almost all 32-bit and 64-bit
2741*5ff13fbcSAllan Jude * platforms which can run XXH32 can run XXH3 efficiently.
2742*5ff13fbcSAllan Jude *
2743*5ff13fbcSAllan Jude * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
2744*5ff13fbcSAllan Jude * notable exception.
2745*5ff13fbcSAllan Jude *
2746*5ff13fbcSAllan Jude * First of all, Thumb-1 lacks support for the UMULL instruction which
2747*5ff13fbcSAllan Jude * performs the important long multiply. This means numerous __aeabi_lmul
2748*5ff13fbcSAllan Jude * calls.
2749*5ff13fbcSAllan Jude *
2750*5ff13fbcSAllan Jude * Second of all, the 8 functional registers are just not enough.
2751*5ff13fbcSAllan Jude * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
2752*5ff13fbcSAllan Jude * Lo registers, and this shuffling results in thousands more MOVs than A32.
2753*5ff13fbcSAllan Jude *
2754*5ff13fbcSAllan Jude * A32 and T32 don't have this limitation. They can access all 14 registers,
2755*5ff13fbcSAllan Jude * do a 32->64 multiply with UMULL, and the flexible operand allowing free
2756*5ff13fbcSAllan Jude * shifts is helpful, too.
2757*5ff13fbcSAllan Jude *
2758*5ff13fbcSAllan Jude * Therefore, we do a quick sanity check.
2759*5ff13fbcSAllan Jude *
2760*5ff13fbcSAllan Jude * If compiling Thumb-1 for a target which supports ARM instructions, we will
2761*5ff13fbcSAllan Jude * emit a warning, as it is not a "sane" platform to compile for.
2762*5ff13fbcSAllan Jude *
2763*5ff13fbcSAllan Jude * Usually, if this happens, it is because of an accident and you probably need
2764*5ff13fbcSAllan Jude * to specify -march, as you likely meant to compile for a newer architecture.
2765*5ff13fbcSAllan Jude *
2766*5ff13fbcSAllan Jude * Credit: large sections of the vectorial and asm source code paths
2767*5ff13fbcSAllan Jude * have been contributed by @easyaspi314
2768*5ff13fbcSAllan Jude */
2769*5ff13fbcSAllan Jude #if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
2770*5ff13fbcSAllan Jude # warning "XXH3 is highly inefficient without ARM or Thumb-2."
2771*5ff13fbcSAllan Jude #endif
2772*5ff13fbcSAllan Jude
2773*5ff13fbcSAllan Jude /* ==========================================
2774*5ff13fbcSAllan Jude * Vectorization detection
2775*5ff13fbcSAllan Jude * ========================================== */
2776*5ff13fbcSAllan Jude
2777*5ff13fbcSAllan Jude #ifdef XXH_DOXYGEN
2778*5ff13fbcSAllan Jude /*!
2779*5ff13fbcSAllan Jude * @ingroup tuning
2780*5ff13fbcSAllan Jude * @brief Overrides the vectorization implementation chosen for XXH3.
2781*5ff13fbcSAllan Jude *
2782*5ff13fbcSAllan Jude * Can be defined to 0 to disable SIMD or any of the values mentioned in
2783*5ff13fbcSAllan Jude * @ref XXH_VECTOR_TYPE.
2784*5ff13fbcSAllan Jude *
2785*5ff13fbcSAllan Jude * If this is not defined, it uses predefined macros to determine the best
2786*5ff13fbcSAllan Jude * implementation.
2787*5ff13fbcSAllan Jude */
2788*5ff13fbcSAllan Jude # define XXH_VECTOR XXH_SCALAR
2789*5ff13fbcSAllan Jude /*!
2790*5ff13fbcSAllan Jude * @ingroup tuning
2791*5ff13fbcSAllan Jude * @brief Possible values for @ref XXH_VECTOR.
2792*5ff13fbcSAllan Jude *
2793*5ff13fbcSAllan Jude * Note that these are actually implemented as macros.
2794*5ff13fbcSAllan Jude *
2795*5ff13fbcSAllan Jude * If this is not defined, it is detected automatically.
2796*5ff13fbcSAllan Jude * @ref XXH_X86DISPATCH overrides this.
2797*5ff13fbcSAllan Jude */
2798*5ff13fbcSAllan Jude enum XXH_VECTOR_TYPE /* fake enum */ {
2799*5ff13fbcSAllan Jude XXH_SCALAR = 0, /*!< Portable scalar version */
2800*5ff13fbcSAllan Jude XXH_SSE2 = 1, /*!<
2801*5ff13fbcSAllan Jude * SSE2 for Pentium 4, Opteron, all x86_64.
2802*5ff13fbcSAllan Jude *
2803*5ff13fbcSAllan Jude * @note SSE2 is also guaranteed on Windows 10, macOS, and
2804*5ff13fbcSAllan Jude * Android x86.
2805*5ff13fbcSAllan Jude */
2806*5ff13fbcSAllan Jude     XXH_AVX2   = 2,  /*!< AVX2 for Haswell and AMD Excavator (Bulldozer family) */
2807*5ff13fbcSAllan Jude XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */
2808*5ff13fbcSAllan Jude XXH_NEON = 4, /*!< NEON for most ARMv7-A and all AArch64 */
2809*5ff13fbcSAllan Jude XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */
2810*5ff13fbcSAllan Jude };
2811*5ff13fbcSAllan Jude /*!
2812*5ff13fbcSAllan Jude * @ingroup tuning
2813*5ff13fbcSAllan Jude * @brief Selects the minimum alignment for XXH3's accumulators.
2814*5ff13fbcSAllan Jude *
2815*5ff13fbcSAllan Jude  * When using SIMD, this should match the alignment required for said vector
2816*5ff13fbcSAllan Jude * type, so, for example, 32 for AVX2.
2817*5ff13fbcSAllan Jude *
2818*5ff13fbcSAllan Jude * Default: Auto detected.
2819*5ff13fbcSAllan Jude */
2820*5ff13fbcSAllan Jude # define XXH_ACC_ALIGN 8
2821*5ff13fbcSAllan Jude #endif
2822*5ff13fbcSAllan Jude
2823*5ff13fbcSAllan Jude /* Actual definition */
2824*5ff13fbcSAllan Jude #ifndef XXH_DOXYGEN
2825*5ff13fbcSAllan Jude # define XXH_SCALAR 0
2826*5ff13fbcSAllan Jude # define XXH_SSE2 1
2827*5ff13fbcSAllan Jude # define XXH_AVX2 2
2828*5ff13fbcSAllan Jude # define XXH_AVX512 3
2829*5ff13fbcSAllan Jude # define XXH_NEON 4
2830*5ff13fbcSAllan Jude # define XXH_VSX 5
2831*5ff13fbcSAllan Jude #endif
2832*5ff13fbcSAllan Jude
2833*5ff13fbcSAllan Jude #ifndef XXH_VECTOR /* can be defined on command line */
2834*5ff13fbcSAllan Jude # if ( \
2835*5ff13fbcSAllan Jude defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
2836*5ff13fbcSAllan Jude || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
2837*5ff13fbcSAllan Jude ) && ( \
2838*5ff13fbcSAllan Jude defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
2839*5ff13fbcSAllan Jude || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
2840*5ff13fbcSAllan Jude )
2841*5ff13fbcSAllan Jude # define XXH_VECTOR XXH_NEON
2842*5ff13fbcSAllan Jude # elif defined(__AVX512F__)
2843*5ff13fbcSAllan Jude # define XXH_VECTOR XXH_AVX512
2844*5ff13fbcSAllan Jude # elif defined(__AVX2__)
2845*5ff13fbcSAllan Jude # define XXH_VECTOR XXH_AVX2
2846*5ff13fbcSAllan Jude # elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
2847*5ff13fbcSAllan Jude # define XXH_VECTOR XXH_SSE2
2848*5ff13fbcSAllan Jude # elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
2849*5ff13fbcSAllan Jude || (defined(__s390x__) && defined(__VEC__)) \
2850*5ff13fbcSAllan Jude && defined(__GNUC__) /* TODO: IBM XL */
2851*5ff13fbcSAllan Jude # define XXH_VECTOR XXH_VSX
2852*5ff13fbcSAllan Jude # else
2853*5ff13fbcSAllan Jude # define XXH_VECTOR XXH_SCALAR
2854*5ff13fbcSAllan Jude # endif
2855*5ff13fbcSAllan Jude #endif
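/*
 * As noted above, XXH_VECTOR can be set on the command line to override
 * the automatic detection, e.g. to force the portable scalar path
 * (illustrative command; XXH_SCALAR expands to 0, so -DXXH_VECTOR=0 is
 * equivalent):
 *
 *     cc -O3 -DXXH_VECTOR=XXH_SCALAR -c file.c
 */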
2856*5ff13fbcSAllan Jude
2857*5ff13fbcSAllan Jude /*
2858*5ff13fbcSAllan Jude * Controls the alignment of the accumulator,
2859*5ff13fbcSAllan Jude * for compatibility with aligned vector loads, which are usually faster.
2860*5ff13fbcSAllan Jude */
2861*5ff13fbcSAllan Jude #ifndef XXH_ACC_ALIGN
2862*5ff13fbcSAllan Jude # if defined(XXH_X86DISPATCH)
2863*5ff13fbcSAllan Jude # define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */
2864*5ff13fbcSAllan Jude # elif XXH_VECTOR == XXH_SCALAR /* scalar */
2865*5ff13fbcSAllan Jude # define XXH_ACC_ALIGN 8
2866*5ff13fbcSAllan Jude # elif XXH_VECTOR == XXH_SSE2 /* sse2 */
2867*5ff13fbcSAllan Jude # define XXH_ACC_ALIGN 16
2868*5ff13fbcSAllan Jude # elif XXH_VECTOR == XXH_AVX2 /* avx2 */
2869*5ff13fbcSAllan Jude # define XXH_ACC_ALIGN 32
2870*5ff13fbcSAllan Jude # elif XXH_VECTOR == XXH_NEON /* neon */
2871*5ff13fbcSAllan Jude # define XXH_ACC_ALIGN 16
2872*5ff13fbcSAllan Jude # elif XXH_VECTOR == XXH_VSX /* vsx */
2873*5ff13fbcSAllan Jude # define XXH_ACC_ALIGN 16
2874*5ff13fbcSAllan Jude # elif XXH_VECTOR == XXH_AVX512 /* avx512 */
2875*5ff13fbcSAllan Jude # define XXH_ACC_ALIGN 64
2876*5ff13fbcSAllan Jude # endif
2877*5ff13fbcSAllan Jude #endif
2878*5ff13fbcSAllan Jude
2879*5ff13fbcSAllan Jude #if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
2880*5ff13fbcSAllan Jude || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
2881*5ff13fbcSAllan Jude # define XXH_SEC_ALIGN XXH_ACC_ALIGN
2882*5ff13fbcSAllan Jude #else
2883*5ff13fbcSAllan Jude # define XXH_SEC_ALIGN 8
2884*5ff13fbcSAllan Jude #endif
2885*5ff13fbcSAllan Jude
2886*5ff13fbcSAllan Jude /*
2887*5ff13fbcSAllan Jude * UGLY HACK:
2888*5ff13fbcSAllan Jude * GCC usually generates the best code with -O3 for xxHash.
2889*5ff13fbcSAllan Jude *
2890*5ff13fbcSAllan Jude * However, when targeting AVX2, it is overzealous in its unrolling resulting
2891*5ff13fbcSAllan Jude * in code roughly 3/4 the speed of Clang.
2892*5ff13fbcSAllan Jude *
2893*5ff13fbcSAllan Jude * There are other issues, such as GCC splitting _mm256_loadu_si256 into
2894*5ff13fbcSAllan Jude * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
2895*5ff13fbcSAllan Jude * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
2896*5ff13fbcSAllan Jude *
2897*5ff13fbcSAllan Jude * That is why when compiling the AVX2 version, it is recommended to use either
2898*5ff13fbcSAllan Jude * -O2 -mavx2 -march=haswell
2899*5ff13fbcSAllan Jude * or
2900*5ff13fbcSAllan Jude * -O2 -mavx2 -mno-avx256-split-unaligned-load
2901*5ff13fbcSAllan Jude * for decent performance, or to use Clang instead.
2902*5ff13fbcSAllan Jude *
2903*5ff13fbcSAllan Jude * Fortunately, we can control the first one with a pragma that forces GCC into
2904*5ff13fbcSAllan Jude * -O2, but the other one we can't control without "failed to inline always
2905*5ff13fbcSAllan Jude * inline function due to target mismatch" warnings.
2906*5ff13fbcSAllan Jude */
2907*5ff13fbcSAllan Jude #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
2908*5ff13fbcSAllan Jude && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
2909*5ff13fbcSAllan Jude && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
2910*5ff13fbcSAllan Jude # pragma GCC push_options
2911*5ff13fbcSAllan Jude # pragma GCC optimize("-O2")
2912*5ff13fbcSAllan Jude #endif
2913*5ff13fbcSAllan Jude
2914*5ff13fbcSAllan Jude
2915*5ff13fbcSAllan Jude #if XXH_VECTOR == XXH_NEON
2916*5ff13fbcSAllan Jude /*
2917*5ff13fbcSAllan Jude * NEON's setup for vmlal_u32 is a little more complicated than it is on
2918*5ff13fbcSAllan Jude * SSE2, AVX2, and VSX.
2919*5ff13fbcSAllan Jude *
2920*5ff13fbcSAllan Jude * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an upcast.
2921*5ff13fbcSAllan Jude *
2922*5ff13fbcSAllan Jude * To do the same operation, the 128-bit 'Q' register needs to be split into
2923*5ff13fbcSAllan Jude  * two 64-bit 'D' registers, performing this operation:
2924*5ff13fbcSAllan Jude *
2925*5ff13fbcSAllan Jude * [ a | b ]
2926*5ff13fbcSAllan Jude * | '---------. .--------' |
2927*5ff13fbcSAllan Jude * | x |
2928*5ff13fbcSAllan Jude * | .---------' '--------. |
2929*5ff13fbcSAllan Jude * [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[ a >> 32 | b >> 32 ]
2930*5ff13fbcSAllan Jude *
2931*5ff13fbcSAllan Jude * Due to significant changes in aarch64, the fastest method for aarch64 is
2932*5ff13fbcSAllan Jude  * completely different from the fastest method for ARMv7-A.
2933*5ff13fbcSAllan Jude *
2934*5ff13fbcSAllan Jude * ARMv7-A treats D registers as unions overlaying Q registers, so modifying
2935*5ff13fbcSAllan Jude * D11 will modify the high half of Q5. This is similar to how modifying AH
2936*5ff13fbcSAllan Jude * will only affect bits 8-15 of AX on x86.
2937*5ff13fbcSAllan Jude *
2938*5ff13fbcSAllan Jude * VZIP takes two registers, and puts even lanes in one register and odd lanes
2939*5ff13fbcSAllan Jude * in the other.
2940*5ff13fbcSAllan Jude *
2941*5ff13fbcSAllan Jude * On ARMv7-A, this strangely modifies both parameters in place instead of
2942*5ff13fbcSAllan Jude * taking the usual 3-operand form.
2943*5ff13fbcSAllan Jude *
2944*5ff13fbcSAllan Jude * Therefore, if we want to do this, we can simply use a D-form VZIP.32 on the
2945*5ff13fbcSAllan Jude * lower and upper halves of the Q register to end up with the high and low
2946*5ff13fbcSAllan Jude * halves where we want - all in one instruction.
2947*5ff13fbcSAllan Jude *
2948*5ff13fbcSAllan Jude * vzip.32 d10, d11 @ d10 = { d10[0], d11[0] }; d11 = { d10[1], d11[1] }
2949*5ff13fbcSAllan Jude *
2950*5ff13fbcSAllan Jude  * Unfortunately we need inline assembly for this: an instruction that
2951*5ff13fbcSAllan Jude  * modifies two registers at once cannot be expressed in GCC or Clang's IR,
2952*5ff13fbcSAllan Jude  * so they would have to create a copy.
2953*5ff13fbcSAllan Jude *
2954*5ff13fbcSAllan Jude * aarch64 requires a different approach.
2955*5ff13fbcSAllan Jude *
2956*5ff13fbcSAllan Jude * In order to make it easier to write a decent compiler for aarch64, many
2957*5ff13fbcSAllan Jude * quirks were removed, such as conditional execution.
2958*5ff13fbcSAllan Jude *
2959*5ff13fbcSAllan Jude * NEON was also affected by this.
2960*5ff13fbcSAllan Jude *
2961*5ff13fbcSAllan Jude * aarch64 cannot access the high bits of a Q-form register, and writes to a
2962*5ff13fbcSAllan Jude * D-form register zero the high bits, similar to how writes to W-form scalar
2963*5ff13fbcSAllan Jude * registers (or DWORD registers on x86_64) work.
2964*5ff13fbcSAllan Jude *
2965*5ff13fbcSAllan Jude * The formerly free vget_high intrinsics now require a vext (with a few
2966*5ff13fbcSAllan Jude * exceptions)
2967*5ff13fbcSAllan Jude *
2968*5ff13fbcSAllan Jude * Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the equivalent
2969*5ff13fbcSAllan Jude * of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to only modify one
2970*5ff13fbcSAllan Jude * operand.
2971*5ff13fbcSAllan Jude *
2972*5ff13fbcSAllan Jude * The equivalent of the VZIP.32 on the lower and upper halves would be this
2973*5ff13fbcSAllan Jude * mess:
2974*5ff13fbcSAllan Jude *
2975*5ff13fbcSAllan Jude * ext v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] }
2976*5ff13fbcSAllan Jude * zip1 v1.2s, v0.2s, v2.2s // v1 = { v0[0], v2[0] }
2977*5ff13fbcSAllan Jude * zip2 v0.2s, v0.2s, v1.2s // v0 = { v0[1], v2[1] }
2978*5ff13fbcSAllan Jude *
2979*5ff13fbcSAllan Jude * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64 (SHRN):
2980*5ff13fbcSAllan Jude *
2981*5ff13fbcSAllan Jude * shrn v1.2s, v0.2d, #32 // v1 = (uint32x2_t)(v0 >> 32);
2982*5ff13fbcSAllan Jude * xtn v0.2s, v0.2d // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF);
2983*5ff13fbcSAllan Jude *
2984*5ff13fbcSAllan Jude * This is available on ARMv7-A, but is less efficient than a single VZIP.32.
2985*5ff13fbcSAllan Jude */

/*!
 * Function-like macro:
 * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t &outHi)
 * {
 *     outLo = (uint32x2_t)(in & 0xFFFFFFFF);
 *     outHi = (uint32x2_t)(in >> 32);
 *     in = UNDEFINED;
 * }
 */
# if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \
   && (defined(__GNUC__) || defined(__clang__)) \
   && (defined(__arm__) || defined(__thumb__) || defined(_M_ARM))
#  define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                                            \
    do {                                                                                  \
      /* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 = upper D half */ \
      /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 */   \
      /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 */ \
      __asm__("vzip.32  %e0, %f0" : "+w" (in));                                           \
      (outLo) = vget_low_u32 (vreinterpretq_u32_u64(in));                                 \
      (outHi) = vget_high_u32(vreinterpretq_u32_u64(in));                                 \
   } while (0)
# else
#  define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                                            \
    do {                                                                                  \
      (outLo) = vmovn_u64    (in);                                                        \
      (outHi) = vshrn_n_u64  ((in), 32);                                                  \
    } while (0)
# endif
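
/*
 * Usage sketch (illustrative only; the names below are hypothetical locals,
 * not part of this file): the two halves produced by XXH_SPLIT_IN_PLACE are
 * exactly what vmlal_u32 wants for a widening 32x32->64 multiply-accumulate:
 *
 *     uint64x2_t acc, data_key;
 *     uint32x2_t dk_lo, dk_hi;
 *     XXH_SPLIT_IN_PLACE(data_key, dk_lo, dk_hi);
 *     acc = vmlal_u32(acc, dk_lo, dk_hi);  // per lane: acc += (xxh_u64)dk_lo * dk_hi
 */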

/*!
 * @ingroup tuning
 * @brief Controls the NEON to scalar ratio for XXH3
 *
 * On AArch64 when not optimizing for size, XXH3 will run 6 lanes using NEON and
 * 2 lanes on scalar by default.
 *
 * This can be set to 2, 4, 6, or 8. ARMv7 will default to all 8 NEON lanes, as the
 * emulated 64-bit arithmetic is too slow.
 *
 * Modern ARM CPUs are _very_ sensitive to how their pipelines are used.
 *
 * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but only 2 of
 * those can be NEON (F0/F1) micro-ops. If you are only using NEON instructions,
 * you are only using 2/3 of the CPU bandwidth.
 *
 * This is even more noticeable on the more advanced cores like the A76 which
 * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
 *
 * Therefore, @ref XXH3_NEON_LANES lanes will be processed using NEON, and the
 * remaining lanes will use scalar instructions. This improves the bandwidth
 * and also gives the integer pipelines something to do besides twiddling loop
 * counters and pointers.
 *
 * This change benefits CPUs with large micro-op buffers without negatively affecting
 * other CPUs:
 *
 *  | Chipset               | Dispatch type       | NEON only | 6:2 hybrid | Diff. |
 *  |:----------------------|:--------------------|----------:|-----------:|------:|
 *  | Snapdragon 730 (A76)  | 2 NEON/8 micro-ops  |  8.8 GB/s |  10.1 GB/s |  ~16% |
 *  | Snapdragon 835 (A73)  | 2 NEON/3 micro-ops  |  5.1 GB/s |   5.3 GB/s |   ~5% |
 *  | Marvell PXA1928 (A53) | In-order dual-issue |  1.9 GB/s |   1.9 GB/s |    0% |
 *
 * It also seems to fix some bad codegen on GCC, making it almost as fast as clang.
 *
 * @see XXH3_accumulate_512_neon()
 */
# ifndef XXH3_NEON_LANES
#  if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
   && !defined(__OPTIMIZE_SIZE__)
#   define XXH3_NEON_LANES 6
#  else
#   define XXH3_NEON_LANES XXH_ACC_NB
#  endif
# endif
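/*
 * For example, a build that wants all 8 lanes on NEON could override the
 * default at compile time (a hedged example; 2, 4, 6, and 8 are the supported
 * values, and the file name is hypothetical):
 *
 *     cc -O3 -DXXH3_NEON_LANES=8 -c myfile.c
 */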
#endif /* XXH_VECTOR == XXH_NEON */

/*
 * VSX and Z Vector helpers.
 *
 * This is very messy, and any pull requests to clean this up are welcome.
 *
 * There are a lot of problems with supporting VSX and s390x, due to
 * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
 */
#if XXH_VECTOR == XXH_VSX
#  if defined(__s390x__)
#    include <s390intrin.h>
#  else
/* gcc's altivec.h may unconditionally #define bool, vector, and pixel as
 * keywords, with bad consequences for programs already using these
 * identifiers for other purposes.
 * The paragraph defining these macros is skipped when __APPLE_ALTIVEC__ is defined.
 * __APPLE_ALTIVEC__ is _generally_ defined automatically by the compiler,
 * but it seems that, in some cases, it isn't.
 * Force the build macro to be defined, so that keywords are not altered.
 */
#    if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__)
#      define __APPLE_ALTIVEC__
#    endif
#    include <altivec.h>
#  endif

typedef __vector unsigned long long xxh_u64x2;
typedef __vector unsigned char xxh_u8x16;
typedef __vector unsigned xxh_u32x4;

# ifndef XXH_VSX_BE
#  if defined(__BIG_ENDIAN__) \
  || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#    define XXH_VSX_BE 1
#  elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
#    warning "-maltivec=be is not recommended. Please use native endianness."
#    define XXH_VSX_BE 1
#  else
#    define XXH_VSX_BE 0
#  endif
# endif /* !defined(XXH_VSX_BE) */

# if XXH_VSX_BE
#  if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
#    define XXH_vec_revb vec_revb
#  else
/*!
 * A polyfill for POWER9's vec_revb().
 */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
{
    xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
                                  0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
    return vec_perm(val, val, vByteSwap);
}
#  endif
# endif /* XXH_VSX_BE */

/*!
 * Performs an unaligned vector load and byte swaps it on big endian.
 */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
{
    xxh_u64x2 ret;
    XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
# if XXH_VSX_BE
    ret = XXH_vec_revb(ret);
# endif
    return ret;
}

/*
 * vec_mulo and vec_mule are very problematic intrinsics on PowerPC.
 *
 * These intrinsics weren't added until GCC 8, despite existing for a while,
 * and they are endian dependent. Also, their meanings swap depending on the
 * compiler version.
 */
# if defined(__s390x__)
 /* s390x is always big endian, no issue on this platform */
#  define XXH_vec_mulo vec_mulo
#  define XXH_vec_mule vec_mule
# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */
#  define XXH_vec_mulo __builtin_altivec_vmulouw
#  define XXH_vec_mule __builtin_altivec_vmuleuw
# else
/* gcc needs inline assembly */
/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
{
    xxh_u64x2 result;
    __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
    return result;
}
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
{
    xxh_u64x2 result;
    __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
    return result;
}
# endif /* XXH_vec_mulo, XXH_vec_mule */
#endif /* XXH_VECTOR == XXH_VSX */

/* prefetch
 * can be disabled, by declaring XXH_NO_PREFETCH build macro */
#if defined(XXH_NO_PREFETCH)
#  define XXH_PREFETCH(ptr)  (void)(ptr)  /* disabled */
#else
#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))  /* _mm_prefetch() not defined outside of x86/x64 */
#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
#    define XXH_PREFETCH(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
#    define XXH_PREFETCH(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
#  else
#    define XXH_PREFETCH(ptr)  (void)(ptr)  /* disabled */
#  endif
#endif  /* XXH_NO_PREFETCH */
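
/*
 * Illustrative use (a sketch, not a prescription): a long-input loop can hint
 * upcoming stripes into cache ahead of time, e.g. with a hypothetical
 * distance of 320 bytes:
 *
 *     XXH_PREFETCH(input + 320);   // a no-op on platforms without prefetch
 */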


/* ==========================================
 * XXH3 default settings
 * ========================================== */

#define XXH_SECRET_DEFAULT_SIZE 192   /* must be >= XXH3_SECRET_SIZE_MIN */

#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
#  error "default keyset is not large enough"
#endif

/*! Pseudorandom secret taken directly from FARSH. */
XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
    0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
    0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
    0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
    0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
    0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
    0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
    0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
    0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
    0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
    0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
    0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
    0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
};


#ifdef XXH_OLD_NAMES
#  define kSecret XXH3_kSecret
#endif

#ifdef XXH_DOXYGEN
/*!
 * @brief Calculates a 32-bit to 64-bit long multiply.
 *
 * Implemented as a macro.
 *
 * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't
 * need to (but it shouldn't need to anyway, as a 64x64 multiply is only about
 * 7 instructions...). Since we know that this will _always_ emit `MULL`, we
 * use that instead of the normal method.
 *
 * If you are compiling for platforms like Thumb-1 and don't have a better option,
 * you may also want to write your own long multiply routine here.
 *
 * @param x, y Numbers to be multiplied
 * @return 64-bit product of the low 32 bits of @p x and @p y.
 */
XXH_FORCE_INLINE xxh_u64
XXH_mult32to64(xxh_u64 x, xxh_u64 y)
{
   return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
}
#elif defined(_MSC_VER) && defined(_M_IX86)
#  define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
#else
/*
 * Downcast + upcast is usually better than masking on older compilers like
 * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
 *
 * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
 * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
 */
#  define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
#endif
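
/*
 * Worked example (illustrative): XXH_mult32to64(0xFFFFFFFF, 0xFFFFFFFF)
 * = (2^32 - 1)^2 = 0xFFFFFFFE00000001. On a 32-bit target this is a single
 * 32x32->64 multiply (e.g. one UMULL), with no 64x64 library call involved.
 */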

/*!
 * @brief Calculates a 64->128-bit long multiply.
 *
 * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
 * version.
 *
 * @param lhs , rhs The 64-bit integers to be multiplied
 * @return The 128-bit result represented in an @ref XXH128_hash_t.
 */
static XXH128_hash_t
XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
{
    /*
     * GCC/Clang __uint128_t method.
     *
     * On most 64-bit targets, GCC and Clang define a __uint128_t type.
     * This is usually the best way as it usually uses a native long 64-bit
     * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
     *
     * Usually.
     *
     * However, Clang (and Emscripten) also define this type on some 32-bit
     * platforms, despite lacking the native arithmetic for it. This results
     * in a laggy compiler builtin call which calculates a full 128-bit
     * multiply. In that case it is best to use the portable one.
     * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
     */
#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \
    && defined(__SIZEOF_INT128__) \
    || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)

    __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
    XXH128_hash_t r128;
    r128.low64  = (xxh_u64)(product);
    r128.high64 = (xxh_u64)(product >> 64);
    return r128;

    /*
     * MSVC for x64's _umul128 method.
     *
     * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
     *
     * This compiles to single operand MUL on x64.
     */
#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC)

#ifdef _MSC_VER  /* #pragma intrinsic is MSVC-specific */
#  pragma intrinsic(_umul128)
#endif
    xxh_u64 product_high;
    xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
    XXH128_hash_t r128;
    r128.low64  = product_low;
    r128.high64 = product_high;
    return r128;

    /*
     * MSVC for ARM64's __umulh method.
     *
     * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method.
     */
#elif defined(_M_ARM64) || defined(_M_ARM64EC)

#ifdef _MSC_VER  /* #pragma intrinsic is MSVC-specific */
#  pragma intrinsic(__umulh)
#endif
    XXH128_hash_t r128;
    r128.low64  = lhs * rhs;
    r128.high64 = __umulh(lhs, rhs);
    return r128;

#else
    /*
     * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
     *
     * This is a fast and simple grade school multiply, which is shown below
     * with base 10 arithmetic instead of base 0x100000000.
     *
     *           9 3 // D2 lhs = 93
     *         x 7 5 // D2 rhs = 75
     *     ----------
     *           1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
     *         4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
     *         2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
     *     + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
     *     ---------
     *         2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
     *     + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
     *     ---------
     *       6 9 7 5 // D4 res   = (27 * 10) + (15 % 10) + (67 * 100) = 6975
     *
     * The reasons for adding the products like this are:
     *  1. It avoids manual carry tracking. Just like how
     *     (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
     *     This avoids a lot of complexity.
     *
     *  2. It hints for, and on Clang, compiles to, the powerful UMAAL
     *     instruction available in ARM's Digital Signal Processing extension
     *     in 32-bit ARMv6 and later, which is shown below:
     *
     *         void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
     *         {
     *             xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
     *             *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
     *             *RdHi = (xxh_u32)(product >> 32);
     *         }
     *
     *     This instruction was designed for efficient long multiplication, and
     *     allows this to be calculated in only 4 instructions at speeds
     *     comparable to some 64-bit ALUs.
     *
     *  3. It isn't terrible on other platforms. Usually this will be a couple
     *     of 32-bit ADD/ADCs.
     */

    /* First calculate all of the cross products. */
    xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
    xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32,        rhs & 0xFFFFFFFF);
    xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
    xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32,        rhs >> 32);

    /* Now add the products together. These will never overflow. */
    xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
    xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32)        + hi_hi;
    xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);

    XXH128_hash_t r128;
    r128.low64  = lower;
    r128.high64 = upper;
    return r128;
#endif
}
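
/*
 * Quick sanity example (illustrative): XXH_mult64to128(0xFFFFFFFFFFFFFFFFULL, 2)
 * yields { low64 = 0xFFFFFFFFFFFFFFFE, high64 = 1 }, i.e. the 128-bit value
 * 0x1FFFFFFFFFFFFFFFE, whichever backend above was selected.
 */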

/*!
 * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
 *
 * The reason for the separate function is to prevent passing too many structs
 * around by value. This will hopefully inline the multiply, but we don't force it.
 *
 * @param lhs , rhs The 64-bit integers to multiply
 * @return The low 64 bits of the product XOR'd by the high 64 bits.
 * @see XXH_mult64to128()
 */
static xxh_u64
XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
{
    XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
    return product.low64 ^ product.high64;
}
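
/*
 * Continuing the example above (illustrative): XXH3_mul128_fold64(~0ULL, 2)
 * folds { high64 = 1, low64 = 0xFFFFFFFFFFFFFFFE } into
 * 0x1 ^ 0xFFFFFFFFFFFFFFFE = 0xFFFFFFFFFFFFFFFF.
 */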

/*! Seems to produce slightly better code on GCC for some reason. */
XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
{
    XXH_ASSERT(0 <= shift && shift < 64);
    return v64 ^ (v64 >> shift);
}

/*
 * This is a fast avalanche stage,
 * suitable when input bits are already partially mixed.
 */
static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
{
    h64 = XXH_xorshift64(h64, 37);
    h64 *= 0x165667919E3779F9ULL;
    h64 = XXH_xorshift64(h64, 32);
    return h64;
}

/*
 * This is a stronger avalanche,
 * inspired by Pelle Evensen's rrmxmx.
 * Preferable when the input has not been previously mixed.
 */
static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
{
    /* this mix is inspired by Pelle Evensen's rrmxmx */
    h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
    h64 *= 0x9FB21C651E98DF25ULL;
    h64 ^= (h64 >> 35) + len;
    h64 *= 0x9FB21C651E98DF25ULL;
    return XXH_xorshift64(h64, 28);
}


/* ==========================================
 * Short keys
 * ==========================================
 * One of the shortcomings of XXH32 and XXH64 was that their performance was
 * sub-optimal on short lengths. They used an iterative algorithm which strongly
 * favored lengths that were a multiple of 4 or 8.
 *
 * Instead of iterating over individual inputs, we use a set of single shot
 * functions which piece together a range of lengths and operate in constant time.
 *
 * Additionally, the number of multiplies has been significantly reduced. This
 * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
 *
 * Depending on the platform, this may or may not be faster than XXH32, but it
 * is almost guaranteed to be faster than XXH64.
 */

/*
 * At very short lengths, there isn't enough input to fully hide the secret, or
 * to use the entire secret.
 *
 * There is also only a limited amount of mixing we can do before significantly
 * impacting performance.
 *
 * Therefore, we use different sections of the secret and always mix two secret
 * samples with an XOR. This should have no effect on performance on the
 * seedless or withSeed variants because everything _should_ be constant folded
 * by modern compilers.
 *
 * The XOR mixing hides individual parts of the secret and increases entropy.
 *
 * This adds an extra layer of strength for custom secrets.
 */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    /*
     * len = 1: combined = { input[0], 0x01, input[0], input[0] }
     * len = 2: combined = { input[1], 0x02, input[0], input[1] }
     * len = 3: combined = { input[2], 0x03, input[0], input[1] }
     */
    {   xxh_u8  const c1 = input[0];
        xxh_u8  const c2 = input[len >> 1];
        xxh_u8  const c3 = input[len - 1];
        xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2  << 24)
                               | ((xxh_u32)c3 <<  0) | ((xxh_u32)len <<  8);
        xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
        return XXH64_avalanche(keyed);
    }
}
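
/*
 * Worked example (illustrative): for input = { 0xAA, 0xBB } and len = 2,
 * c1 = 0xAA, c2 = input[1] = 0xBB, c3 = input[1] = 0xBB, so
 * combined = 0xBBAA02BB: c2 in bits 24-31, c1 in bits 16-23,
 * len in bits 8-15, and c3 in bits 0-7.
 */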

XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   xxh_u32 const input1 = XXH_readLE32(input);
        xxh_u32 const input2 = XXH_readLE32(input + len - 4);
        xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
        xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
        xxh_u64 const keyed = input64 ^ bitflip;
        return XXH3_rrmxmx(keyed, len);
    }
}

XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(9 <= len && len <= 16);
    {   xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
        xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
        xxh_u64 const input_lo = XXH_readLE64(input)           ^ bitflip1;
        xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
        xxh_u64 const acc = len
                          + XXH_swap64(input_lo) + input_hi
                          + XXH3_mul128_fold64(input_lo, input_hi);
        return XXH3_avalanche(acc);
    }
}

XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(len <= 16);
    {   if (XXH_likely(len >  8)) return XXH3_len_9to16_64b(input, len, secret, seed);
        if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
        if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
        return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
    }
}

/*
 * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
 * multiplication by zero, affecting hashes of lengths 17 to 240.
 *
 * However, they are very unlikely.
 *
 * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
 * unseeded non-cryptographic hashes, it does not attempt to defend itself
 * against specially crafted inputs, only random inputs.
 *
 * Compared to classic UMAC, where a 1 in 2^31 chance of 4 consecutive bytes
 * cancelling out the secret can be exploited an arbitrary number of times
 * (addressed in XXH3_accumulate_512), this collision is very unlikely with
 * random inputs and/or proper seeding:
 *
 * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
 * function that is only called up to 16 times per hash with up to 240 bytes of
 * input.
 *
 * This is not too bad for a non-cryptographic hash function, especially with
 * only 64-bit outputs.
 *
 * The 128-bit variant (which trades some speed for strength) is NOT affected
 * by this, although it is always a good idea to use a proper seed if you care
 * about strength.
 */
XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
                                     const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
{
#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__i386__) && defined(__SSE2__)  /* x86 + SSE2 */ \
  && !defined(XXH_ENABLE_AUTOVECTORIZE)      /* Define to disable like XXH32 hack */
    /*
     * UGLY HACK:
     * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
     * slower code.
     *
     * By forcing seed64 into a register, we disrupt the cost model and
     * cause it to scalarize. See `XXH32_round()`
     *
     * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
     * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
     * GCC 9.2, despite both emitting scalar code.
     *
     * GCC generates much better scalar code than Clang for the rest of XXH3,
     * which is why finding a more optimal codepath is an interest.
     */
    XXH_COMPILER_GUARD(seed64);
#endif
    {   xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64 const input_hi = XXH_readLE64(input+8);
        return XXH3_mul128_fold64(
            input_lo ^ (XXH_readLE64(secret)   + seed64),
            input_hi ^ (XXH_readLE64(secret+8) - seed64)
        );
    }
}

/* For mid-range keys, XXH3 uses a Mum-hash variant. */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
                     const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                     XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(16 < len && len <= 128);

    {   xxh_u64 acc = len * XXH_PRIME64_1;
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc += XXH3_mix16B(input+48, secret+96, seed);
                    acc += XXH3_mix16B(input+len-64, secret+112, seed);
                }
                acc += XXH3_mix16B(input+32, secret+64, seed);
                acc += XXH3_mix16B(input+len-48, secret+80, seed);
            }
            acc += XXH3_mix16B(input+16, secret+32, seed);
            acc += XXH3_mix16B(input+len-32, secret+48, seed);
        }
        acc += XXH3_mix16B(input+0, secret+0, seed);
        acc += XXH3_mix16B(input+len-16, secret+16, seed);

        return XXH3_avalanche(acc);
    }
}
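
/*
 * Illustrative trace (the order of the additions is irrelevant): for len = 100,
 * all branches above are taken, mixing eight 16-byte blocks that cover the
 * input symmetrically from both ends:
 *
 *     input[ 0..15] with secret[  0.. 15],  input[84..99] with secret[ 16.. 31],
 *     input[16..31] with secret[ 32.. 47],  input[68..83] with secret[ 48.. 63],
 *     input[32..47] with secret[ 64.. 79],  input[52..67] with secret[ 80.. 95],
 *     input[48..63] with secret[ 96..111],  input[36..51] with secret[112..127].
 */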

#define XXH3_MIDSIZE_MAX 240

XXH_NO_INLINE XXH64_hash_t
XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                      XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);

    #define XXH3_MIDSIZE_STARTOFFSET 3
    #define XXH3_MIDSIZE_LASTOFFSET  17

    {   xxh_u64 acc = len * XXH_PRIME64_1;
        int const nbRounds = (int)len / 16;
        int i;
        for (i=0; i<8; i++) {
            acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
        }
        acc = XXH3_avalanche(acc);
        XXH_ASSERT(nbRounds >= 8);
#if defined(__clang__)                                /* Clang */ \
    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
        /*
         * UGLY HACK:
         * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
         * Everywhere else, it uses scalar code.
         *
         * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
         * would still be slower than UMAAL (see XXH_mult64to128).
         *
         * Unfortunately, Clang doesn't handle the long multiplies properly and
         * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
         * scalarized into an ugly mess of VMOV.32 instructions.
         *
         * This mess is difficult to avoid without turning autovectorization
         * off completely, but the other cases are usually relatively minor
         * and/or not worth fixing.
         *
         * This loop is the easiest to fix, as unlike XXH32, this pragma
         * _actually works_ because it is a loop vectorization instead of an
         * SLP vectorization.
         */
        #pragma clang loop vectorize(disable)
#endif
        for (i=8 ; i < nbRounds; i++) {
            acc += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
        }
        /* last bytes */
        acc += XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
        return XXH3_avalanche(acc);
    }
}
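
/*
 * Illustrative trace: for len = 240, nbRounds = 15. Rounds 0-7 consume
 * secret[0..127]; rounds 8-14 restart at XXH3_MIDSIZE_STARTOFFSET and consume
 * secret[3..114]; the final mix reads input[224..239] against
 * secret[119..134], since XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET
 * = 136 - 17 = 119. A minimum-size secret is therefore never overrun.
 */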


/* =======     Long Keys     ======= */

#define XXH_STRIPE_LEN 64
#define XXH_SECRET_CONSUME_RATE 8   /* nb of secret bytes consumed at each accumulation */
#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))

#ifdef XXH_OLD_NAMES
#  define STRIPE_LEN XXH_STRIPE_LEN
#  define ACC_NB XXH_ACC_NB
#endif

XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
{
    if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
    XXH_memcpy(dst, &v64, sizeof(v64));
}

/* Several intrinsic functions below are supposed to accept __int64 as their
 * arguments, as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
 * However, several environments do not define the __int64 type,
 * requiring a workaround.
 */
#if !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
    typedef int64_t xxh_i64;
#else
    /* the following type must have a width of 64 bits */
    typedef long long xxh_i64;
#endif


/*
 * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
 *
 * It is a hardened version of UMAC, based off of FARSH's implementation.
 *
 * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
 * implementations, and it is ridiculously fast.
 *
 * We harden it by mixing the original input to the accumulators as well as the product.
 *
 * This means that in the (relatively likely) case of a multiply by zero, the
 * original input is preserved.
 *
 * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
 * cross-pollination, as otherwise the upper and lower halves would be
 * essentially independent.
 *
 * This doesn't matter on 64-bit hashes since they all get merged together in
 * the end, so we skip the extra step.
 *
 * Both XXH3_64bits and XXH3_128bits use this subroutine.
 */
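
/*
 * For orientation, one lane of a single accumulation round looks like this in
 * scalar form (a sketch of the intent; the kernels below are its SIMD
 * equivalents):
 *
 *     xxh_u64 const data_val = XXH_readLE64(input  + 8*lane);
 *     xxh_u64 const data_key = data_val ^ XXH_readLE64(secret + 8*lane);
 *     acc[lane ^ 1] += data_val;                      // swap & mix in raw input
 *     acc[lane]     += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
 */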

#if (XXH_VECTOR == XXH_AVX512) \
     || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)

#ifndef XXH_TARGET_AVX512
# define XXH_TARGET_AVX512  /* disable attribute target */
#endif

XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
                           const void* XXH_RESTRICT input,
                           const void* XXH_RESTRICT secret)
{
    __m512i* const xacc = (__m512i *) acc;
    XXH_ASSERT((((size_t)acc) & 63) == 0);
    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));

    {
        /* data_vec    = input[0]; */
        __m512i const data_vec    = _mm512_loadu_si512   (input);
        /* key_vec     = secret[0]; */
        __m512i const key_vec     = _mm512_loadu_si512   (secret);
        /* data_key    = data_vec ^ key_vec; */
        __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);
        /* data_key_lo = data_key >> 32; */
        __m512i const data_key_lo = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
        /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
        __m512i const product     = _mm512_mul_epu32     (data_key, data_key_lo);
        /* xacc[0] += swap(data_vec); */
        __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
        __m512i const sum       = _mm512_add_epi64(*xacc, data_swap);
        /* xacc[0] += product; */
        *xacc = _mm512_add_epi64(product, sum);
    }
}

/*
 * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
 *
 * Multiplication isn't perfect, as explained by Google in HighwayHash:
 *
 *  // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
 *  // varying degrees. In descending order of goodness, bytes
 *  // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
 *  // As expected, the upper and lower bytes are much worse.
 *
 * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
 *
 * Since our algorithm uses a pseudorandom secret to add some variance into the
 * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
 *
 * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
 * extraction.
 *
 * Both XXH3_64bits and XXH3_128bits use this subroutine.
 */
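
/*
 * In scalar form, the scramble amounts to this per 64-bit accumulator lane
 * (an illustrative sketch of the operation the SIMD code performs):
 *
 *     acc[i] ^= acc[i] >> 47;
 *     acc[i] ^= XXH_readLE64(secret + 8*i);
 *     acc[i] *= XXH_PRIME32_1;
 */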

XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 63) == 0);
    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
    {   __m512i* const xacc = (__m512i*) acc;
        const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);

        /* xacc[0] ^= (xacc[0] >> 47) */
        __m512i const acc_vec     = *xacc;
        __m512i const shifted     = _mm512_srli_epi64    (acc_vec, 47);
        __m512i const data_vec    = _mm512_xor_si512     (acc_vec, shifted);
        /* xacc[0] ^= secret; */
        __m512i const key_vec     = _mm512_loadu_si512   (secret);
        __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);

        /* xacc[0] *= XXH_PRIME32_1; */
        __m512i const data_key_hi = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
        __m512i const prod_lo     = _mm512_mul_epu32     (data_key, prime32);
        __m512i const prod_hi     = _mm512_mul_epu32     (data_key_hi, prime32);
        *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
    }
}

XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
    XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
    XXH_ASSERT(((size_t)customSecret & 63) == 0);
    (void)(&XXH_writeLE64);
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
        __m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, (xxh_i64)(0U - seed64));

        const __m512i* const src  = (const __m512i*) ((const void*) XXH3_kSecret);
              __m512i* const dest = (      __m512i*) customSecret;
        int i;
        XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
        XXH_ASSERT(((size_t)dest & 63) == 0);
        for (i=0; i < nbRounds; ++i) {
            /* GCC has a bug, _mm512_stream_load_si512 accepts 'void*', not 'void const*',
             * this will warn "discards 'const' qualifier". */
            union {
                const __m512i* cp;
                void* p;
            } remote_const_void;
            remote_const_void.cp = src + i;
            dest[i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed);
    }   }
}

#endif
3839*5ff13fbcSAllan Jude
3840*5ff13fbcSAllan Jude #if (XXH_VECTOR == XXH_AVX2) \
3841*5ff13fbcSAllan Jude || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
3842*5ff13fbcSAllan Jude
3843*5ff13fbcSAllan Jude #ifndef XXH_TARGET_AVX2
3844*5ff13fbcSAllan Jude # define XXH_TARGET_AVX2 /* disable attribute target */
3845*5ff13fbcSAllan Jude #endif
3846*5ff13fbcSAllan Jude
3847*5ff13fbcSAllan Jude XXH_FORCE_INLINE XXH_TARGET_AVX2 void
XXH3_accumulate_512_avx2(void * XXH_RESTRICT acc,const void * XXH_RESTRICT input,const void * XXH_RESTRICT secret)3848*5ff13fbcSAllan Jude XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
3849*5ff13fbcSAllan Jude const void* XXH_RESTRICT input,
3850*5ff13fbcSAllan Jude const void* XXH_RESTRICT secret)
3851*5ff13fbcSAllan Jude {
3852*5ff13fbcSAllan Jude XXH_ASSERT((((size_t)acc) & 31) == 0);
3853*5ff13fbcSAllan Jude { __m256i* const xacc = (__m256i *) acc;
3854*5ff13fbcSAllan Jude /* Unaligned. This is mainly for pointer arithmetic, and because
3855*5ff13fbcSAllan Jude * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3856*5ff13fbcSAllan Jude const __m256i* const xinput = (const __m256i *) input;
3857*5ff13fbcSAllan Jude /* Unaligned. This is mainly for pointer arithmetic, and because
3858*5ff13fbcSAllan Jude * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3859*5ff13fbcSAllan Jude const __m256i* const xsecret = (const __m256i *) secret;
3860*5ff13fbcSAllan Jude
3861*5ff13fbcSAllan Jude size_t i;
3862*5ff13fbcSAllan Jude for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
3863*5ff13fbcSAllan Jude /* data_vec = xinput[i]; */
3864*5ff13fbcSAllan Jude __m256i const data_vec = _mm256_loadu_si256 (xinput+i);
3865*5ff13fbcSAllan Jude /* key_vec = xsecret[i]; */
3866*5ff13fbcSAllan Jude __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
3867*5ff13fbcSAllan Jude /* data_key = data_vec ^ key_vec; */
3868*5ff13fbcSAllan Jude __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
3869*5ff13fbcSAllan Jude /* data_key_lo = data_key >> 32; */
3870*5ff13fbcSAllan Jude __m256i const data_key_lo = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3871*5ff13fbcSAllan Jude /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
3872*5ff13fbcSAllan Jude __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo);
3873*5ff13fbcSAllan Jude /* xacc[i] += swap(data_vec); */
3874*5ff13fbcSAllan Jude __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
3875*5ff13fbcSAllan Jude __m256i const sum = _mm256_add_epi64(xacc[i], data_swap);
3876*5ff13fbcSAllan Jude /* xacc[i] += product; */
3877*5ff13fbcSAllan Jude xacc[i] = _mm256_add_epi64(product, sum);
3878*5ff13fbcSAllan Jude } }
3879*5ff13fbcSAllan Jude }
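/*
 * For reference, each 64-bit lane of the loop above computes, in scalar
 * notation (XXH3_scalarRound() further below is the exact scalar equivalent):
 *   data_key = data[i] ^ key[i];
 *   xacc[i] += data[i ^ 1];                             (adjacent-lane swap)
 *   xacc[i] += (xxh_u64)(xxh_u32)data_key * (data_key >> 32);
 */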
3880*5ff13fbcSAllan Jude
3881*5ff13fbcSAllan Jude XXH_FORCE_INLINE XXH_TARGET_AVX2 void
3882*5ff13fbcSAllan Jude XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
3883*5ff13fbcSAllan Jude {
3884*5ff13fbcSAllan Jude XXH_ASSERT((((size_t)acc) & 31) == 0);
3885*5ff13fbcSAllan Jude { __m256i* const xacc = (__m256i*) acc;
3886*5ff13fbcSAllan Jude /* Unaligned. This is mainly for pointer arithmetic, and because
3887*5ff13fbcSAllan Jude * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3888*5ff13fbcSAllan Jude const __m256i* const xsecret = (const __m256i *) secret;
3889*5ff13fbcSAllan Jude const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
3890*5ff13fbcSAllan Jude
3891*5ff13fbcSAllan Jude size_t i;
3892*5ff13fbcSAllan Jude for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
3893*5ff13fbcSAllan Jude /* xacc[i] ^= (xacc[i] >> 47) */
3894*5ff13fbcSAllan Jude __m256i const acc_vec = xacc[i];
3895*5ff13fbcSAllan Jude __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47);
3896*5ff13fbcSAllan Jude __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted);
3897*5ff13fbcSAllan Jude /* xacc[i] ^= xsecret; */
3898*5ff13fbcSAllan Jude __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
3899*5ff13fbcSAllan Jude __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
3900*5ff13fbcSAllan Jude
3901*5ff13fbcSAllan Jude /* xacc[i] *= XXH_PRIME32_1; */
3902*5ff13fbcSAllan Jude __m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3903*5ff13fbcSAllan Jude __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32);
3904*5ff13fbcSAllan Jude __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32);
3905*5ff13fbcSAllan Jude xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
3906*5ff13fbcSAllan Jude }
3907*5ff13fbcSAllan Jude }
3908*5ff13fbcSAllan Jude }
3909*5ff13fbcSAllan Jude
3910*5ff13fbcSAllan Jude XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
3911*5ff13fbcSAllan Jude {
3912*5ff13fbcSAllan Jude XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
3913*5ff13fbcSAllan Jude XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
3914*5ff13fbcSAllan Jude XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
3915*5ff13fbcSAllan Jude (void)(&XXH_writeLE64);
3916*5ff13fbcSAllan Jude XXH_PREFETCH(customSecret);
3917*5ff13fbcSAllan Jude { __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
3918*5ff13fbcSAllan Jude
3919*5ff13fbcSAllan Jude const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret);
3920*5ff13fbcSAllan Jude __m256i* dest = ( __m256i*) customSecret;
3921*5ff13fbcSAllan Jude
3922*5ff13fbcSAllan Jude # if defined(__GNUC__) || defined(__clang__)
3923*5ff13fbcSAllan Jude /*
3924*5ff13fbcSAllan Jude          * On GCC & Clang, marking 'dest' as modified causes the compiler to:
3925*5ff13fbcSAllan Jude          * - not extract the secret from SSE registers in the internal loop
3926*5ff13fbcSAllan Jude          * - use fewer common registers, and avoid pushing these registers onto the stack
3927*5ff13fbcSAllan Jude */
3928*5ff13fbcSAllan Jude XXH_COMPILER_GUARD(dest);
3929*5ff13fbcSAllan Jude # endif
3930*5ff13fbcSAllan Jude XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
3931*5ff13fbcSAllan Jude XXH_ASSERT(((size_t)dest & 31) == 0);
3932*5ff13fbcSAllan Jude
3933*5ff13fbcSAllan Jude         /* GCC -O2 needs this loop unrolled manually */
3934*5ff13fbcSAllan Jude dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src+0), seed);
3935*5ff13fbcSAllan Jude dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src+1), seed);
3936*5ff13fbcSAllan Jude dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src+2), seed);
3937*5ff13fbcSAllan Jude dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src+3), seed);
3938*5ff13fbcSAllan Jude dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src+4), seed);
3939*5ff13fbcSAllan Jude dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src+5), seed);
3940*5ff13fbcSAllan Jude }
3941*5ff13fbcSAllan Jude }
3942*5ff13fbcSAllan Jude
3943*5ff13fbcSAllan Jude #endif
3944*5ff13fbcSAllan Jude
3945*5ff13fbcSAllan Jude /* x86dispatch always generates SSE2 */
3946*5ff13fbcSAllan Jude #if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
3947*5ff13fbcSAllan Jude
3948*5ff13fbcSAllan Jude #ifndef XXH_TARGET_SSE2
3949*5ff13fbcSAllan Jude # define XXH_TARGET_SSE2 /* disable attribute target */
3950*5ff13fbcSAllan Jude #endif
3951*5ff13fbcSAllan Jude
3952*5ff13fbcSAllan Jude XXH_FORCE_INLINE XXH_TARGET_SSE2 void
3953*5ff13fbcSAllan Jude XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
3954*5ff13fbcSAllan Jude const void* XXH_RESTRICT input,
3955*5ff13fbcSAllan Jude const void* XXH_RESTRICT secret)
3956*5ff13fbcSAllan Jude {
3957*5ff13fbcSAllan Jude /* SSE2 is just a half-scale version of the AVX2 version. */
3958*5ff13fbcSAllan Jude XXH_ASSERT((((size_t)acc) & 15) == 0);
3959*5ff13fbcSAllan Jude { __m128i* const xacc = (__m128i *) acc;
3960*5ff13fbcSAllan Jude /* Unaligned. This is mainly for pointer arithmetic, and because
3961*5ff13fbcSAllan Jude * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
3962*5ff13fbcSAllan Jude const __m128i* const xinput = (const __m128i *) input;
3963*5ff13fbcSAllan Jude /* Unaligned. This is mainly for pointer arithmetic, and because
3964*5ff13fbcSAllan Jude * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
3965*5ff13fbcSAllan Jude const __m128i* const xsecret = (const __m128i *) secret;
3966*5ff13fbcSAllan Jude
3967*5ff13fbcSAllan Jude size_t i;
3968*5ff13fbcSAllan Jude for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
3969*5ff13fbcSAllan Jude /* data_vec = xinput[i]; */
3970*5ff13fbcSAllan Jude __m128i const data_vec = _mm_loadu_si128 (xinput+i);
3971*5ff13fbcSAllan Jude /* key_vec = xsecret[i]; */
3972*5ff13fbcSAllan Jude __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
3973*5ff13fbcSAllan Jude /* data_key = data_vec ^ key_vec; */
3974*5ff13fbcSAllan Jude __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
3975*5ff13fbcSAllan Jude /* data_key_lo = data_key >> 32; */
3976*5ff13fbcSAllan Jude __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3977*5ff13fbcSAllan Jude /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
3978*5ff13fbcSAllan Jude __m128i const product = _mm_mul_epu32 (data_key, data_key_lo);
3979*5ff13fbcSAllan Jude /* xacc[i] += swap(data_vec); */
3980*5ff13fbcSAllan Jude __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
3981*5ff13fbcSAllan Jude __m128i const sum = _mm_add_epi64(xacc[i], data_swap);
3982*5ff13fbcSAllan Jude /* xacc[i] += product; */
3983*5ff13fbcSAllan Jude xacc[i] = _mm_add_epi64(product, sum);
3984*5ff13fbcSAllan Jude } }
3985*5ff13fbcSAllan Jude }
3986*5ff13fbcSAllan Jude
3987*5ff13fbcSAllan Jude XXH_FORCE_INLINE XXH_TARGET_SSE2 void
3988*5ff13fbcSAllan Jude XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
3989*5ff13fbcSAllan Jude {
3990*5ff13fbcSAllan Jude XXH_ASSERT((((size_t)acc) & 15) == 0);
3991*5ff13fbcSAllan Jude { __m128i* const xacc = (__m128i*) acc;
3992*5ff13fbcSAllan Jude /* Unaligned. This is mainly for pointer arithmetic, and because
3993*5ff13fbcSAllan Jude * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
3994*5ff13fbcSAllan Jude const __m128i* const xsecret = (const __m128i *) secret;
3995*5ff13fbcSAllan Jude const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
3996*5ff13fbcSAllan Jude
3997*5ff13fbcSAllan Jude size_t i;
3998*5ff13fbcSAllan Jude for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
3999*5ff13fbcSAllan Jude /* xacc[i] ^= (xacc[i] >> 47) */
4000*5ff13fbcSAllan Jude __m128i const acc_vec = xacc[i];
4001*5ff13fbcSAllan Jude __m128i const shifted = _mm_srli_epi64 (acc_vec, 47);
4002*5ff13fbcSAllan Jude __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted);
4003*5ff13fbcSAllan Jude /* xacc[i] ^= xsecret[i]; */
4004*5ff13fbcSAllan Jude __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
4005*5ff13fbcSAllan Jude __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
4006*5ff13fbcSAllan Jude
4007*5ff13fbcSAllan Jude /* xacc[i] *= XXH_PRIME32_1; */
4008*5ff13fbcSAllan Jude __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
4009*5ff13fbcSAllan Jude __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32);
4010*5ff13fbcSAllan Jude __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32);
4011*5ff13fbcSAllan Jude xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
4012*5ff13fbcSAllan Jude }
4013*5ff13fbcSAllan Jude }
4014*5ff13fbcSAllan Jude }
4015*5ff13fbcSAllan Jude
4016*5ff13fbcSAllan Jude XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4017*5ff13fbcSAllan Jude {
4018*5ff13fbcSAllan Jude XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
4019*5ff13fbcSAllan Jude (void)(&XXH_writeLE64);
4020*5ff13fbcSAllan Jude { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
4021*5ff13fbcSAllan Jude
4022*5ff13fbcSAllan Jude # if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
4023*5ff13fbcSAllan Jude /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
4024*5ff13fbcSAllan Jude XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
4025*5ff13fbcSAllan Jude __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
4026*5ff13fbcSAllan Jude # else
4027*5ff13fbcSAllan Jude __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
4028*5ff13fbcSAllan Jude # endif
4029*5ff13fbcSAllan Jude int i;
4030*5ff13fbcSAllan Jude
4031*5ff13fbcSAllan Jude const void* const src16 = XXH3_kSecret;
4032*5ff13fbcSAllan Jude __m128i* dst16 = (__m128i*) customSecret;
4033*5ff13fbcSAllan Jude # if defined(__GNUC__) || defined(__clang__)
4034*5ff13fbcSAllan Jude /*
4035*5ff13fbcSAllan Jude          * On GCC & Clang, marking 'dst16' as modified causes the compiler to:
4036*5ff13fbcSAllan Jude          * - not extract the secret from SSE registers in the internal loop
4037*5ff13fbcSAllan Jude          * - use fewer common registers, and avoid pushing these registers onto the stack
4038*5ff13fbcSAllan Jude */
4039*5ff13fbcSAllan Jude XXH_COMPILER_GUARD(dst16);
4040*5ff13fbcSAllan Jude # endif
4041*5ff13fbcSAllan Jude XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
4042*5ff13fbcSAllan Jude XXH_ASSERT(((size_t)dst16 & 15) == 0);
4043*5ff13fbcSAllan Jude
4044*5ff13fbcSAllan Jude for (i=0; i < nbRounds; ++i) {
4045*5ff13fbcSAllan Jude dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
4046*5ff13fbcSAllan Jude } }
4047*5ff13fbcSAllan Jude }
4048*5ff13fbcSAllan Jude
4049*5ff13fbcSAllan Jude #endif
4050*5ff13fbcSAllan Jude
4051*5ff13fbcSAllan Jude #if (XXH_VECTOR == XXH_NEON)
4052*5ff13fbcSAllan Jude
4053*5ff13fbcSAllan Jude /* forward declarations for the scalar routines */
4054*5ff13fbcSAllan Jude XXH_FORCE_INLINE void
4055*5ff13fbcSAllan Jude XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
4056*5ff13fbcSAllan Jude void const* XXH_RESTRICT secret, size_t lane);
4057*5ff13fbcSAllan Jude
4058*5ff13fbcSAllan Jude XXH_FORCE_INLINE void
4059*5ff13fbcSAllan Jude XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
4060*5ff13fbcSAllan Jude void const* XXH_RESTRICT secret, size_t lane);
4061*5ff13fbcSAllan Jude
4062*5ff13fbcSAllan Jude /*!
4063*5ff13fbcSAllan Jude * @internal
4064*5ff13fbcSAllan Jude * @brief The bulk processing loop for NEON.
4065*5ff13fbcSAllan Jude *
4066*5ff13fbcSAllan Jude * The NEON code path is actually partially scalar when running on AArch64. This
4067*5ff13fbcSAllan Jude * is to optimize the pipelining and can have up to 15% speedup depending on the
4068*5ff13fbcSAllan Jude * CPU, and it also mitigates some GCC codegen issues.
4069*5ff13fbcSAllan Jude *
4070*5ff13fbcSAllan Jude * @see XXH3_NEON_LANES for configuring this and details about this optimization.
4071*5ff13fbcSAllan Jude */
4072*5ff13fbcSAllan Jude XXH_FORCE_INLINE void
4073*5ff13fbcSAllan Jude XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
4074*5ff13fbcSAllan Jude const void* XXH_RESTRICT input,
4075*5ff13fbcSAllan Jude const void* XXH_RESTRICT secret)
4076*5ff13fbcSAllan Jude {
4077*5ff13fbcSAllan Jude XXH_ASSERT((((size_t)acc) & 15) == 0);
4078*5ff13fbcSAllan Jude XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0);
4079*5ff13fbcSAllan Jude {
4080*5ff13fbcSAllan Jude uint64x2_t* const xacc = (uint64x2_t *) acc;
4081*5ff13fbcSAllan Jude /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
4082*5ff13fbcSAllan Jude uint8_t const* const xinput = (const uint8_t *) input;
4083*5ff13fbcSAllan Jude uint8_t const* const xsecret = (const uint8_t *) secret;
4084*5ff13fbcSAllan Jude
4085*5ff13fbcSAllan Jude size_t i;
4086*5ff13fbcSAllan Jude /* NEON for the first few lanes (these loops are normally interleaved) */
4087*5ff13fbcSAllan Jude for (i=0; i < XXH3_NEON_LANES / 2; i++) {
4088*5ff13fbcSAllan Jude /* data_vec = xinput[i]; */
4089*5ff13fbcSAllan Jude uint8x16_t data_vec = vld1q_u8(xinput + (i * 16));
4090*5ff13fbcSAllan Jude /* key_vec = xsecret[i]; */
4091*5ff13fbcSAllan Jude uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16));
4092*5ff13fbcSAllan Jude uint64x2_t data_key;
4093*5ff13fbcSAllan Jude uint32x2_t data_key_lo, data_key_hi;
4094*5ff13fbcSAllan Jude /* xacc[i] += swap(data_vec); */
4095*5ff13fbcSAllan Jude uint64x2_t const data64 = vreinterpretq_u64_u8(data_vec);
4096*5ff13fbcSAllan Jude uint64x2_t const swapped = vextq_u64(data64, data64, 1);
4097*5ff13fbcSAllan Jude xacc[i] = vaddq_u64 (xacc[i], swapped);
4098*5ff13fbcSAllan Jude /* data_key = data_vec ^ key_vec; */
4099*5ff13fbcSAllan Jude data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec));
4100*5ff13fbcSAllan Jude /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF);
4101*5ff13fbcSAllan Jude * data_key_hi = (uint32x2_t) (data_key >> 32);
4102*5ff13fbcSAllan Jude * data_key = UNDEFINED; */
4103*5ff13fbcSAllan Jude XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
4104*5ff13fbcSAllan Jude /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
4105*5ff13fbcSAllan Jude xacc[i] = vmlal_u32 (xacc[i], data_key_lo, data_key_hi);
4106*5ff13fbcSAllan Jude
4107*5ff13fbcSAllan Jude }
4108*5ff13fbcSAllan Jude /* Scalar for the remainder. This may be a zero iteration loop. */
4109*5ff13fbcSAllan Jude for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
4110*5ff13fbcSAllan Jude XXH3_scalarRound(acc, input, secret, i);
4111*5ff13fbcSAllan Jude }
4112*5ff13fbcSAllan Jude }
4113*5ff13fbcSAllan Jude }
4114*5ff13fbcSAllan Jude
4115*5ff13fbcSAllan Jude XXH_FORCE_INLINE void
4116*5ff13fbcSAllan Jude XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4117*5ff13fbcSAllan Jude {
4118*5ff13fbcSAllan Jude XXH_ASSERT((((size_t)acc) & 15) == 0);
4119*5ff13fbcSAllan Jude
4120*5ff13fbcSAllan Jude { uint64x2_t* xacc = (uint64x2_t*) acc;
4121*5ff13fbcSAllan Jude uint8_t const* xsecret = (uint8_t const*) secret;
4122*5ff13fbcSAllan Jude uint32x2_t prime = vdup_n_u32 (XXH_PRIME32_1);
4123*5ff13fbcSAllan Jude
4124*5ff13fbcSAllan Jude size_t i;
4125*5ff13fbcSAllan Jude /* NEON for the first few lanes (these loops are normally interleaved) */
4126*5ff13fbcSAllan Jude for (i=0; i < XXH3_NEON_LANES / 2; i++) {
4127*5ff13fbcSAllan Jude /* xacc[i] ^= (xacc[i] >> 47); */
4128*5ff13fbcSAllan Jude uint64x2_t acc_vec = xacc[i];
4129*5ff13fbcSAllan Jude uint64x2_t shifted = vshrq_n_u64 (acc_vec, 47);
4130*5ff13fbcSAllan Jude uint64x2_t data_vec = veorq_u64 (acc_vec, shifted);
4131*5ff13fbcSAllan Jude
4132*5ff13fbcSAllan Jude /* xacc[i] ^= xsecret[i]; */
4133*5ff13fbcSAllan Jude uint8x16_t key_vec = vld1q_u8 (xsecret + (i * 16));
4134*5ff13fbcSAllan Jude uint64x2_t data_key = veorq_u64 (data_vec, vreinterpretq_u64_u8(key_vec));
4135*5ff13fbcSAllan Jude
4136*5ff13fbcSAllan Jude /* xacc[i] *= XXH_PRIME32_1 */
4137*5ff13fbcSAllan Jude uint32x2_t data_key_lo, data_key_hi;
4138*5ff13fbcSAllan Jude /* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF);
4139*5ff13fbcSAllan Jude * data_key_hi = (uint32x2_t) (xacc[i] >> 32);
4140*5ff13fbcSAllan Jude * xacc[i] = UNDEFINED; */
4141*5ff13fbcSAllan Jude XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
4142*5ff13fbcSAllan Jude { /*
4143*5ff13fbcSAllan Jude * prod_hi = (data_key >> 32) * XXH_PRIME32_1;
4144*5ff13fbcSAllan Jude *
4145*5ff13fbcSAllan Jude * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will
4146*5ff13fbcSAllan Jude * incorrectly "optimize" this:
4147*5ff13fbcSAllan Jude * tmp = vmul_u32(vmovn_u64(a), vmovn_u64(b));
4148*5ff13fbcSAllan Jude * shifted = vshll_n_u32(tmp, 32);
4149*5ff13fbcSAllan Jude * to this:
4150*5ff13fbcSAllan Jude * tmp = "vmulq_u64"(a, b); // no such thing!
4151*5ff13fbcSAllan Jude * shifted = vshlq_n_u64(tmp, 32);
4152*5ff13fbcSAllan Jude *
4153*5ff13fbcSAllan Jude * However, unlike SSE, Clang lacks a 64-bit multiply routine
4154*5ff13fbcSAllan Jude * for NEON, and it scalarizes two 64-bit multiplies instead.
4155*5ff13fbcSAllan Jude *
4156*5ff13fbcSAllan Jude * vmull_u32 has the same timing as vmul_u32, and it avoids
4157*5ff13fbcSAllan Jude * this bug completely.
4158*5ff13fbcSAllan Jude * See https://bugs.llvm.org/show_bug.cgi?id=39967
4159*5ff13fbcSAllan Jude */
4160*5ff13fbcSAllan Jude uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime);
4161*5ff13fbcSAllan Jude /* xacc[i] = prod_hi << 32; */
4162*5ff13fbcSAllan Jude xacc[i] = vshlq_n_u64(prod_hi, 32);
4163*5ff13fbcSAllan Jude                 /* xacc[i] += (data_key & 0xFFFFFFFF) * XXH_PRIME32_1; */
4164*5ff13fbcSAllan Jude xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime);
4165*5ff13fbcSAllan Jude }
4166*5ff13fbcSAllan Jude }
4167*5ff13fbcSAllan Jude /* Scalar for the remainder. This may be a zero iteration loop. */
4168*5ff13fbcSAllan Jude for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
4169*5ff13fbcSAllan Jude XXH3_scalarScrambleRound(acc, secret, i);
4170*5ff13fbcSAllan Jude }
4171*5ff13fbcSAllan Jude }
4172*5ff13fbcSAllan Jude }
4173*5ff13fbcSAllan Jude
4174*5ff13fbcSAllan Jude #endif
4175*5ff13fbcSAllan Jude
4176*5ff13fbcSAllan Jude #if (XXH_VECTOR == XXH_VSX)
4177*5ff13fbcSAllan Jude
4178*5ff13fbcSAllan Jude XXH_FORCE_INLINE void
4179*5ff13fbcSAllan Jude XXH3_accumulate_512_vsx(  void* XXH_RESTRICT acc,
4180*5ff13fbcSAllan Jude const void* XXH_RESTRICT input,
4181*5ff13fbcSAllan Jude const void* XXH_RESTRICT secret)
4182*5ff13fbcSAllan Jude {
4183*5ff13fbcSAllan Jude /* presumed aligned */
4184*5ff13fbcSAllan Jude unsigned int* const xacc = (unsigned int*) acc;
4185*5ff13fbcSAllan Jude xxh_u64x2 const* const xinput = (xxh_u64x2 const*) input; /* no alignment restriction */
4186*5ff13fbcSAllan Jude xxh_u64x2 const* const xsecret = (xxh_u64x2 const*) secret; /* no alignment restriction */
4187*5ff13fbcSAllan Jude xxh_u64x2 const v32 = { 32, 32 };
4188*5ff13fbcSAllan Jude size_t i;
4189*5ff13fbcSAllan Jude for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
4190*5ff13fbcSAllan Jude /* data_vec = xinput[i]; */
4191*5ff13fbcSAllan Jude xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i);
4192*5ff13fbcSAllan Jude /* key_vec = xsecret[i]; */
4193*5ff13fbcSAllan Jude xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
4194*5ff13fbcSAllan Jude xxh_u64x2 const data_key = data_vec ^ key_vec;
4195*5ff13fbcSAllan Jude /* shuffled = (data_key << 32) | (data_key >> 32); */
4196*5ff13fbcSAllan Jude xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
4197*5ff13fbcSAllan Jude /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
4198*5ff13fbcSAllan Jude xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
4199*5ff13fbcSAllan Jude /* acc_vec = xacc[i]; */
4200*5ff13fbcSAllan Jude xxh_u64x2 acc_vec = (xxh_u64x2)vec_xl(0, xacc + 4 * i);
4201*5ff13fbcSAllan Jude acc_vec += product;
4202*5ff13fbcSAllan Jude
4203*5ff13fbcSAllan Jude /* swap high and low halves */
4204*5ff13fbcSAllan Jude #ifdef __s390x__
4205*5ff13fbcSAllan Jude acc_vec += vec_permi(data_vec, data_vec, 2);
4206*5ff13fbcSAllan Jude #else
4207*5ff13fbcSAllan Jude acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
4208*5ff13fbcSAllan Jude #endif
4209*5ff13fbcSAllan Jude /* xacc[i] = acc_vec; */
4210*5ff13fbcSAllan Jude vec_xst((xxh_u32x4)acc_vec, 0, xacc + 4 * i);
4211*5ff13fbcSAllan Jude }
4212*5ff13fbcSAllan Jude }
4213*5ff13fbcSAllan Jude
4214*5ff13fbcSAllan Jude XXH_FORCE_INLINE void
4215*5ff13fbcSAllan Jude XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4216*5ff13fbcSAllan Jude {
4217*5ff13fbcSAllan Jude XXH_ASSERT((((size_t)acc) & 15) == 0);
4218*5ff13fbcSAllan Jude
4219*5ff13fbcSAllan Jude { xxh_u64x2* const xacc = (xxh_u64x2*) acc;
4220*5ff13fbcSAllan Jude const xxh_u64x2* const xsecret = (const xxh_u64x2*) secret;
4221*5ff13fbcSAllan Jude /* constants */
4222*5ff13fbcSAllan Jude xxh_u64x2 const v32 = { 32, 32 };
4223*5ff13fbcSAllan Jude xxh_u64x2 const v47 = { 47, 47 };
4224*5ff13fbcSAllan Jude xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
4225*5ff13fbcSAllan Jude size_t i;
4226*5ff13fbcSAllan Jude for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
4227*5ff13fbcSAllan Jude /* xacc[i] ^= (xacc[i] >> 47); */
4228*5ff13fbcSAllan Jude xxh_u64x2 const acc_vec = xacc[i];
4229*5ff13fbcSAllan Jude xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
4230*5ff13fbcSAllan Jude
4231*5ff13fbcSAllan Jude /* xacc[i] ^= xsecret[i]; */
4232*5ff13fbcSAllan Jude xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
4233*5ff13fbcSAllan Jude xxh_u64x2 const data_key = data_vec ^ key_vec;
4234*5ff13fbcSAllan Jude
4235*5ff13fbcSAllan Jude /* xacc[i] *= XXH_PRIME32_1 */
4236*5ff13fbcSAllan Jude /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */
4237*5ff13fbcSAllan Jude xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
4238*5ff13fbcSAllan Jude /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */
4239*5ff13fbcSAllan Jude xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime);
4240*5ff13fbcSAllan Jude xacc[i] = prod_odd + (prod_even << v32);
4241*5ff13fbcSAllan Jude } }
4242*5ff13fbcSAllan Jude }
4243*5ff13fbcSAllan Jude
4244*5ff13fbcSAllan Jude #endif
4245*5ff13fbcSAllan Jude
4246*5ff13fbcSAllan Jude /* scalar variants - universal */
4247*5ff13fbcSAllan Jude
4248*5ff13fbcSAllan Jude /*!
4249*5ff13fbcSAllan Jude * @internal
4250*5ff13fbcSAllan Jude * @brief Scalar round for @ref XXH3_accumulate_512_scalar().
4251*5ff13fbcSAllan Jude *
4252*5ff13fbcSAllan Jude * This is extracted to its own function because the NEON path uses a combination
4253*5ff13fbcSAllan Jude * of NEON and scalar.
4254*5ff13fbcSAllan Jude */
4255*5ff13fbcSAllan Jude XXH_FORCE_INLINE void
4256*5ff13fbcSAllan Jude XXH3_scalarRound(void* XXH_RESTRICT acc,
4257*5ff13fbcSAllan Jude void const* XXH_RESTRICT input,
4258*5ff13fbcSAllan Jude void const* XXH_RESTRICT secret,
4259*5ff13fbcSAllan Jude size_t lane)
4260*5ff13fbcSAllan Jude {
4261*5ff13fbcSAllan Jude xxh_u64* xacc = (xxh_u64*) acc;
4262*5ff13fbcSAllan Jude xxh_u8 const* xinput = (xxh_u8 const*) input;
4263*5ff13fbcSAllan Jude xxh_u8 const* xsecret = (xxh_u8 const*) secret;
4264*5ff13fbcSAllan Jude XXH_ASSERT(lane < XXH_ACC_NB);
4265*5ff13fbcSAllan Jude XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
4266*5ff13fbcSAllan Jude {
4267*5ff13fbcSAllan Jude xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8);
4268*5ff13fbcSAllan Jude xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8);
4269*5ff13fbcSAllan Jude xacc[lane ^ 1] += data_val; /* swap adjacent lanes */
4270*5ff13fbcSAllan Jude xacc[lane] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
4271*5ff13fbcSAllan Jude }
4272*5ff13fbcSAllan Jude }
4273*5ff13fbcSAllan Jude
4274*5ff13fbcSAllan Jude /*!
4275*5ff13fbcSAllan Jude * @internal
4276*5ff13fbcSAllan Jude * @brief Processes a 64 byte block of data using the scalar path.
4277*5ff13fbcSAllan Jude */
4278*5ff13fbcSAllan Jude XXH_FORCE_INLINE void
4279*5ff13fbcSAllan Jude XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
4280*5ff13fbcSAllan Jude const void* XXH_RESTRICT input,
4281*5ff13fbcSAllan Jude const void* XXH_RESTRICT secret)
4282*5ff13fbcSAllan Jude {
4283*5ff13fbcSAllan Jude size_t i;
4284*5ff13fbcSAllan Jude for (i=0; i < XXH_ACC_NB; i++) {
4285*5ff13fbcSAllan Jude XXH3_scalarRound(acc, input, secret, i);
4286*5ff13fbcSAllan Jude }
4287*5ff13fbcSAllan Jude }
4288*5ff13fbcSAllan Jude
4289*5ff13fbcSAllan Jude /*!
4290*5ff13fbcSAllan Jude * @internal
4291*5ff13fbcSAllan Jude * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar().
4292*5ff13fbcSAllan Jude *
4293*5ff13fbcSAllan Jude * This is extracted to its own function because the NEON path uses a combination
4294*5ff13fbcSAllan Jude * of NEON and scalar.
4295*5ff13fbcSAllan Jude */
4296*5ff13fbcSAllan Jude XXH_FORCE_INLINE void
4297*5ff13fbcSAllan Jude XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
4298*5ff13fbcSAllan Jude void const* XXH_RESTRICT secret,
4299*5ff13fbcSAllan Jude size_t lane)
4300*5ff13fbcSAllan Jude {
4301*5ff13fbcSAllan Jude xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
4302*5ff13fbcSAllan Jude const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
4303*5ff13fbcSAllan Jude XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
4304*5ff13fbcSAllan Jude XXH_ASSERT(lane < XXH_ACC_NB);
4305*5ff13fbcSAllan Jude {
4306*5ff13fbcSAllan Jude xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
4307*5ff13fbcSAllan Jude xxh_u64 acc64 = xacc[lane];
4308*5ff13fbcSAllan Jude acc64 = XXH_xorshift64(acc64, 47);
4309*5ff13fbcSAllan Jude acc64 ^= key64;
4310*5ff13fbcSAllan Jude acc64 *= XXH_PRIME32_1;
4311*5ff13fbcSAllan Jude xacc[lane] = acc64;
4312*5ff13fbcSAllan Jude }
4313*5ff13fbcSAllan Jude }
4314*5ff13fbcSAllan Jude
4315*5ff13fbcSAllan Jude /*!
4316*5ff13fbcSAllan Jude * @internal
4317*5ff13fbcSAllan Jude * @brief Scrambles the accumulators after a large chunk has been read
4318*5ff13fbcSAllan Jude */
4319*5ff13fbcSAllan Jude XXH_FORCE_INLINE void
4320*5ff13fbcSAllan Jude XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4321*5ff13fbcSAllan Jude {
4322*5ff13fbcSAllan Jude size_t i;
4323*5ff13fbcSAllan Jude for (i=0; i < XXH_ACC_NB; i++) {
4324*5ff13fbcSAllan Jude XXH3_scalarScrambleRound(acc, secret, i);
4325*5ff13fbcSAllan Jude }
4326*5ff13fbcSAllan Jude }
4327*5ff13fbcSAllan Jude
4328*5ff13fbcSAllan Jude XXH_FORCE_INLINE void
4329*5ff13fbcSAllan Jude XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4330*5ff13fbcSAllan Jude {
4331*5ff13fbcSAllan Jude /*
4332*5ff13fbcSAllan Jude * We need a separate pointer for the hack below,
4333*5ff13fbcSAllan Jude * which requires a non-const pointer.
4334*5ff13fbcSAllan Jude * Any decent compiler will optimize this out otherwise.
4335*5ff13fbcSAllan Jude */
4336*5ff13fbcSAllan Jude const xxh_u8* kSecretPtr = XXH3_kSecret;
4337*5ff13fbcSAllan Jude XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
4338*5ff13fbcSAllan Jude
4339*5ff13fbcSAllan Jude #if defined(__clang__) && defined(__aarch64__)
4340*5ff13fbcSAllan Jude /*
4341*5ff13fbcSAllan Jude * UGLY HACK:
4342*5ff13fbcSAllan Jude * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are
4343*5ff13fbcSAllan Jude * placed sequentially, in order, at the top of the unrolled loop.
4344*5ff13fbcSAllan Jude *
4345*5ff13fbcSAllan Jude * While MOVK is great for generating constants (2 cycles for a 64-bit
4346*5ff13fbcSAllan Jude * constant compared to 4 cycles for LDR), it fights for bandwidth with
4347*5ff13fbcSAllan Jude * the arithmetic instructions.
4348*5ff13fbcSAllan Jude *
4349*5ff13fbcSAllan Jude * I L S
4350*5ff13fbcSAllan Jude * MOVK
4351*5ff13fbcSAllan Jude * MOVK
4352*5ff13fbcSAllan Jude * MOVK
4353*5ff13fbcSAllan Jude * MOVK
4354*5ff13fbcSAllan Jude * ADD
4355*5ff13fbcSAllan Jude * SUB STR
4356*5ff13fbcSAllan Jude * STR
4357*5ff13fbcSAllan Jude * By forcing loads from memory (as the asm line causes Clang to assume
4358*5ff13fbcSAllan Jude  * that kSecretPtr has been changed), the pipelines are used more
4359*5ff13fbcSAllan Jude * efficiently:
4360*5ff13fbcSAllan Jude * I L S
4361*5ff13fbcSAllan Jude * LDR
4362*5ff13fbcSAllan Jude * ADD LDR
4363*5ff13fbcSAllan Jude * SUB STR
4364*5ff13fbcSAllan Jude * STR
4365*5ff13fbcSAllan Jude *
4366*5ff13fbcSAllan Jude  * See XXH3_NEON_LANES for details on the pipeline.
4367*5ff13fbcSAllan Jude *
4368*5ff13fbcSAllan Jude * XXH3_64bits_withSeed, len == 256, Snapdragon 835
4369*5ff13fbcSAllan Jude * without hack: 2654.4 MB/s
4370*5ff13fbcSAllan Jude * with hack: 3202.9 MB/s
4371*5ff13fbcSAllan Jude */
4372*5ff13fbcSAllan Jude XXH_COMPILER_GUARD(kSecretPtr);
4373*5ff13fbcSAllan Jude #endif
4374*5ff13fbcSAllan Jude /*
4375*5ff13fbcSAllan Jude * Note: in debug mode, this overrides the asm optimization
4376*5ff13fbcSAllan Jude * and Clang will emit MOVK chains again.
4377*5ff13fbcSAllan Jude */
4378*5ff13fbcSAllan Jude XXH_ASSERT(kSecretPtr == XXH3_kSecret);
4379*5ff13fbcSAllan Jude
4380*5ff13fbcSAllan Jude { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
4381*5ff13fbcSAllan Jude int i;
4382*5ff13fbcSAllan Jude for (i=0; i < nbRounds; i++) {
4383*5ff13fbcSAllan Jude /*
4384*5ff13fbcSAllan Jude * The asm hack causes Clang to assume that kSecretPtr aliases with
4385*5ff13fbcSAllan Jude * customSecret, and on aarch64, this prevented LDP from merging two
4386*5ff13fbcSAllan Jude * loads together for free. Putting the loads together before the stores
4387*5ff13fbcSAllan Jude * properly generates LDP.
4388*5ff13fbcSAllan Jude */
4389*5ff13fbcSAllan Jude xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64;
4390*5ff13fbcSAllan Jude xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
4391*5ff13fbcSAllan Jude XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo);
4392*5ff13fbcSAllan Jude XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
4393*5ff13fbcSAllan Jude } }
4394*5ff13fbcSAllan Jude }
4395*5ff13fbcSAllan Jude
4396*5ff13fbcSAllan Jude
4397*5ff13fbcSAllan Jude typedef void (*XXH3_f_accumulate_512)(void* XXH_RESTRICT, const void*, const void*);
4398*5ff13fbcSAllan Jude typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
4399*5ff13fbcSAllan Jude typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
4400*5ff13fbcSAllan Jude
4401*5ff13fbcSAllan Jude
4402*5ff13fbcSAllan Jude #if (XXH_VECTOR == XXH_AVX512)
4403*5ff13fbcSAllan Jude
4404*5ff13fbcSAllan Jude #define XXH3_accumulate_512 XXH3_accumulate_512_avx512
4405*5ff13fbcSAllan Jude #define XXH3_scrambleAcc XXH3_scrambleAcc_avx512
4406*5ff13fbcSAllan Jude #define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
4407*5ff13fbcSAllan Jude
4408*5ff13fbcSAllan Jude #elif (XXH_VECTOR == XXH_AVX2)
4409*5ff13fbcSAllan Jude
4410*5ff13fbcSAllan Jude #define XXH3_accumulate_512 XXH3_accumulate_512_avx2
4411*5ff13fbcSAllan Jude #define XXH3_scrambleAcc XXH3_scrambleAcc_avx2
4412*5ff13fbcSAllan Jude #define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
4413*5ff13fbcSAllan Jude
4414*5ff13fbcSAllan Jude #elif (XXH_VECTOR == XXH_SSE2)
4415*5ff13fbcSAllan Jude
4416*5ff13fbcSAllan Jude #define XXH3_accumulate_512 XXH3_accumulate_512_sse2
4417*5ff13fbcSAllan Jude #define XXH3_scrambleAcc XXH3_scrambleAcc_sse2
4418*5ff13fbcSAllan Jude #define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
4419*5ff13fbcSAllan Jude
4420*5ff13fbcSAllan Jude #elif (XXH_VECTOR == XXH_NEON)
4421*5ff13fbcSAllan Jude
4422*5ff13fbcSAllan Jude #define XXH3_accumulate_512 XXH3_accumulate_512_neon
4423*5ff13fbcSAllan Jude #define XXH3_scrambleAcc XXH3_scrambleAcc_neon
4424*5ff13fbcSAllan Jude #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4425*5ff13fbcSAllan Jude
4426*5ff13fbcSAllan Jude #elif (XXH_VECTOR == XXH_VSX)
4427*5ff13fbcSAllan Jude
4428*5ff13fbcSAllan Jude #define XXH3_accumulate_512 XXH3_accumulate_512_vsx
4429*5ff13fbcSAllan Jude #define XXH3_scrambleAcc XXH3_scrambleAcc_vsx
4430*5ff13fbcSAllan Jude #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4431*5ff13fbcSAllan Jude
4432*5ff13fbcSAllan Jude #else /* scalar */
4433*5ff13fbcSAllan Jude
4434*5ff13fbcSAllan Jude #define XXH3_accumulate_512 XXH3_accumulate_512_scalar
4435*5ff13fbcSAllan Jude #define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
4436*5ff13fbcSAllan Jude #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4437*5ff13fbcSAllan Jude
4438*5ff13fbcSAllan Jude #endif
4439*5ff13fbcSAllan Jude
4440*5ff13fbcSAllan Jude
4441*5ff13fbcSAllan Jude
4442*5ff13fbcSAllan Jude #ifndef XXH_PREFETCH_DIST
4443*5ff13fbcSAllan Jude # ifdef __clang__
4444*5ff13fbcSAllan Jude # define XXH_PREFETCH_DIST 320
4445*5ff13fbcSAllan Jude # else
4446*5ff13fbcSAllan Jude # if (XXH_VECTOR == XXH_AVX512)
4447*5ff13fbcSAllan Jude # define XXH_PREFETCH_DIST 512
4448*5ff13fbcSAllan Jude # else
4449*5ff13fbcSAllan Jude # define XXH_PREFETCH_DIST 384
4450*5ff13fbcSAllan Jude # endif
4451*5ff13fbcSAllan Jude # endif /* __clang__ */
4452*5ff13fbcSAllan Jude #endif /* XXH_PREFETCH_DIST */
4453*5ff13fbcSAllan Jude
4454*5ff13fbcSAllan Jude /*
4455*5ff13fbcSAllan Jude * XXH3_accumulate()
4456*5ff13fbcSAllan Jude * Loops over XXH3_accumulate_512().
4457*5ff13fbcSAllan Jude * Assumption: nbStripes will not overflow the secret size
4458*5ff13fbcSAllan Jude */
4459*5ff13fbcSAllan Jude XXH_FORCE_INLINE void
4460*5ff13fbcSAllan Jude XXH3_accumulate(     xxh_u64* XXH_RESTRICT acc,
4461*5ff13fbcSAllan Jude const xxh_u8* XXH_RESTRICT input,
4462*5ff13fbcSAllan Jude const xxh_u8* XXH_RESTRICT secret,
4463*5ff13fbcSAllan Jude size_t nbStripes,
4464*5ff13fbcSAllan Jude XXH3_f_accumulate_512 f_acc512)
4465*5ff13fbcSAllan Jude {
4466*5ff13fbcSAllan Jude size_t n;
4467*5ff13fbcSAllan Jude for (n = 0; n < nbStripes; n++ ) {
4468*5ff13fbcSAllan Jude const xxh_u8* const in = input + n*XXH_STRIPE_LEN;
4469*5ff13fbcSAllan Jude XXH_PREFETCH(in + XXH_PREFETCH_DIST);
4470*5ff13fbcSAllan Jude f_acc512(acc,
4471*5ff13fbcSAllan Jude in,
4472*5ff13fbcSAllan Jude secret + n*XXH_SECRET_CONSUME_RATE);
4473*5ff13fbcSAllan Jude }
4474*5ff13fbcSAllan Jude }
4475*5ff13fbcSAllan Jude
4476*5ff13fbcSAllan Jude XXH_FORCE_INLINE void
4477*5ff13fbcSAllan Jude XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
4478*5ff13fbcSAllan Jude const xxh_u8* XXH_RESTRICT input, size_t len,
4479*5ff13fbcSAllan Jude const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
4480*5ff13fbcSAllan Jude XXH3_f_accumulate_512 f_acc512,
4481*5ff13fbcSAllan Jude XXH3_f_scrambleAcc f_scramble)
4482*5ff13fbcSAllan Jude {
4483*5ff13fbcSAllan Jude size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
4484*5ff13fbcSAllan Jude size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
4485*5ff13fbcSAllan Jude size_t const nb_blocks = (len - 1) / block_len;
4486*5ff13fbcSAllan Jude
4487*5ff13fbcSAllan Jude size_t n;
4488*5ff13fbcSAllan Jude
4489*5ff13fbcSAllan Jude XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
4490*5ff13fbcSAllan Jude
4491*5ff13fbcSAllan Jude for (n = 0; n < nb_blocks; n++) {
4492*5ff13fbcSAllan Jude XXH3_accumulate(acc, input + n*block_len, secret, nbStripesPerBlock, f_acc512);
4493*5ff13fbcSAllan Jude f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
4494*5ff13fbcSAllan Jude }
4495*5ff13fbcSAllan Jude
4496*5ff13fbcSAllan Jude /* last partial block */
4497*5ff13fbcSAllan Jude XXH_ASSERT(len > XXH_STRIPE_LEN);
4498*5ff13fbcSAllan Jude { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
4499*5ff13fbcSAllan Jude XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
4500*5ff13fbcSAllan Jude XXH3_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, f_acc512);
4501*5ff13fbcSAllan Jude
4502*5ff13fbcSAllan Jude /* last stripe */
4503*5ff13fbcSAllan Jude { const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
4504*5ff13fbcSAllan Jude #define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */
4505*5ff13fbcSAllan Jude f_acc512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
4506*5ff13fbcSAllan Jude } }
4507*5ff13fbcSAllan Jude }
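/*
 * Worked example (illustrative), using the default 192-byte secret:
 *   nbStripesPerBlock = (192 - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE
 *                     = (192 - 64) / 8  = 16 stripes
 *   block_len         = 64 * 16         = 1024 bytes
 * A 10240-byte input thus runs 9 full blocks (each followed by a scramble),
 * then 15 stripes of the last partial block, then the final stripe covering
 * the last 64 bytes of input (read with a different secret offset).
 */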
4508*5ff13fbcSAllan Jude
4509*5ff13fbcSAllan Jude XXH_FORCE_INLINE xxh_u64
4510*5ff13fbcSAllan Jude XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
4511*5ff13fbcSAllan Jude {
4512*5ff13fbcSAllan Jude return XXH3_mul128_fold64(
4513*5ff13fbcSAllan Jude acc[0] ^ XXH_readLE64(secret),
4514*5ff13fbcSAllan Jude acc[1] ^ XXH_readLE64(secret+8) );
4515*5ff13fbcSAllan Jude }
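/*
 * XXH3_mul128_fold64(a, b) computes the full 128-bit product a * b and
 * folds it to 64 bits as (low half) ^ (high half), condensing each pair
 * of accumulators into one well-mixed 64-bit word.
 */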
4516*5ff13fbcSAllan Jude
4517*5ff13fbcSAllan Jude static XXH64_hash_t
4518*5ff13fbcSAllan Jude XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
4519*5ff13fbcSAllan Jude {
4520*5ff13fbcSAllan Jude xxh_u64 result64 = start;
4521*5ff13fbcSAllan Jude size_t i = 0;
4522*5ff13fbcSAllan Jude
4523*5ff13fbcSAllan Jude for (i = 0; i < 4; i++) {
4524*5ff13fbcSAllan Jude result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
4525*5ff13fbcSAllan Jude #if defined(__clang__) /* Clang */ \
4526*5ff13fbcSAllan Jude && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \
4527*5ff13fbcSAllan Jude && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
4528*5ff13fbcSAllan Jude && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
4529*5ff13fbcSAllan Jude /*
4530*5ff13fbcSAllan Jude * UGLY HACK:
4531*5ff13fbcSAllan Jude * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
4532*5ff13fbcSAllan Jude * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
4533*5ff13fbcSAllan Jude * XXH3_64bits, len == 256, Snapdragon 835:
4534*5ff13fbcSAllan Jude * without hack: 2063.7 MB/s
4535*5ff13fbcSAllan Jude * with hack: 2560.7 MB/s
4536*5ff13fbcSAllan Jude */
4537*5ff13fbcSAllan Jude XXH_COMPILER_GUARD(result64);
4538*5ff13fbcSAllan Jude #endif
4539*5ff13fbcSAllan Jude }
4540*5ff13fbcSAllan Jude
4541*5ff13fbcSAllan Jude return XXH3_avalanche(result64);
4542*5ff13fbcSAllan Jude }
4543*5ff13fbcSAllan Jude
4544*5ff13fbcSAllan Jude #define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
4545*5ff13fbcSAllan Jude XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
4546*5ff13fbcSAllan Jude
4547*5ff13fbcSAllan Jude XXH_FORCE_INLINE XXH64_hash_t
4548*5ff13fbcSAllan Jude XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
4549*5ff13fbcSAllan Jude const void* XXH_RESTRICT secret, size_t secretSize,
4550*5ff13fbcSAllan Jude XXH3_f_accumulate_512 f_acc512,
4551*5ff13fbcSAllan Jude XXH3_f_scrambleAcc f_scramble)
4552*5ff13fbcSAllan Jude {
4553*5ff13fbcSAllan Jude XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
4554*5ff13fbcSAllan Jude
4555*5ff13fbcSAllan Jude XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc512, f_scramble);
4556*5ff13fbcSAllan Jude
4557*5ff13fbcSAllan Jude /* converge into final hash */
4558*5ff13fbcSAllan Jude XXH_STATIC_ASSERT(sizeof(acc) == 64);
4559*5ff13fbcSAllan Jude /* do not align on 8, so that the secret is different from the accumulator */
4560*5ff13fbcSAllan Jude #define XXH_SECRET_MERGEACCS_START 11
4561*5ff13fbcSAllan Jude XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
4562*5ff13fbcSAllan Jude return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
4563*5ff13fbcSAllan Jude }
4564*5ff13fbcSAllan Jude
4565*5ff13fbcSAllan Jude /*
4566*5ff13fbcSAllan Jude  * It's important for performance to transmit the secret's size (when it's static),
4567*5ff13fbcSAllan Jude  * so that the compiler can properly optimize the vectorized loop.
4568*5ff13fbcSAllan Jude  * This makes a big performance difference for "medium" keys (<1 KB) when using the AVX instruction set.
4569*5ff13fbcSAllan Jude */
4570*5ff13fbcSAllan Jude XXH_FORCE_INLINE XXH64_hash_t
4571*5ff13fbcSAllan Jude XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
4572*5ff13fbcSAllan Jude XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
4573*5ff13fbcSAllan Jude {
4574*5ff13fbcSAllan Jude (void)seed64;
4575*5ff13fbcSAllan Jude return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate_512, XXH3_scrambleAcc);
4576*5ff13fbcSAllan Jude }
4577*5ff13fbcSAllan Jude
4578*5ff13fbcSAllan Jude /*
4579*5ff13fbcSAllan Jude * It's preferable for performance that XXH3_hashLong is not inlined,
4580*5ff13fbcSAllan Jude  * as it results in a smaller function for small data, which is easier on the instruction cache.
4581*5ff13fbcSAllan Jude  * Note that inside this no-inline function, we do inline the internal loop,
4582*5ff13fbcSAllan Jude  * and provide a statically defined secret size to allow optimization of the vector loop.
4583*5ff13fbcSAllan Jude */
4584*5ff13fbcSAllan Jude XXH_NO_INLINE XXH64_hash_t
4585*5ff13fbcSAllan Jude XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
4586*5ff13fbcSAllan Jude XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
4587*5ff13fbcSAllan Jude {
4588*5ff13fbcSAllan Jude (void)seed64; (void)secret; (void)secretLen;
4589*5ff13fbcSAllan Jude return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate_512, XXH3_scrambleAcc);
4590*5ff13fbcSAllan Jude }
4591*5ff13fbcSAllan Jude
4592*5ff13fbcSAllan Jude /*
4593*5ff13fbcSAllan Jude * XXH3_hashLong_64b_withSeed():
4594*5ff13fbcSAllan Jude * Generate a custom key based on alteration of default XXH3_kSecret with the seed,
4595*5ff13fbcSAllan Jude * and then use this key for long mode hashing.
4596*5ff13fbcSAllan Jude *
4597*5ff13fbcSAllan Jude * This operation is decently fast but nonetheless costs a little bit of time.
4598*5ff13fbcSAllan Jude * Try to avoid it whenever possible (typically when seed==0).
4599*5ff13fbcSAllan Jude *
4600*5ff13fbcSAllan Jude * It's important for performance that XXH3_hashLong is not inlined. Not sure
4601*5ff13fbcSAllan Jude * why (uop cache maybe?), but the difference is large and easily measurable.
4602*5ff13fbcSAllan Jude */
4603*5ff13fbcSAllan Jude XXH_FORCE_INLINE XXH64_hash_t
4604*5ff13fbcSAllan Jude XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
4605*5ff13fbcSAllan Jude XXH64_hash_t seed,
4606*5ff13fbcSAllan Jude XXH3_f_accumulate_512 f_acc512,
4607*5ff13fbcSAllan Jude XXH3_f_scrambleAcc f_scramble,
4608*5ff13fbcSAllan Jude XXH3_f_initCustomSecret f_initSec)
4609*5ff13fbcSAllan Jude {
4610*5ff13fbcSAllan Jude if (seed == 0)
4611*5ff13fbcSAllan Jude return XXH3_hashLong_64b_internal(input, len,
4612*5ff13fbcSAllan Jude XXH3_kSecret, sizeof(XXH3_kSecret),
4613*5ff13fbcSAllan Jude f_acc512, f_scramble);
4614*5ff13fbcSAllan Jude { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
4615*5ff13fbcSAllan Jude f_initSec(secret, seed);
4616*5ff13fbcSAllan Jude return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
4617*5ff13fbcSAllan Jude f_acc512, f_scramble);
4618*5ff13fbcSAllan Jude }
4619*5ff13fbcSAllan Jude }
4620*5ff13fbcSAllan Jude
4621*5ff13fbcSAllan Jude /*
4622*5ff13fbcSAllan Jude * It's important for performance that XXH3_hashLong is not inlined.
4623*5ff13fbcSAllan Jude */
4624*5ff13fbcSAllan Jude XXH_NO_INLINE XXH64_hash_t
4625*5ff13fbcSAllan Jude XXH3_hashLong_64b_withSeed(const void* input, size_t len,
4626*5ff13fbcSAllan Jude XXH64_hash_t seed, const xxh_u8* secret, size_t secretLen)
4627*5ff13fbcSAllan Jude {
4628*5ff13fbcSAllan Jude (void)secret; (void)secretLen;
4629*5ff13fbcSAllan Jude return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
4630*5ff13fbcSAllan Jude XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
4631*5ff13fbcSAllan Jude }
4632*5ff13fbcSAllan Jude
4633*5ff13fbcSAllan Jude
4634*5ff13fbcSAllan Jude typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
4635*5ff13fbcSAllan Jude XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
4636*5ff13fbcSAllan Jude
4637*5ff13fbcSAllan Jude XXH_FORCE_INLINE XXH64_hash_t
4638*5ff13fbcSAllan Jude XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
4639*5ff13fbcSAllan Jude XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
4640*5ff13fbcSAllan Jude XXH3_hashLong64_f f_hashLong)
4641*5ff13fbcSAllan Jude {
4642*5ff13fbcSAllan Jude XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
4643*5ff13fbcSAllan Jude /*
4644*5ff13fbcSAllan Jude      * If an action must be taken when the `secretLen` condition is not respected,
4645*5ff13fbcSAllan Jude * it should be done here.
4646*5ff13fbcSAllan Jude * For now, it's a contract pre-condition.
4647*5ff13fbcSAllan Jude * Adding a check and a branch here would cost performance at every hash.
4648*5ff13fbcSAllan Jude * Also, note that function signature doesn't offer room to return an error.
4649*5ff13fbcSAllan Jude */
4650*5ff13fbcSAllan Jude if (len <= 16)
4651*5ff13fbcSAllan Jude return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
4652*5ff13fbcSAllan Jude if (len <= 128)
4653*5ff13fbcSAllan Jude return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
4654*5ff13fbcSAllan Jude if (len <= XXH3_MIDSIZE_MAX)
4655*5ff13fbcSAllan Jude return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
4656*5ff13fbcSAllan Jude return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
4657*5ff13fbcSAllan Jude }
4658*5ff13fbcSAllan Jude
4659*5ff13fbcSAllan Jude
4660*5ff13fbcSAllan Jude /* === Public entry point === */
4661*5ff13fbcSAllan Jude
4662*5ff13fbcSAllan Jude /*! @ingroup xxh3_family */
4663*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len)
4664*5ff13fbcSAllan Jude {
4665*5ff13fbcSAllan Jude return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
4666*5ff13fbcSAllan Jude }
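/*
 * Example (illustrative): one-shot hashing of an in-memory buffer,
 * using the default secret:
 *   XXH64_hash_t const h = XXH3_64bits(buf, bufSize);
 */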
4667*5ff13fbcSAllan Jude
4668*5ff13fbcSAllan Jude /*! @ingroup xxh3_family */
4669*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH64_hash_t
4670*5ff13fbcSAllan Jude XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
4671*5ff13fbcSAllan Jude {
4672*5ff13fbcSAllan Jude return XXH3_64bits_internal(input, len, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
4673*5ff13fbcSAllan Jude }
4674*5ff13fbcSAllan Jude
4675*5ff13fbcSAllan Jude /*! @ingroup xxh3_family */
4676*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH64_hash_t
4677*5ff13fbcSAllan Jude XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
4678*5ff13fbcSAllan Jude {
4679*5ff13fbcSAllan Jude return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
4680*5ff13fbcSAllan Jude }
4681*5ff13fbcSAllan Jude
4682*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH64_hash_t
4683*5ff13fbcSAllan Jude XXH3_64bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed)
4684*5ff13fbcSAllan Jude {
4685*5ff13fbcSAllan Jude if (len <= XXH3_MIDSIZE_MAX)
4686*5ff13fbcSAllan Jude return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
4687*5ff13fbcSAllan Jude return XXH3_hashLong_64b_withSecret(input, len, seed, (const xxh_u8*)secret, secretSize);
4688*5ff13fbcSAllan Jude }
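/*
 * Note: in the function above, inputs of at most XXH3_MIDSIZE_MAX bytes are
 * hashed with the default secret combined with the seed, while longer inputs
 * are hashed with the caller's secret and the seed is ignored
 * (XXH3_hashLong_64b_withSecret discards it).
 */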
4689*5ff13fbcSAllan Jude
4690*5ff13fbcSAllan Jude
4691*5ff13fbcSAllan Jude /* === XXH3 streaming === */
4692*5ff13fbcSAllan Jude
4693*5ff13fbcSAllan Jude /*
4694*5ff13fbcSAllan Jude  * Allocates a pointer that is always aligned to `align`.
4695*5ff13fbcSAllan Jude *
4696*5ff13fbcSAllan Jude * This must be freed with `XXH_alignedFree()`.
4697*5ff13fbcSAllan Jude *
4698*5ff13fbcSAllan Jude * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
4699*5ff13fbcSAllan Jude * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2
4700*5ff13fbcSAllan Jude * or on 32-bit, the 16 byte aligned loads in SSE2 and NEON.
4701*5ff13fbcSAllan Jude *
4702*5ff13fbcSAllan Jude * This underalignment previously caused a rather obvious crash which went
4703*5ff13fbcSAllan Jude * completely unnoticed due to XXH3_createState() not actually being tested.
4704*5ff13fbcSAllan Jude * Credit to RedSpah for noticing this bug.
4705*5ff13fbcSAllan Jude *
4706*5ff13fbcSAllan Jude  * The alignment is done manually: functions like posix_memalign or _mm_malloc
4707*5ff13fbcSAllan Jude  * are avoided because, to maintain portability, we would have to write a
4708*5ff13fbcSAllan Jude  * fallback like this anyway; besides, testing for the existence of library
4709*5ff13fbcSAllan Jude  * functions without relying on external build tools is impossible.
4710*5ff13fbcSAllan Jude *
4711*5ff13fbcSAllan Jude * The method is simple: Overallocate, manually align, and store the offset
4712*5ff13fbcSAllan Jude * to the original behind the returned pointer.
4713*5ff13fbcSAllan Jude *
4714*5ff13fbcSAllan Jude * Align must be a power of 2 and 8 <= align <= 128.
4715*5ff13fbcSAllan Jude */
4716*5ff13fbcSAllan Jude static void* XXH_alignedMalloc(size_t s, size_t align)
4717*5ff13fbcSAllan Jude {
4718*5ff13fbcSAllan Jude XXH_ASSERT(align <= 128 && align >= 8); /* range check */
4719*5ff13fbcSAllan Jude XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */
4720*5ff13fbcSAllan Jude XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */
4721*5ff13fbcSAllan Jude { /* Overallocate to make room for manual realignment and an offset byte */
4722*5ff13fbcSAllan Jude xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
4723*5ff13fbcSAllan Jude if (base != NULL) {
4724*5ff13fbcSAllan Jude /*
4725*5ff13fbcSAllan Jude * Get the offset needed to align this pointer.
4726*5ff13fbcSAllan Jude *
4727*5ff13fbcSAllan Jude * Even if the returned pointer is aligned, there will always be
4728*5ff13fbcSAllan Jude * at least one byte to store the offset to the original pointer.
4729*5ff13fbcSAllan Jude */
4730*5ff13fbcSAllan Jude size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
4731*5ff13fbcSAllan Jude /* Add the offset for the now-aligned pointer */
4732*5ff13fbcSAllan Jude xxh_u8* ptr = base + offset;
4733*5ff13fbcSAllan Jude
4734*5ff13fbcSAllan Jude XXH_ASSERT((size_t)ptr % align == 0);
4735*5ff13fbcSAllan Jude
4736*5ff13fbcSAllan Jude /* Store the offset immediately before the returned pointer. */
4737*5ff13fbcSAllan Jude ptr[-1] = (xxh_u8)offset;
4738*5ff13fbcSAllan Jude return ptr;
4739*5ff13fbcSAllan Jude }
4740*5ff13fbcSAllan Jude return NULL;
4741*5ff13fbcSAllan Jude }
4742*5ff13fbcSAllan Jude }
4743*5ff13fbcSAllan Jude /*
4744*5ff13fbcSAllan Jude * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
4745*5ff13fbcSAllan Jude * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
4746*5ff13fbcSAllan Jude */
4747*5ff13fbcSAllan Jude static void XXH_alignedFree(void* p)
4748*5ff13fbcSAllan Jude {
4749*5ff13fbcSAllan Jude if (p != NULL) {
4750*5ff13fbcSAllan Jude xxh_u8* ptr = (xxh_u8*)p;
4751*5ff13fbcSAllan Jude         /* Get the offset byte we added in XXH_alignedMalloc. */
4752*5ff13fbcSAllan Jude xxh_u8 offset = ptr[-1];
4753*5ff13fbcSAllan Jude /* Free the original malloc'd pointer */
4754*5ff13fbcSAllan Jude xxh_u8* base = ptr - offset;
4755*5ff13fbcSAllan Jude XXH_free(base);
4756*5ff13fbcSAllan Jude }
4757*5ff13fbcSAllan Jude }
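/*
 * Resulting layout (illustrative):
 *   base .. padding .. [offset byte] [s bytes, aligned to 'align']
 *                       ^ ptr[-1]     ^ ptr (returned to the caller)
 * Pairing sketch:
 *   void* p = XXH_alignedMalloc(256, 64);   (64-byte aligned, or NULL)
 *   ...
 *   XXH_alignedFree(p);                     (never plain XXH_free(p))
 */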
4758*5ff13fbcSAllan Jude /*! @ingroup xxh3_family */
4759*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
4760*5ff13fbcSAllan Jude {
4761*5ff13fbcSAllan Jude XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
4762*5ff13fbcSAllan Jude if (state==NULL) return NULL;
4763*5ff13fbcSAllan Jude XXH3_INITSTATE(state);
4764*5ff13fbcSAllan Jude return state;
4765*5ff13fbcSAllan Jude }
4766*5ff13fbcSAllan Jude
4767*5ff13fbcSAllan Jude /*! @ingroup xxh3_family */
4768*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
4769*5ff13fbcSAllan Jude {
4770*5ff13fbcSAllan Jude XXH_alignedFree(statePtr);
4771*5ff13fbcSAllan Jude return XXH_OK;
4772*5ff13fbcSAllan Jude }
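/*
 * Streaming usage sketch (XXH3_64bits_update() and XXH3_64bits_digest()
 * belong to the same streaming API, defined further below):
 *   XXH3_state_t* const s = XXH3_createState();
 *   if (s != NULL && XXH3_64bits_reset(s) == XXH_OK) {
 *       XXH3_64bits_update(s, chunk1, chunk1Size);
 *       XXH3_64bits_update(s, chunk2, chunk2Size);
 *       { XXH64_hash_t const h = XXH3_64bits_digest(s); (void)h; }
 *   }
 *   XXH3_freeState(s);
 */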
4773*5ff13fbcSAllan Jude
4774*5ff13fbcSAllan Jude /*! @ingroup xxh3_family */
4775*5ff13fbcSAllan Jude XXH_PUBLIC_API void
4776*5ff13fbcSAllan Jude XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state)
4777*5ff13fbcSAllan Jude {
4778*5ff13fbcSAllan Jude XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
4779*5ff13fbcSAllan Jude }
4780*5ff13fbcSAllan Jude
4781*5ff13fbcSAllan Jude static void
4782*5ff13fbcSAllan Jude XXH3_reset_internal(XXH3_state_t* statePtr,
4783*5ff13fbcSAllan Jude XXH64_hash_t seed,
4784*5ff13fbcSAllan Jude const void* secret, size_t secretSize)
4785*5ff13fbcSAllan Jude {
4786*5ff13fbcSAllan Jude size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
4787*5ff13fbcSAllan Jude size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
4788*5ff13fbcSAllan Jude XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
4789*5ff13fbcSAllan Jude XXH_ASSERT(statePtr != NULL);
4790*5ff13fbcSAllan Jude /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
4791*5ff13fbcSAllan Jude memset((char*)statePtr + initStart, 0, initLength);
4792*5ff13fbcSAllan Jude statePtr->acc[0] = XXH_PRIME32_3;
4793*5ff13fbcSAllan Jude statePtr->acc[1] = XXH_PRIME64_1;
4794*5ff13fbcSAllan Jude statePtr->acc[2] = XXH_PRIME64_2;
4795*5ff13fbcSAllan Jude statePtr->acc[3] = XXH_PRIME64_3;
4796*5ff13fbcSAllan Jude statePtr->acc[4] = XXH_PRIME64_4;
4797*5ff13fbcSAllan Jude statePtr->acc[5] = XXH_PRIME32_2;
4798*5ff13fbcSAllan Jude statePtr->acc[6] = XXH_PRIME64_5;
4799*5ff13fbcSAllan Jude statePtr->acc[7] = XXH_PRIME32_1;
4800*5ff13fbcSAllan Jude statePtr->seed = seed;
4801*5ff13fbcSAllan Jude statePtr->useSeed = (seed != 0);
4802*5ff13fbcSAllan Jude statePtr->extSecret = (const unsigned char*)secret;
4803*5ff13fbcSAllan Jude XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
4804*5ff13fbcSAllan Jude statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
4805*5ff13fbcSAllan Jude statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
4806*5ff13fbcSAllan Jude }
4807*5ff13fbcSAllan Jude
4808*5ff13fbcSAllan Jude /*! @ingroup xxh3_family */
4809*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset(XXH3_state_t * statePtr)4810*5ff13fbcSAllan Jude XXH3_64bits_reset(XXH3_state_t* statePtr)
4811*5ff13fbcSAllan Jude {
4812*5ff13fbcSAllan Jude if (statePtr == NULL) return XXH_ERROR;
4813*5ff13fbcSAllan Jude XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
4814*5ff13fbcSAllan Jude return XXH_OK;
4815*5ff13fbcSAllan Jude }
4816*5ff13fbcSAllan Jude
4817*5ff13fbcSAllan Jude /*! @ingroup xxh3_family */
4818*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecret(XXH3_state_t * statePtr,const void * secret,size_t secretSize)4819*5ff13fbcSAllan Jude XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
4820*5ff13fbcSAllan Jude {
4821*5ff13fbcSAllan Jude if (statePtr == NULL) return XXH_ERROR;
4822*5ff13fbcSAllan Jude XXH3_reset_internal(statePtr, 0, secret, secretSize);
4823*5ff13fbcSAllan Jude if (secret == NULL) return XXH_ERROR;
4824*5ff13fbcSAllan Jude if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
4825*5ff13fbcSAllan Jude return XXH_OK;
4826*5ff13fbcSAllan Jude }
4827*5ff13fbcSAllan Jude
4828*5ff13fbcSAllan Jude /*! @ingroup xxh3_family */
4829*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSeed(XXH3_state_t * statePtr,XXH64_hash_t seed)4830*5ff13fbcSAllan Jude XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
4831*5ff13fbcSAllan Jude {
4832*5ff13fbcSAllan Jude if (statePtr == NULL) return XXH_ERROR;
4833*5ff13fbcSAllan Jude if (seed==0) return XXH3_64bits_reset(statePtr);
4834*5ff13fbcSAllan Jude if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
4835*5ff13fbcSAllan Jude XXH3_initCustomSecret(statePtr->customSecret, seed);
4836*5ff13fbcSAllan Jude XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
4837*5ff13fbcSAllan Jude return XXH_OK;
4838*5ff13fbcSAllan Jude }
4839*5ff13fbcSAllan Jude
4840*5ff13fbcSAllan Jude /*! @ingroup xxh3_family */
4841*5ff13fbcSAllan Jude XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH3_state_t * statePtr,const void * secret,size_t secretSize,XXH64_hash_t seed64)4842*5ff13fbcSAllan Jude XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed64)
4843*5ff13fbcSAllan Jude {
4844*5ff13fbcSAllan Jude if (statePtr == NULL) return XXH_ERROR;
4845*5ff13fbcSAllan Jude if (secret == NULL) return XXH_ERROR;
4846*5ff13fbcSAllan Jude if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
4847*5ff13fbcSAllan Jude XXH3_reset_internal(statePtr, seed64, secret, secretSize);
4848*5ff13fbcSAllan Jude statePtr->useSeed = 1; /* always, even if seed64==0 */
4849*5ff13fbcSAllan Jude return XXH_OK;
4850*5ff13fbcSAllan Jude }
4851*5ff13fbcSAllan Jude
4852*5ff13fbcSAllan Jude /* Note : when XXH3_consumeStripes() is invoked,
4853*5ff13fbcSAllan Jude * there must be a guarantee that at least one more byte must be consumed from input
4854*5ff13fbcSAllan Jude * so that the function can blindly consume all stripes using the "normal" secret segment */
4855*5ff13fbcSAllan Jude XXH_FORCE_INLINE void
XXH3_consumeStripes(xxh_u64 * XXH_RESTRICT acc,size_t * XXH_RESTRICT nbStripesSoFarPtr,size_t nbStripesPerBlock,const xxh_u8 * XXH_RESTRICT input,size_t nbStripes,const xxh_u8 * XXH_RESTRICT secret,size_t secretLimit,XXH3_f_accumulate_512 f_acc512,XXH3_f_scrambleAcc f_scramble)4856*5ff13fbcSAllan Jude XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
4857*5ff13fbcSAllan Jude size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
4858*5ff13fbcSAllan Jude const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
4859*5ff13fbcSAllan Jude const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
4860*5ff13fbcSAllan Jude XXH3_f_accumulate_512 f_acc512,
4861*5ff13fbcSAllan Jude XXH3_f_scrambleAcc f_scramble)
4862*5ff13fbcSAllan Jude {
4863*5ff13fbcSAllan Jude XXH_ASSERT(nbStripes <= nbStripesPerBlock); /* can handle max 1 scramble per invocation */
4864*5ff13fbcSAllan Jude XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock);
4865*5ff13fbcSAllan Jude if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) {
4866*5ff13fbcSAllan Jude /* need a scrambling operation */
4867*5ff13fbcSAllan Jude size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr;
4868*5ff13fbcSAllan Jude size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock;
4869*5ff13fbcSAllan Jude XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock, f_acc512);
4870*5ff13fbcSAllan Jude f_scramble(acc, secret + secretLimit);
4871*5ff13fbcSAllan Jude XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock, f_acc512);
4872*5ff13fbcSAllan Jude *nbStripesSoFarPtr = nbStripesAfterBlock;
4873*5ff13fbcSAllan Jude } else {
4874*5ff13fbcSAllan Jude XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, f_acc512);
4875*5ff13fbcSAllan Jude *nbStripesSoFarPtr += nbStripes;
4876*5ff13fbcSAllan Jude }
4877*5ff13fbcSAllan Jude }
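/*
 * Worked example (illustrative, derived from the logic above): with the
 * default 192-byte secret, nbStripesPerBlock = (192 - 64) / 8 = 16. If
 * *nbStripesSoFarPtr == 14, a call with nbStripes == 5 crosses a block
 * boundary: 2 stripes finish the current block (reading the secret from
 * offset 14 * XXH_SECRET_CONSUME_RATE), the accumulators are scrambled once,
 * then the remaining 3 stripes restart from the beginning of the secret,
 * leaving *nbStripesSoFarPtr == 3.
 */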

#ifndef XXH3_STREAM_USE_STACK
# ifndef __clang__ /* clang doesn't need additional stack space */
#   define XXH3_STREAM_USE_STACK 1
# endif
#endif
/*
 * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
 */
XXH_FORCE_INLINE XXH_errorcode
XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
            const xxh_u8* XXH_RESTRICT input, size_t len,
            XXH3_f_accumulate_512 f_acc512,
            XXH3_f_scrambleAcc f_scramble)
{
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }

    XXH_ASSERT(state != NULL);
    {   const xxh_u8* const bEnd = input + len;
        const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
        /* For some reason, gcc and MSVC seem to suffer greatly
         * when operating accumulators directly into state.
         * Operating into stack space seems to enable proper optimization.
         * clang, on the other hand, doesn't seem to need this trick */
        XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; memcpy(acc, state->acc, sizeof(acc));
#else
        xxh_u64* XXH_RESTRICT const acc = state->acc;
#endif
        state->totalLen += len;
        XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);

        /* small input : just fill in tmp buffer */
        if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) {
            XXH_memcpy(state->buffer + state->bufferedSize, input, len);
            state->bufferedSize += (XXH32_hash_t)len;
            return XXH_OK;
        }

        /* total input is now > XXH3_INTERNALBUFFER_SIZE */
#define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
        XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0);   /* clean multiple */

        /*
         * Internal buffer is partially filled (always, except at beginning)
         * Complete it, then consume it.
         */
        if (state->bufferedSize) {
            size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
            XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
            input += loadSize;
            XXH3_consumeStripes(acc,
                               &state->nbStripesSoFar, state->nbStripesPerBlock,
                                state->buffer, XXH3_INTERNALBUFFER_STRIPES,
                                secret, state->secretLimit,
                                f_acc512, f_scramble);
            state->bufferedSize = 0;
        }
        XXH_ASSERT(input < bEnd);

        /* large input to consume : ingest per full block */
        if ((size_t)(bEnd - input) > state->nbStripesPerBlock * XXH_STRIPE_LEN) {
            size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
            XXH_ASSERT(state->nbStripesPerBlock >= state->nbStripesSoFar);
            /* join to current block's end */
            {   size_t const nbStripesToEnd = state->nbStripesPerBlock - state->nbStripesSoFar;
                XXH_ASSERT(nbStripesToEnd <= nbStripes);
                XXH3_accumulate(acc, input, secret + state->nbStripesSoFar * XXH_SECRET_CONSUME_RATE, nbStripesToEnd, f_acc512);
                f_scramble(acc, secret + state->secretLimit);
                state->nbStripesSoFar = 0;
                input += nbStripesToEnd * XXH_STRIPE_LEN;
                nbStripes -= nbStripesToEnd;
            }
            /* consume per entire blocks */
            while(nbStripes >= state->nbStripesPerBlock) {
                XXH3_accumulate(acc, input, secret, state->nbStripesPerBlock, f_acc512);
                f_scramble(acc, secret + state->secretLimit);
                input += state->nbStripesPerBlock * XXH_STRIPE_LEN;
                nbStripes -= state->nbStripesPerBlock;
            }
            /* consume last partial block */
            XXH3_accumulate(acc, input, secret, nbStripes, f_acc512);
            input += nbStripes * XXH_STRIPE_LEN;
            XXH_ASSERT(input < bEnd);  /* at least some bytes left */
            state->nbStripesSoFar = nbStripes;
            /* buffer predecessor of last partial stripe */
            XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
            XXH_ASSERT(bEnd - input <= XXH_STRIPE_LEN);
        } else {
            /* content to consume <= block size */
            /* Consume input by a multiple of internal buffer size */
            if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
                const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
                do {
                    XXH3_consumeStripes(acc,
                                       &state->nbStripesSoFar, state->nbStripesPerBlock,
                                        input, XXH3_INTERNALBUFFER_STRIPES,
                                        secret, state->secretLimit,
                                        f_acc512, f_scramble);
                    input += XXH3_INTERNALBUFFER_SIZE;
                } while (input<limit);
                /* buffer predecessor of last partial stripe */
                XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
            }
        }

        /* Some remaining input (always) : buffer it */
        XXH_ASSERT(input < bEnd);
        XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
        XXH_ASSERT(state->bufferedSize == 0);
        XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
        state->bufferedSize = (XXH32_hash_t)(bEnd-input);
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
        /* save stack accumulators into state */
        memcpy(state->acc, acc, sizeof(acc));
#endif
    }

    return XXH_OK;
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate_512, XXH3_scrambleAcc);
}


XXH_FORCE_INLINE void
XXH3_digest_long (XXH64_hash_t* acc,
                  const XXH3_state_t* state,
                  const unsigned char* secret)
{
    /*
     * Digest on a local copy. This way, the state remains unaltered, and it can
     * continue ingesting more input afterwards.
     */
    XXH_memcpy(acc, state->acc, sizeof(state->acc));
    if (state->bufferedSize >= XXH_STRIPE_LEN) {
        size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
        size_t nbStripesSoFar = state->nbStripesSoFar;
        XXH3_consumeStripes(acc,
                           &nbStripesSoFar, state->nbStripesPerBlock,
                            state->buffer, nbStripes,
                            secret, state->secretLimit,
                            XXH3_accumulate_512, XXH3_scrambleAcc);
        /* last stripe */
        XXH3_accumulate_512(acc,
                            state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
    } else {  /* bufferedSize < XXH_STRIPE_LEN */
        xxh_u8 lastStripe[XXH_STRIPE_LEN];
        size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
        XXH_ASSERT(state->bufferedSize > 0);  /* there is always some input buffered */
        XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
        XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
        XXH3_accumulate_512(acc,
                            lastStripe,
                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
    }
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        return XXH3_mergeAccs(acc,
                              secret + XXH_SECRET_MERGEACCS_START,
                              (xxh_u64)state->totalLen * XXH_PRIME64_1);
    }
    /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
    if (state->useSeed)
        return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                  secret, state->secretLimit + XXH_STRIPE_LEN);
}
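/*
 * Example (illustrative sketch, not part of the library): incremental hashing
 * with the 64-bit streaming API above. The chunk buffers and sizes are
 * assumptions for the example; error handling is abbreviated.
 *
 *     XXH3_state_t* const state = XXH3_createState();
 *     if (state != NULL) {
 *         XXH64_hash_t hash;
 *         (void)XXH3_64bits_reset(state);        // or _withSeed()/_withSecret()
 *         (void)XXH3_64bits_update(state, chunk1, chunk1Size);
 *         (void)XXH3_64bits_update(state, chunk2, chunk2Size);
 *         hash = XXH3_64bits_digest(state);      // state stays usable afterwards
 *         (void)XXH3_freeState(state);
 *     }
 *
 * Production code should check the XXH_errorcode returned by reset/update.
 */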


/* ==========================================
 * XXH3 128 bits (a.k.a XXH128)
 * ==========================================
 * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
 * even without counting the significantly larger output size.
 *
 * For example, extra steps are taken to avoid the seed-dependent collisions
 * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
 *
 * This strength naturally comes at the cost of some speed, especially on short
 * lengths. Note that longer hashes are about as fast as the 64-bit version
 * due to it using only a slight modification of the 64-bit loop.
 *
 * XXH128 is also more oriented towards 64-bit machines. It is still extremely
 * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
 */

XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    /* A doubled version of 1to3_64b with different constants. */
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    /*
     * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
     * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
     * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
     */
    {   xxh_u8 const c1 = input[0];
        xxh_u8 const c2 = input[len >> 1];
        xxh_u8 const c3 = input[len - 1];
        xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
                                | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
        xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
        xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
        xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
        xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
        XXH128_hash_t h128;
        h128.low64  = XXH64_avalanche(keyed_lo);
        h128.high64 = XXH64_avalanche(keyed_hi);
        return h128;
    }
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   xxh_u32 const input_lo = XXH_readLE32(input);
        xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
        xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
        xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
        xxh_u64 const keyed = input_64 ^ bitflip;

        /* Shift len left so the added term is even: XXH_PRIME64_1 is odd,
         * so the multiplier stays odd, avoiding a multiply by an even number. */
        XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));

        m128.high64 += (m128.low64 << 1);
        m128.low64  ^= (m128.high64 >> 3);

        m128.low64   = XXH_xorshift64(m128.low64, 35);
        m128.low64  *= 0x9FB21C651E98DF25ULL;
        m128.low64   = XXH_xorshift64(m128.low64, 28);
        m128.high64  = XXH3_avalanche(m128.high64);
        return m128;
    }
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(9 <= len && len <= 16);
    {   xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
        xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
        xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64       input_hi = XXH_readLE64(input + len - 8);
        XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
        /*
         * Put len in the middle of m128 to ensure that the length gets mixed to
         * both the low and high bits in the 128x64 multiply below.
         */
        m128.low64 += (xxh_u64)(len - 1) << 54;
        input_hi   ^= bitfliph;
        /*
         * Add the high 32 bits of input_hi to the high 32 bits of m128, then
         * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
         * the high 64 bits of m128.
         *
         * The best approach to this operation is different on 32-bit and 64-bit.
         */
        if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
            /*
             * 32-bit optimized version, which is more readable.
             *
             * On 32-bit, it removes an ADC and delays a dependency between the two
             * halves of m128.high64, but it generates an extra mask on 64-bit.
             */
            m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
        } else {
            /*
             * 64-bit optimized (albeit more confusing) version.
             *
             * Uses some properties of addition and multiplication to remove the mask:
             *
             * Let:
             *    a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
             *    b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
             *    c = XXH_PRIME32_2
             *
             *    a + (b * c)
             * Inverse Property: x + y - x == y
             *    a + (b * (1 + c - 1))
             * Distributive Property: x * (y + z) == (x * y) + (x * z)
             *    a + (b * 1) + (b * (c - 1))
             * Identity Property: x * 1 == x
             *    a + b + (b * (c - 1))
             *
             * Substitute a, b, and c:
             *    input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
             *
             * Since input_hi.hi + input_hi.lo == input_hi, we get this:
             *    input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
             */
            m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
        }
        /* m128 ^= XXH_swap64(m128 >> 64); */
        m128.low64  ^= XXH_swap64(m128.high64);

        {   /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
            XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
            h128.high64 += m128.high64 * XXH_PRIME64_2;

            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = XXH3_avalanche(h128.high64);
            return h128;
    }   }
}

/*
 * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(len <= 16);
    {   if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
        if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
        if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
        {   XXH128_hash_t h128;
            xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
            xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
            h128.low64  = XXH64_avalanche(seed ^ bitflipl);
            h128.high64 = XXH64_avalanche(seed ^ bitfliph);
            return h128;
    }   }
}

/*
 * A bit slower than XXH3_mix16B, but handles multiply by zero better.
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
              const xxh_u8* secret, XXH64_hash_t seed)
{
    acc.low64  += XXH3_mix16B (input_1, secret+0, seed);
    acc.low64  ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
    acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
    acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
    return acc;
}


XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                      XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(16 < len && len <= 128);

    {   XXH128_hash_t acc;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
                }
                acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
            }
            acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
        }
        acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
        }
    }
}

XXH_NO_INLINE XXH128_hash_t
XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                       XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);

    {   XXH128_hash_t acc;
        int const nbRounds = (int)len / 32;
        int i;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;
        for (i=0; i<4; i++) {
            acc = XXH128_mix32B(acc,
                                input  + (32 * i),
                                input  + (32 * i) + 16,
                                secret + (32 * i),
                                seed);
        }
        acc.low64 = XXH3_avalanche(acc.low64);
        acc.high64 = XXH3_avalanche(acc.high64);
        XXH_ASSERT(nbRounds >= 4);
        for (i=4 ; i < nbRounds; i++) {
            acc = XXH128_mix32B(acc,
                                input + (32 * i),
                                input + (32 * i) + 16,
                                secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)),
                                seed);
        }
        /* last bytes */
        acc = XXH128_mix32B(acc,
                            input + len - 16,
                            input + len - 32,
                            secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
                            0ULL - seed);

        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
        }
    }
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
                            const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                            XXH3_f_accumulate_512 f_acc512,
                            XXH3_f_scrambleAcc f_scramble)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;

    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc512, f_scramble);

    /* converge into final hash */
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
    {   XXH128_hash_t h128;
        h128.low64  = XXH3_mergeAccs(acc,
                                     secret + XXH_SECRET_MERGEACCS_START,
                                     (xxh_u64)len * XXH_PRIME64_1);
        h128.high64 = XXH3_mergeAccs(acc,
                                     secret + secretSize
                                            - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                     ~((xxh_u64)len * XXH_PRIME64_2));
        return h128;
    }
}

/*
 * It's important for performance that XXH3_hashLong() is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
                           XXH64_hash_t seed64,
                           const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*
 * It's important for performance to pass `secretLen` (when it's static)
 * to the compiler, so that it can properly optimize the vectorized loop.
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
                              XXH64_hash_t seed64,
                              const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;
    return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
                                     XXH64_hash_t seed64,
                                     XXH3_f_accumulate_512 f_acc512,
                                     XXH3_f_scrambleAcc f_scramble,
                                     XXH3_f_initCustomSecret f_initSec)
{
    if (seed64 == 0)
        return XXH3_hashLong_128b_internal(input, len,
                                           XXH3_kSecret, sizeof(XXH3_kSecret),
                                           f_acc512, f_scramble);
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed64);
        return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
                                           f_acc512, f_scramble);
    }
}

/*
 * It's important for performance that XXH3_hashLong() is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed(const void* input, size_t len,
                            XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
                XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
}

typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
                                            XXH64_hash_t, const void* XXH_RESTRICT, size_t);

XXH_FORCE_INLINE XXH128_hash_t
XXH3_128bits_internal(const void* input, size_t len,
                      XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                      XXH3_hashLong128_f f_hl128)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * If an action is to be taken if `secret` conditions are not respected,
     * it should be done here.
     * For now, it's a contract pre-condition.
     * Adding a check and a branch here would cost performance at every hash.
     */
    if (len <= 16)
        return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hl128(input, len, seed64, secret, secretLen);
}


/* === Public XXH128 API === */

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len)
{
    return XXH3_128bits_internal(input, len, 0,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_default);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
{
    return XXH3_128bits_internal(input, len, 0,
                                 (const xxh_u8*)secret, secretSize,
                                 XXH3_hashLong_128b_withSecret);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_internal(input, len, seed,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_withSeed);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
    return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_withSeed(input, len, seed);
}
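/*
 * Example (illustrative sketch, not part of the library): one-shot 128-bit
 * hashing with the public API above. The message and seed value are
 * assumptions for the example.
 *
 *     const char msg[] = "example input";
 *     XXH128_hash_t const h  = XXH3_128bits(msg, sizeof(msg)-1);
 *     XXH128_hash_t const hs = XXH128(msg, sizeof(msg)-1, 42);  // seeded variant
 *     // h.low64 / h.high64 hold the two 64-bit halves of the digest.
 */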


/* === XXH3 128-bit streaming === */

/*
 * All initialization and update functions are identical to their 64-bit
 * streaming counterparts. The only difference is the finalization routine.
 */

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset(XXH3_state_t* statePtr)
{
    return XXH3_64bits_reset(statePtr);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
{
    return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    return XXH3_64bits_reset_withSeed(statePtr, seed);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate_512, XXH3_scrambleAcc);
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
        {   XXH128_hash_t h128;
            h128.low64  = XXH3_mergeAccs(acc,
                                         secret + XXH_SECRET_MERGEACCS_START,
                                         (xxh_u64)state->totalLen * XXH_PRIME64_1);
            h128.high64 = XXH3_mergeAccs(acc,
                                         secret + state->secretLimit + XXH_STRIPE_LEN
                                                - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
            return h128;
        }
    }
    /* len <= XXH3_MIDSIZE_MAX : short code */
    if (state->seed)
        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                   secret, state->secretLimit + XXH_STRIPE_LEN);
}

/* 128-bit utility functions */

#include <string.h>   /* memcmp, memcpy */

/* return : 1 if equal, 0 if different */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
{
    /* note : XXH128_hash_t is compact, it has no padding byte */
    return !(memcmp(&h1, &h2, sizeof(h1)));
}

/* This prototype is compatible with stdlib's qsort().
 * return : >0 if *h128_1  > *h128_2
 *          <0 if *h128_1  < *h128_2
 *          =0 if *h128_1 == *h128_2 */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2)
{
    XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
    XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
    int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
    /* note : bets that, in most cases, hash values are different */
    if (hcmp) return hcmp;
    return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
}
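/*
 * Example (illustrative sketch, not part of the library): sorting an array of
 * 128-bit hashes with stdlib's qsort(), which XXH128_cmp is designed for.
 * The array size is an assumption for the example.
 *
 *     #include <stdlib.h>
 *     XXH128_hash_t hashes[16];
 *     // ... fill hashes ...
 *     qsort(hashes, 16, sizeof(hashes[0]), XXH128_cmp);
 */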

/*====== Canonical representation ======*/
/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) {
        hash.high64 = XXH_swap64(hash.high64);
        hash.low64  = XXH_swap64(hash.low64);
    }
    XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
    XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128_hashFromCanonical(const XXH128_canonical_t* src)
{
    XXH128_hash_t h;
    h.high64 = XXH_readBE64(src);
    h.low64  = XXH_readBE64(src->digest + 8);
    return h;
}
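/*
 * Example (illustrative sketch, not part of the library): serializing a hash
 * in its canonical big-endian form and reading it back. Round-tripping through
 * XXH128_canonical_t yields the same value regardless of host endianness.
 *
 *     XXH128_hash_t const h = XXH3_128bits("abc", 3);
 *     XXH128_canonical_t canon;
 *     XXH128_canonicalFromHash(&canon, h);   // canon.digest is safe to store/send
 *     XXH128_hash_t const back = XXH128_hashFromCanonical(&canon);
 *     XXH_ASSERT(XXH128_isEqual(h, back));
 */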


/* ==========================================
 * Secret generators
 * ==========================================
 */
#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))

XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
{
    XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
    XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
}

/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize)
{
#if (XXH_DEBUGLEVEL >= 1)
    XXH_ASSERT(secretBuffer != NULL);
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
#else
    /* production mode, assert() are disabled */
    if (secretBuffer == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
#endif

    if (customSeedSize == 0) {
        customSeed = XXH3_kSecret;
        customSeedSize = XXH_SECRET_DEFAULT_SIZE;
    }
#if (XXH_DEBUGLEVEL >= 1)
    XXH_ASSERT(customSeed != NULL);
#else
    if (customSeed == NULL) return XXH_ERROR;
#endif

    /* Fill secretBuffer with a copy of customSeed - repeat as needed */
    {   size_t pos = 0;
        while (pos < secretSize) {
            size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
            memcpy((char*)secretBuffer + pos, customSeed, toCopy);
            pos += toCopy;
    }   }

    {   size_t const nbSeg16 = secretSize / 16;
        size_t n;
        XXH128_canonical_t scrambler;
        XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
        for (n=0; n<nbSeg16; n++) {
            XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
            XXH3_combine16((char*)secretBuffer + n*16, h128);
        }
        /* last segment */
        XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
    }
    return XXH_OK;
}
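/*
 * Example (illustrative sketch, not part of the library): deriving a custom
 * secret from low-entropy seed material, then hashing with it. The seed
 * material, and the `data`/`dataSize` inputs, are assumptions for the example.
 *
 *     xxh_u8 secret[XXH3_SECRET_SIZE_MIN];
 *     const char seedMaterial[] = "application-specific seed";
 *     if (XXH3_generateSecret(secret, sizeof(secret),
 *                             seedMaterial, sizeof(seedMaterial)-1) == XXH_OK) {
 *         XXH64_hash_t const h = XXH3_64bits_withSecret(data, dataSize,
 *                                                       secret, sizeof(secret));
 *         (void)h;
 *     }
 */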

/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed)
{
    XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
    XXH3_initCustomSecret(secret, seed);
    XXH_ASSERT(secretBuffer != NULL);
    memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
}



/* Pop our optimization override from above */
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
#  pragma GCC pop_options
#endif

#endif  /* XXH_NO_LONG_LONG */

#endif  /* XXH_NO_XXH3 */

/*!
 * @}
 */
#endif  /* XXH_IMPLEMENTATION */


#if defined (__cplusplus)
}
#endif