/*
 * xxHash - Extremely Fast Hash algorithm
 * Copyright (C) 2012-2016, Yann Collet.
 *
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following disclaimer
 *     in the documentation and/or other materials provided with the
 *     distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation. This program is dual-licensed; you may select
 * either version 2 of the GNU General Public License ("GPL") or BSD license
 * ("BSD").
 *
 * You can contact the author at:
 * - xxHash homepage: https://cyan4973.github.io/xxHash/
 * - xxHash source repository: https://github.com/Cyan4973/xxHash
 */

#include <linux/unaligned.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/xxhash.h>
/*-*************************************
 * Macros
 **************************************/
#define xxh_rotl32(x, r) ((x << r) | (x >> (32 - r)))
#define xxh_rotl64(x, r) ((x << r) | (x >> (64 - r)))

#ifdef __LITTLE_ENDIAN
# define XXH_CPU_LITTLE_ENDIAN 1
#else
# define XXH_CPU_LITTLE_ENDIAN 0
#endif

/*-*************************************
 * Constants
 **************************************/
static const uint32_t PRIME32_1 = 2654435761U;
static const uint32_t PRIME32_2 = 2246822519U;
static const uint32_t PRIME32_3 = 3266489917U;
static const uint32_t PRIME32_4 = 668265263U;
static const uint32_t PRIME32_5 = 374761393U;

static const uint64_t PRIME64_1 = 11400714785074694791ULL;
static const uint64_t PRIME64_2 = 14029467366897019727ULL;
static const uint64_t PRIME64_3 = 1609587929392839161ULL;
static const uint64_t PRIME64_4 = 9650029242287828579ULL;
static const uint64_t PRIME64_5 = 2870177450012600261ULL;
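
/*
 * Note: the PRIME32_1..5 and PRIME64_1..5 values are the multiplicative
 * constants from the reference xxHash implementation; each is a large odd
 * prime whose bit pattern helps spread input bits across the accumulator
 * when multiplied.
 */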

/*-***************************
 * Simple Hash Functions
 ****************************/
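/*
 * xxh32_round() - mix one 32-bit lane into an accumulator: add the input
 * scaled by PRIME32_2, rotate left by 13, then multiply by PRIME32_1.
 */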
static uint32_t xxh32_round(uint32_t seed, const uint32_t input)
{
	seed += input * PRIME32_2;
	seed = xxh_rotl32(seed, 13);
	seed *= PRIME32_1;
	return seed;
}

uint32_t xxh32(const void *input, const size_t len, const uint32_t seed)
{
	const uint8_t *p = (const uint8_t *)input;
	const uint8_t *b_end = p + len;
	uint32_t h32;

	if (len >= 16) {
		const uint8_t *const limit = b_end - 16;
		uint32_t v1 = seed + PRIME32_1 + PRIME32_2;
		uint32_t v2 = seed + PRIME32_2;
		uint32_t v3 = seed + 0;
		uint32_t v4 = seed - PRIME32_1;

		do {
			v1 = xxh32_round(v1, get_unaligned_le32(p));
			p += 4;
			v2 = xxh32_round(v2, get_unaligned_le32(p));
			p += 4;
			v3 = xxh32_round(v3, get_unaligned_le32(p));
			p += 4;
			v4 = xxh32_round(v4, get_unaligned_le32(p));
			p += 4;
		} while (p <= limit);

		h32 = xxh_rotl32(v1, 1) + xxh_rotl32(v2, 7) +
			xxh_rotl32(v3, 12) + xxh_rotl32(v4, 18);
	} else {
		h32 = seed + PRIME32_5;
	}

	h32 += (uint32_t)len;

	while (p + 4 <= b_end) {
		h32 += get_unaligned_le32(p) * PRIME32_3;
		h32 = xxh_rotl32(h32, 17) * PRIME32_4;
		p += 4;
	}

	while (p < b_end) {
		h32 += (*p) * PRIME32_5;
		h32 = xxh_rotl32(h32, 11) * PRIME32_1;
		p++;
	}

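	/* final avalanche: mix all remaining bits of h32 */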
	h32 ^= h32 >> 15;
	h32 *= PRIME32_2;
	h32 ^= h32 >> 13;
	h32 *= PRIME32_3;
	h32 ^= h32 >> 16;

	return h32;
}
EXPORT_SYMBOL(xxh32);

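/*
 * xxh64_round() - mix one 64-bit lane into an accumulator: add the input
 * scaled by PRIME64_2, rotate left by 31, then multiply by PRIME64_1.
 */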
static uint64_t xxh64_round(uint64_t acc, const uint64_t input)
{
	acc += input * PRIME64_2;
	acc = xxh_rotl64(acc, 31);
	acc *= PRIME64_1;
	return acc;
}

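/*
 * xxh64_merge_round() - fold a finished lane accumulator into the final
 * hash: run the lane through one round seeded with 0, xor it in, then
 * rescale with PRIME64_1 and PRIME64_4.
 */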
static uint64_t xxh64_merge_round(uint64_t acc, uint64_t val)
{
	val = xxh64_round(0, val);
	acc ^= val;
	acc = acc * PRIME64_1 + PRIME64_4;
	return acc;
}

uint64_t xxh64(const void *input, const size_t len, const uint64_t seed)
{
	const uint8_t *p = (const uint8_t *)input;
	const uint8_t *const b_end = p + len;
	uint64_t h64;

	if (len >= 32) {
		const uint8_t *const limit = b_end - 32;
		uint64_t v1 = seed + PRIME64_1 + PRIME64_2;
		uint64_t v2 = seed + PRIME64_2;
		uint64_t v3 = seed + 0;
		uint64_t v4 = seed - PRIME64_1;

		do {
			v1 = xxh64_round(v1, get_unaligned_le64(p));
			p += 8;
			v2 = xxh64_round(v2, get_unaligned_le64(p));
			p += 8;
			v3 = xxh64_round(v3, get_unaligned_le64(p));
			p += 8;
			v4 = xxh64_round(v4, get_unaligned_le64(p));
			p += 8;
		} while (p <= limit);

		h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
			xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
		h64 = xxh64_merge_round(h64, v1);
		h64 = xxh64_merge_round(h64, v2);
		h64 = xxh64_merge_round(h64, v3);
		h64 = xxh64_merge_round(h64, v4);

	} else {
		h64 = seed + PRIME64_5;
	}

	h64 += (uint64_t)len;

	while (p + 8 <= b_end) {
		const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));

		h64 ^= k1;
		h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
		p += 8;
	}

	if (p + 4 <= b_end) {
		h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;
		h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
		p += 4;
	}

	while (p < b_end) {
		h64 ^= (*p) * PRIME64_5;
		h64 = xxh_rotl64(h64, 11) * PRIME64_1;
		p++;
	}

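	/* final avalanche: mix all remaining bits of h64 */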
	h64 ^= h64 >> 33;
	h64 *= PRIME64_2;
	h64 ^= h64 >> 29;
	h64 *= PRIME64_3;
	h64 ^= h64 >> 32;

	return h64;
}
EXPORT_SYMBOL(xxh64);
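
/*
 * Example (illustrative sketch, not part of the kernel sources): one-shot
 * hashing of a complete in-memory buffer with xxh32()/xxh64().  The buffer
 * and the seed value 0 below are hypothetical.
 *
 *	const char buf[] = "example data";
 *	uint32_t h32 = xxh32(buf, sizeof(buf), 0);
 *	uint64_t h64 = xxh64(buf, sizeof(buf), 0);
 */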

/*-**************************************************
 * Advanced Hash Functions
 ***************************************************/
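/*
 * The streaming variant accumulates input incrementally in struct
 * xxh64_state (declared in <linux/xxhash.h>): v1..v4 hold the four running
 * accumulators, mem64/memsize buffer up to 31 bytes of input that do not
 * yet form a full 32-byte stripe, and total_len records the overall input
 * length for finalization.
 */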
void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed)
{
	/* use a local state for memcpy() to avoid strict-aliasing warnings */
	struct xxh64_state state;

	memset(&state, 0, sizeof(state));
	state.v1 = seed + PRIME64_1 + PRIME64_2;
	state.v2 = seed + PRIME64_2;
	state.v3 = seed + 0;
	state.v4 = seed - PRIME64_1;
	memcpy(statePtr, &state, sizeof(state));
}
EXPORT_SYMBOL(xxh64_reset);

int xxh64_update(struct xxh64_state *state, const void *input, const size_t len)
{
	const uint8_t *p = (const uint8_t *)input;
	const uint8_t *const b_end = p + len;

	if (input == NULL)
		return -EINVAL;

	state->total_len += len;

	if (state->memsize + len < 32) { /* fill in tmp buffer */
		memcpy(((uint8_t *)state->mem64) + state->memsize, input, len);
		state->memsize += (uint32_t)len;
		return 0;
	}

	if (state->memsize) { /* tmp buffer is full */
		uint64_t *p64 = state->mem64;

		memcpy(((uint8_t *)p64) + state->memsize, input,
			32 - state->memsize);

		state->v1 = xxh64_round(state->v1, get_unaligned_le64(p64));
		p64++;
		state->v2 = xxh64_round(state->v2, get_unaligned_le64(p64));
		p64++;
		state->v3 = xxh64_round(state->v3, get_unaligned_le64(p64));
		p64++;
		state->v4 = xxh64_round(state->v4, get_unaligned_le64(p64));

		p += 32 - state->memsize;
		state->memsize = 0;
	}

	if (p + 32 <= b_end) {
		const uint8_t *const limit = b_end - 32;
		uint64_t v1 = state->v1;
		uint64_t v2 = state->v2;
		uint64_t v3 = state->v3;
		uint64_t v4 = state->v4;

		do {
			v1 = xxh64_round(v1, get_unaligned_le64(p));
			p += 8;
			v2 = xxh64_round(v2, get_unaligned_le64(p));
			p += 8;
			v3 = xxh64_round(v3, get_unaligned_le64(p));
			p += 8;
			v4 = xxh64_round(v4, get_unaligned_le64(p));
			p += 8;
		} while (p <= limit);

		state->v1 = v1;
		state->v2 = v2;
		state->v3 = v3;
		state->v4 = v4;
	}

	if (p < b_end) {
		memcpy(state->mem64, p, (size_t)(b_end-p));
		state->memsize = (uint32_t)(b_end - p);
	}

	return 0;
}
EXPORT_SYMBOL(xxh64_update);

uint64_t xxh64_digest(const struct xxh64_state *state)
{
	const uint8_t *p = (const uint8_t *)state->mem64;
	const uint8_t *const b_end = (const uint8_t *)state->mem64 +
		state->memsize;
	uint64_t h64;

	if (state->total_len >= 32) {
		const uint64_t v1 = state->v1;
		const uint64_t v2 = state->v2;
		const uint64_t v3 = state->v3;
		const uint64_t v4 = state->v4;

		h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
			xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
		h64 = xxh64_merge_round(h64, v1);
		h64 = xxh64_merge_round(h64, v2);
		h64 = xxh64_merge_round(h64, v3);
		h64 = xxh64_merge_round(h64, v4);
	} else {
		h64 = state->v3 + PRIME64_5;
	}

	h64 += (uint64_t)state->total_len;

	while (p + 8 <= b_end) {
		const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));

		h64 ^= k1;
		h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
		p += 8;
	}

	if (p + 4 <= b_end) {
		h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;
		h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
		p += 4;
	}

	while (p < b_end) {
		h64 ^= (*p) * PRIME64_5;
		h64 = xxh_rotl64(h64, 11) * PRIME64_1;
		p++;
	}

	h64 ^= h64 >> 33;
	h64 *= PRIME64_2;
	h64 ^= h64 >> 29;
	h64 *= PRIME64_3;
	h64 ^= h64 >> 32;

	return h64;
}
EXPORT_SYMBOL(xxh64_digest);
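
/*
 * Example (illustrative sketch, not part of the kernel sources): streaming
 * hashing with the xxh64 state API.  The chunk pointers, lengths and seed
 * below are hypothetical; a real caller would check xxh64_update()'s return
 * value as shown.
 *
 *	struct xxh64_state st;
 *	uint64_t hash;
 *
 *	xxh64_reset(&st, 0);
 *	if (xxh64_update(&st, chunk1, chunk1_len))
 *		return -EINVAL;
 *	if (xxh64_update(&st, chunk2, chunk2_len))
 *		return -EINVAL;
 *	hash = xxh64_digest(&st);
 */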

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("xxHash");