Lines matching +full:p +full:-state (excerpts from the xxHash implementation)
/*
 * xxHash - Extremely Fast Hash algorithm
 * Copyright (C) 2012-2016, Yann Collet.
 *
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 * ...
 * Free Software Foundation. This program is dual-licensed; you may select
 * ...
 * - xxHash homepage: https://cyan4973.github.io/xxHash/
 * - xxHash source repository: https://github.com/Cyan4973/xxHash
 */
/* Rotate-left helpers shared by the 32-bit and 64-bit variants */
#define xxh_rotl32(x, r) ((x << r) | (x >> (32 - r)))
#define xxh_rotl64(x, r) ((x << r) | (x >> (64 - r)))
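The per-lane mixing below relies on xxh32_round() and xxh64_round(), whose bodies are not among the matched lines. As a sketch of what the excerpt assumes, the upstream xxHash reference defines each round as an add-multiply, rotate, multiply step over the algorithm's prime constants:

static uint32_t xxh32_round(uint32_t seed, const uint32_t input)
{
    /* Sketch from the upstream reference; not part of the matched lines. */
    seed += input * PRIME32_2;
    seed = xxh_rotl32(seed, 13);
    seed *= PRIME32_1;
    return seed;
}

static uint64_t xxh64_round(uint64_t acc, const uint64_t input)
{
    /* Sketch from the upstream reference; not part of the matched lines. */
    acc += input * PRIME64_2;
    acc = xxh_rotl64(acc, 31);
    acc *= PRIME64_1;
    return acc;
}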
/* xxh32(): one-shot 32-bit hash of a contiguous buffer */
    const uint8_t *p = (const uint8_t *)input;
    const uint8_t *b_end = p + len;
    /* ... */
        const uint8_t *const limit = b_end - 16;
        /* ... (v1..v3 are seeded as in xxh32_reset() below) ... */
        uint32_t v4 = seed - PRIME32_1;

        do {
            v1 = xxh32_round(v1, get_unaligned_le32(p));
            p += 4;
            v2 = xxh32_round(v2, get_unaligned_le32(p));
            p += 4;
            v3 = xxh32_round(v3, get_unaligned_le32(p));
            p += 4;
            v4 = xxh32_round(v4, get_unaligned_le32(p));
            p += 4;
        } while (p <= limit);
    /* ... */

    while (p + 4 <= b_end) {
        h32 += get_unaligned_le32(p) * PRIME32_3;
        /* ... */
        p += 4;
    }

    while (p < b_end) {
        h32 += (*p) * PRIME32_5;
        /* ... */
        p++;
    }
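The xxh32() excerpt stops before the final avalanche. For completeness, the reference implementation finishes the accumulated h32 roughly as follows; these lines are not part of the match:

    /* Final avalanche, per the reference implementation (not matched above). */
    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;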
/* xxh64(): one-shot 64-bit hash of a contiguous buffer */
    const uint8_t *p = (const uint8_t *)input;
    const uint8_t *const b_end = p + len;
    /* ... */
        const uint8_t *const limit = b_end - 32;
        /* ... (v1..v3 are seeded as in xxh64_reset() below) ... */
        uint64_t v4 = seed - PRIME64_1;

        do {
            v1 = xxh64_round(v1, get_unaligned_le64(p));
            p += 8;
            v2 = xxh64_round(v2, get_unaligned_le64(p));
            p += 8;
            v3 = xxh64_round(v3, get_unaligned_le64(p));
            p += 8;
            v4 = xxh64_round(v4, get_unaligned_le64(p));
            p += 8;
        } while (p <= limit);
    /* ... */

    while (p + 8 <= b_end) {
        const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));
        /* ... */
        p += 8;
    }

    if (p + 4 <= b_end) {
        h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;
        /* ... */
        p += 4;
    }

    while (p < b_end) {
        h64 ^= (*p) * PRIME64_5;
        /* ... */
        p++;
    }
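Both one-shot entry points hash a contiguous buffer with a caller-supplied seed. A minimal usage sketch, assuming the xxh32(buf, len, seed) and xxh64(buf, len, seed) prototypes exported by the kernel's <linux/xxhash.h>; the helper name is made up for illustration:

#include <linux/printk.h>
#include <linux/xxhash.h>

/* Hypothetical helper: hash one buffer with both one-shot variants. */
static void example_one_shot(const void *buf, size_t len)
{
    uint32_t h32 = xxh32(buf, len, 0);       /* seed 0 */
    uint64_t h64 = xxh64(buf, len, 0x1234);  /* arbitrary seed */

    pr_info("xxh32=%08x xxh64=%016llx\n", h32, (unsigned long long)h64);
}

The seed lets callers derive a family of independent hash functions; the same seed and input always produce the same output.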
/*-**************************************************
 * Streaming interface: xxh32/xxh64 reset, update, digest
 ***************************************************/
/* xxh32_reset(): seed the streaming state */
    /* use a local state for memcpy() to avoid strict-aliasing warnings */
    struct xxh32_state state;

    memset(&state, 0, sizeof(state));
    state.v1 = seed + PRIME32_1 + PRIME32_2;
    state.v2 = seed + PRIME32_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME32_1;
    memcpy(statePtr, &state, sizeof(state));
/* xxh64_reset(): seed the streaming state */
    /* use a local state for memcpy() to avoid strict-aliasing warnings */
    struct xxh64_state state;

    memset(&state, 0, sizeof(state));
    state.v1 = seed + PRIME64_1 + PRIME64_2;
    state.v2 = seed + PRIME64_2;
    state.v3 = seed + 0;
    state.v4 = seed - PRIME64_1;
    memcpy(statePtr, &state, sizeof(state));
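Both reset helpers fill a local struct and memcpy() it over the caller's state, so a half-initialised state is never observed. The fields referenced throughout these excerpts suggest a layout along the following lines; this is inferred from the accesses above, and the authoritative definitions live in the xxhash header:

/* Layout inferred from the field accesses in this excerpt. */
struct xxh32_state {
    uint32_t total_len_32;    /* bytes hashed so far (mod 2^32) */
    uint32_t large_len;       /* set once 16 or more bytes were seen */
    uint32_t v1, v2, v3, v4;  /* the four lane accumulators */
    uint32_t mem32[4];        /* buffered tail, less than one 16-byte stripe */
    uint32_t memsize;         /* bytes currently buffered in mem32 */
};

struct xxh64_state {
    uint64_t total_len;
    uint64_t v1, v2, v3, v4;
    uint64_t mem64[4];        /* buffered tail, less than one 32-byte stripe */
    uint32_t memsize;
};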
int xxh32_update(struct xxh32_state *state, const void *input, const size_t len)
{
    const uint8_t *p = (const uint8_t *)input;
    const uint8_t *const b_end = p + len;

    /* ... */
        return -EINVAL;

    state->total_len_32 += (uint32_t)len;
    state->large_len |= (len >= 16) | (state->total_len_32 >= 16);

    if (state->memsize + len < 16) { /* fill in tmp buffer */
        memcpy((uint8_t *)(state->mem32) + state->memsize, input, len);
        state->memsize += (uint32_t)len;
        /* ... */
    }

    if (state->memsize) { /* some data left from previous update */
        const uint32_t *p32 = state->mem32;

        memcpy((uint8_t *)(state->mem32) + state->memsize, input,
            16 - state->memsize);

        state->v1 = xxh32_round(state->v1, get_unaligned_le32(p32));
        p32++;
        state->v2 = xxh32_round(state->v2, get_unaligned_le32(p32));
        p32++;
        state->v3 = xxh32_round(state->v3, get_unaligned_le32(p32));
        p32++;
        state->v4 = xxh32_round(state->v4, get_unaligned_le32(p32));
        p32++;

        p += 16 - state->memsize;
        state->memsize = 0;
    }

    if (p <= b_end - 16) {
        const uint8_t *const limit = b_end - 16;
        uint32_t v1 = state->v1;
        uint32_t v2 = state->v2;
        uint32_t v3 = state->v3;
        uint32_t v4 = state->v4;

        do {
            v1 = xxh32_round(v1, get_unaligned_le32(p));
            p += 4;
            v2 = xxh32_round(v2, get_unaligned_le32(p));
            p += 4;
            v3 = xxh32_round(v3, get_unaligned_le32(p));
            p += 4;
            v4 = xxh32_round(v4, get_unaligned_le32(p));
            p += 4;
        } while (p <= limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < b_end) {
        memcpy(state->mem32, p, (size_t)(b_end - p));
        state->memsize = (uint32_t)(b_end - p);
    }
uint32_t xxh32_digest(const struct xxh32_state *state)
{
    const uint8_t *p = (const uint8_t *)state->mem32;
    const uint8_t *const b_end = (const uint8_t *)(state->mem32) +
        state->memsize;
    uint32_t h32;

    if (state->large_len) {
        h32 = xxh_rotl32(state->v1, 1) + xxh_rotl32(state->v2, 7) +
            xxh_rotl32(state->v3, 12) + xxh_rotl32(state->v4, 18);
    } else {
        h32 = state->v3 /* == seed */ + PRIME32_5;
    }

    h32 += state->total_len_32;

    while (p + 4 <= b_end) {
        h32 += get_unaligned_le32(p) * PRIME32_3;
        /* ... */
        p += 4;
    }

    while (p < b_end) {
        h32 += (*p) * PRIME32_5;
        /* ... */
        p++;
    }
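reset, update and digest together form the streaming interface: reset seeds the four lanes, update consumes whole 16-byte stripes while buffering any remainder in mem32, and digest folds the lanes plus the buffered tail into the final value, so the result matches a single one-shot call over the concatenated input. A minimal streaming sketch for the 32-bit variant, assuming the kernel prototypes xxh32_reset(), xxh32_update() and xxh32_digest(); the helper name is illustrative, and the 64-bit variant below mirrors this with 32-byte stripes:

#include <linux/errno.h>
#include <linux/xxhash.h>

/* Hypothetical helper: hash two fragments incrementally. */
static int example_stream32(const void *a, size_t a_len,
                            const void *b, size_t b_len, uint32_t *out)
{
    struct xxh32_state state;

    xxh32_reset(&state, 0);
    if (xxh32_update(&state, a, a_len))
        return -EINVAL;
    if (xxh32_update(&state, b, b_len))
        return -EINVAL;
    *out = xxh32_digest(&state);  /* equals xxh32() over a followed by b */
    return 0;
}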
int xxh64_update(struct xxh64_state *state, const void *input, const size_t len)
{
    const uint8_t *p = (const uint8_t *)input;
    const uint8_t *const b_end = p + len;

    /* ... */
        return -EINVAL;

    state->total_len += len;

    if (state->memsize + len < 32) { /* fill in tmp buffer */
        memcpy(((uint8_t *)state->mem64) + state->memsize, input, len);
        state->memsize += (uint32_t)len;
        /* ... */
    }

    if (state->memsize) { /* tmp buffer is full */
        uint64_t *p64 = state->mem64;

        memcpy(((uint8_t *)p64) + state->memsize, input,
            32 - state->memsize);

        state->v1 = xxh64_round(state->v1, get_unaligned_le64(p64));
        p64++;
        state->v2 = xxh64_round(state->v2, get_unaligned_le64(p64));
        p64++;
        state->v3 = xxh64_round(state->v3, get_unaligned_le64(p64));
        p64++;
        state->v4 = xxh64_round(state->v4, get_unaligned_le64(p64));

        p += 32 - state->memsize;
        state->memsize = 0;
    }

    if (p + 32 <= b_end) {
        const uint8_t *const limit = b_end - 32;
        uint64_t v1 = state->v1;
        uint64_t v2 = state->v2;
        uint64_t v3 = state->v3;
        uint64_t v4 = state->v4;

        do {
            v1 = xxh64_round(v1, get_unaligned_le64(p));
            p += 8;
            v2 = xxh64_round(v2, get_unaligned_le64(p));
            p += 8;
            v3 = xxh64_round(v3, get_unaligned_le64(p));
            p += 8;
            v4 = xxh64_round(v4, get_unaligned_le64(p));
            p += 8;
        } while (p <= limit);

        state->v1 = v1;
        state->v2 = v2;
        state->v3 = v3;
        state->v4 = v4;
    }

    if (p < b_end) {
        memcpy(state->mem64, p, (size_t)(b_end - p));
        state->memsize = (uint32_t)(b_end - p);
    }
uint64_t xxh64_digest(const struct xxh64_state *state)
{
    const uint8_t *p = (const uint8_t *)state->mem64;
    const uint8_t *const b_end = (const uint8_t *)state->mem64 +
        state->memsize;
    uint64_t h64;

    if (state->total_len >= 32) {
        const uint64_t v1 = state->v1;
        const uint64_t v2 = state->v2;
        const uint64_t v3 = state->v3;
        const uint64_t v4 = state->v4;
        /* ... lanes are combined and merged; see the sketch below ... */
    } else {
        h64 = state->v3 + PRIME64_5;
    }

    h64 += (uint64_t)state->total_len;

    while (p + 8 <= b_end) {
        const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));
        /* ... */
        p += 8;
    }

    if (p + 4 <= b_end) {
        h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;
        /* ... */
        p += 4;
    }

    while (p < b_end) {
        h64 ^= (*p) * PRIME64_5;
        /* ... */
        p++;
    }
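The xxh64_digest() excerpt loads the four lanes but skips how they are combined, and it ends before the final mixing. In the reference implementation the large-input branch merges the lanes and the result is then avalanched roughly as follows; none of these lines are part of the match, and xxh64_merge_round() folds one lane into the accumulator with an extra round:

    /* Lane merge and final avalanche, per the reference (not matched above). */
    h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
          xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
    h64 = xxh64_merge_round(h64, v1);  /* acc ^= xxh64_round(0, lane); */
    h64 = xxh64_merge_round(h64, v2);  /* acc = acc * PRIME64_1 + PRIME64_4; */
    h64 = xxh64_merge_round(h64, v3);
    h64 = xxh64_merge_round(h64, v4);

    /* ...the buffered tail is mixed in as shown above, then: */
    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;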