--- murmurhash3.c (36a1818f5a1e50b805317ba13f827067d50f6970)
+++ murmurhash3.c (d7e1201443713f5addce2d9b4920d25391883a80)
 // SPDX-License-Identifier: LGPL-2.1+
 /*
  * MurmurHash3 was written by Austin Appleby, and is placed in the public
  * domain. The author hereby disclaims copyright to this source code.
  *
  * Adapted by John Wiele (jwiele@redhat.com).
  */
 
 #include "murmurhash3.h"
 
+#include <asm/unaligned.h>
+
 static inline u64 rotl64(u64 x, s8 r)
 {
 	return (x << r) | (x >> (64 - r));
 }
 
 #define ROTL64(x, y) rotl64(x, y)
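
Note: rotl64() is only well defined for rotate counts strictly between 0 and 64 (at r == 0 the complementary shift `x >> 64` would be undefined in C); every rotation constant MurmurHash3 passes in, such as the 31 and 27 visible further down, is in that range. A minimal standalone sketch of the same rotate, with userspace types and a hypothetical name, for reference:

/* Standalone sketch of the rotate above; userspace stdint types assumed. */
#include <assert.h>
#include <stdint.h>

static inline uint64_t rotl64_demo(uint64_t x, int r)
{
	return (x << r) | (x >> (64 - r));	/* valid for 0 < r < 64 */
}

int main(void)
{
	/* Rotating left by 8 moves the top byte around to the bottom. */
	assert(rotl64_demo(0xff00000000000000ULL, 8) == 0xffULL);
	return 0;
}
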
-static __always_inline u64 getblock64(const u64 *p, int i)
-{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-	return p[i];
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-	return __builtin_bswap64(p[i]);
-#else
-#error "can't figure out byte order"
-#endif
-}
 
-static __always_inline void putblock64(u64 *p, int i, u64 value)
-{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-	p[i] = value;
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-	p[i] = __builtin_bswap64(value);
-#else
-#error "can't figure out byte order"
-#endif
-}
-
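
Note: the removed helpers chose between a plain u64 access and __builtin_bswap64() at compile time, so both versions of the file interpret each block as little-endian u64 values; what actually changes is that getblock64()/putblock64() perform native u64 loads and stores, which require 8-byte-aligned pointers on strict-alignment architectures, whereas get_unaligned_le64()/put_unaligned_le64() from <asm/unaligned.h> make the little-endian, alignment-free access explicit. A rough userspace sketch of the semantics the new accessors provide (the real kernel helpers may compile down to a single instruction where the CPU permits unaligned access):

/* Userspace sketch of get_unaligned_le64()/put_unaligned_le64() semantics. */
#include <stdint.h>
#include <string.h>

static uint64_t get_unaligned_le64_demo(const void *p)
{
	uint8_t b[8];
	uint64_t v = 0;
	int i;

	memcpy(b, p, sizeof(b));	/* byte copy: no alignment required */
	for (i = 7; i >= 0; i--)
		v = (v << 8) | b[i];	/* byte 0 is the least significant */
	return v;
}

static void put_unaligned_le64_demo(uint64_t v, void *p)
{
	uint8_t b[8];
	int i;

	for (i = 0; i < 8; i++)
		b[i] = (uint8_t)(v >> (8 * i));	/* least significant first */
	memcpy(p, b, sizeof(b));
}
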
 /* Finalization mix - force all bits of a hash block to avalanche */
 
 static __always_inline u64 fmix64(u64 k)
 {
 	k ^= k >> 33;
 	k *= 0xff51afd7ed558ccdLLU;
 	k ^= k >> 33;
 	k *= 0xc4ceb9fe1a85ec53LLU;

[... 8 unchanged lines hidden ...]

 	const int nblocks = len / 16;
 
 	u64 h1 = seed;
 	u64 h2 = seed;
 
 	const u64 c1 = 0x87c37b91114253d5LLU;
 	const u64 c2 = 0x4cf5ad432745937fLLU;
 
+	u64 *hash_out = out;
+
 	/* body */
 
 	const u64 *blocks = (const u64 *)(data);
 
 	int i;
 
 	for (i = 0; i < nblocks; i++) {
-		u64 k1 = getblock64(blocks, i * 2 + 0);
-		u64 k2 = getblock64(blocks, i * 2 + 1);
+		u64 k1 = get_unaligned_le64(&blocks[i * 2]);
+		u64 k2 = get_unaligned_le64(&blocks[i * 2 + 1]);
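
Note: only the loads change here, not the addressing. `&blocks[i * 2]` is still just `data` plus `i * 16` bytes, and because the address is handed to an accessor instead of being dereferenced as a u64, the input pointer no longer has to be 8-byte aligned. A small sketch, with a hypothetical local buffer, of that address equivalence:

/* Sketch: &blocks[i * 2] computes the same address as data + i * 16. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t data[64];	/* hypothetical input buffer */
	/* Pointer formed only for address math; it is never dereferenced. */
	const uint64_t *blocks = (const uint64_t *)data;
	int i = 3;

	assert((const void *)&blocks[i * 2] == (const void *)(data + i * 16));
	return 0;
}
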
 
 		k1 *= c1;
 		k1 = ROTL64(k1, 31);
 		k1 *= c2;
 		h1 ^= k1;
 
 		h1 = ROTL64(h1, 27);
 		h1 += h2;

[... 85 unchanged lines hidden ...]

 	h2 += h1;
 
 	h1 = fmix64(h1);
 	h2 = fmix64(h2);
 
 	h1 += h2;
 	h2 += h1;
 
-	putblock64((u64 *)out, 0, h1);
-	putblock64((u64 *)out, 1, h2);
+	put_unaligned_le64(h1, &hash_out[0]);
+	put_unaligned_le64(h2, &hash_out[1]);
 }
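
Note: fmix64() is untouched context in this diff; its hidden tail is, going by the canonical public-domain MurmurHash3 source, `k ^= k >> 33; return k;`. A small userspace sketch of the avalanche property its comment promises - flipping one input bit should flip roughly half of the 64 output bits (uses the GCC/Clang popcount builtin):

/* Sketch: fmix64() avalanche - one flipped input bit changes ~32 output bits. */
#include <stdint.h>
#include <stdio.h>

static uint64_t fmix64(uint64_t k)
{
	k ^= k >> 33;
	k *= 0xff51afd7ed558ccdULL;
	k ^= k >> 33;
	k *= 0xc4ceb9fe1a85ec53ULL;
	k ^= k >> 33;	/* tail reconstructed from the reference source */
	return k;
}

int main(void)
{
	uint64_t a = fmix64(0x1234);
	uint64_t b = fmix64(0x1234 ^ 1);	/* flip the low input bit */

	printf("output bits changed: %d\n", __builtin_popcountll(a ^ b));
	return 0;
}
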