// SPDX-License-Identifier: LGPL-2.1+
/*
 * MurmurHash3 was written by Austin Appleby, and is placed in the public
 * domain. The author hereby disclaims copyright to this source code.
 *
 * Adapted by John Wiele (jwiele@redhat.com).
 */

#include "murmurhash3.h"

static inline u64 rotl64(u64 x, s8 r)
{
	return (x << r) | (x >> (64 - r));
}

#define ROTL64(x, y) rotl64(x, y)

/* Read one 64-bit block of input, normalizing to little-endian order. */
static __always_inline u64 getblock64(const u64 *p, int i)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return p[i];
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap64(p[i]);
#else
#error "can't figure out byte order"
#endif
}

/* Write one 64-bit block of output in little-endian order. */
static __always_inline void putblock64(u64 *p, int i, u64 value)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	p[i] = value;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	p[i] = __builtin_bswap64(value);
#else
#error "can't figure out byte order"
#endif
}

/* Finalization mix - force all bits of a hash block to avalanche. */
static __always_inline u64 fmix64(u64 k)
{
	k ^= k >> 33;
	k *= 0xff51afd7ed558ccdLLU;
	k ^= k >> 33;
	k *= 0xc4ceb9fe1a85ec53LLU;
	k ^= k >> 33;

	return k;
}

/*
 * Compute the 128-bit (x64 variant) MurmurHash3 of 'key' and write the two
 * 64-bit halves of the result to 'out' in little-endian order.
 */
void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
{
	const u8 *data = key;
	const int nblocks = len / 16;

	u64 h1 = seed;
	u64 h2 = seed;

	const u64 c1 = 0x87c37b91114253d5LLU;
	const u64 c2 = 0x4cf5ad432745937fLLU;

	/* body: process the input sixteen bytes (two 64-bit blocks) at a time */

	const u64 *blocks = (const u64 *)(data);

	int i;

	for (i = 0; i < nblocks; i++) {
		u64 k1 = getblock64(blocks, i * 2 + 0);
		u64 k2 = getblock64(blocks, i * 2 + 1);

		k1 *= c1;
		k1 = ROTL64(k1, 31);
		k1 *= c2;
		h1 ^= k1;

		h1 = ROTL64(h1, 27);
		h1 += h2;
		h1 = h1 * 5 + 0x52dce729;

		k2 *= c2;
		k2 = ROTL64(k2, 33);
		k2 *= c1;
		h2 ^= k2;

		h2 = ROTL64(h2, 31);
		h2 += h1;
		h2 = h2 * 5 + 0x38495ab5;
	}

	/* tail: mix in the final 1 to 15 bytes that do not fill a block */

	{
		const u8 *tail = (const u8 *)(data + nblocks * 16);

		u64 k1 = 0;
		u64 k2 = 0;

		switch (len & 15) {
		case 15:
			k2 ^= ((u64)tail[14]) << 48;
			fallthrough;
		case 14:
			k2 ^= ((u64)tail[13]) << 40;
			fallthrough;
		case 13:
			k2 ^= ((u64)tail[12]) << 32;
			fallthrough;
		case 12:
			k2 ^= ((u64)tail[11]) << 24;
			fallthrough;
		case 11:
			k2 ^= ((u64)tail[10]) << 16;
			fallthrough;
		case 10:
			k2 ^= ((u64)tail[9]) << 8;
			fallthrough;
		case 9:
			k2 ^= ((u64)tail[8]) << 0;
			k2 *= c2;
			k2 = ROTL64(k2, 33);
			k2 *= c1;
			h2 ^= k2;
			fallthrough;

		case 8:
			k1 ^= ((u64)tail[7]) << 56;
			fallthrough;
		case 7:
			k1 ^= ((u64)tail[6]) << 48;
			fallthrough;
		case 6:
			k1 ^= ((u64)tail[5]) << 40;
			fallthrough;
		case 5:
			k1 ^= ((u64)tail[4]) << 32;
			fallthrough;
		case 4:
			k1 ^= ((u64)tail[3]) << 24;
			fallthrough;
		case 3:
			k1 ^= ((u64)tail[2]) << 16;
			fallthrough;
		case 2:
			k1 ^= ((u64)tail[1]) << 8;
			fallthrough;
		case 1:
			k1 ^= ((u64)tail[0]) << 0;
			k1 *= c1;
			k1 = ROTL64(k1, 31);
			k1 *= c2;
			h1 ^= k1;
			break;
		default:
			break;
		}
	}

	/* finalization */

	h1 ^= len;
	h2 ^= len;

	h1 += h2;
	h2 += h1;

	h1 = fmix64(h1);
	h2 = fmix64(h2);

	h1 += h2;
	h2 += h1;

	putblock64((u64 *)out, 0, h1);
	putblock64((u64 *)out, 1, h2);
}