// SPDX-License-Identifier: LGPL-2.1+
/*
 * MurmurHash3 was written by Austin Appleby, and is placed in the public
 * domain. The author hereby disclaims copyright to this source code.
 *
 * Adapted by John Wiele (jwiele@redhat.com).
 */

#include "murmurhash3.h"

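/*
 * Rotate x left by r bits. Every rotation amount used in this file is a
 * compile-time constant in the range 1..63, so the complementary
 * (64 - r) shift is always well-defined.
 */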
static inline u64 rotl64(u64 x, s8 r)
{
	return (x << r) | (x >> (64 - r));
}

#define ROTL64(x, y) rotl64(x, y)
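/*
 * Read the i-th 64-bit block as a little-endian value, byte-swapping on
 * big-endian hosts so the hash is identical regardless of byte order.
 */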
static __always_inline u64 getblock64(const u64 *p, int i)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return p[i];
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap64(p[i]);
#else
#error "can't figure out byte order"
#endif
}

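/*
 * Store a 64-bit result in little-endian byte order, so the digest
 * bytes come out the same on little- and big-endian hosts.
 */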
static __always_inline void putblock64(u64 *p, int i, u64 value)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	p[i] = value;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	p[i] = __builtin_bswap64(value);
#else
#error "can't figure out byte order"
#endif
}

/* Finalization mix - force all bits of a hash block to avalanche */

static __always_inline u64 fmix64(u64 k)
{
	k ^= k >> 33;
	k *= 0xff51afd7ed558ccdLLU;
	k ^= k >> 33;
	k *= 0xc4ceb9fe1a85ec53LLU;
	k ^= k >> 33;

	return k;
}

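/*
 * Compute the 128-bit MurmurHash3 (x64 variant) of a key, consuming the
 * input as two interleaved 64-bit lanes, h1 and h2. Note that the block
 * loads below access the key through a u64 pointer, which assumes the
 * key is suitably aligned (or that the platform tolerates unaligned
 * 64-bit loads).
 */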
void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
{
	const u8 *data = key;
	const int nblocks = len / 16;

	u64 h1 = seed;
	u64 h2 = seed;

	const u64 c1 = 0x87c37b91114253d5LLU;
	const u64 c2 = 0x4cf5ad432745937fLLU;

	/* body */

	const u64 *blocks = (const u64 *)(data);

	int i;

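	/*
	 * Mix each full 16-byte block into the two lanes: each half is
	 * multiplied, rotated, and multiplied again before being folded
	 * into its lane, and the lanes cross-feed each other.
	 */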
	for (i = 0; i < nblocks; i++) {
		u64 k1 = getblock64(blocks, i * 2 + 0);
		u64 k2 = getblock64(blocks, i * 2 + 1);

		k1 *= c1;
		k1 = ROTL64(k1, 31);
		k1 *= c2;
		h1 ^= k1;

		h1 = ROTL64(h1, 27);
		h1 += h2;
		h1 = h1 * 5 + 0x52dce729;

		k2 *= c2;
		k2 = ROTL64(k2, 33);
		k2 *= c1;
		h2 ^= k2;

		h2 = ROTL64(h2, 31);
		h2 += h1;
		h2 = h2 * 5 + 0x38495ab5;
	}

	/* tail */

	{
		const u8 *tail = (const u8 *)(data + nblocks * 16);

		u64 k1 = 0;
		u64 k2 = 0;

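		/*
		 * Gather the trailing (len % 16) bytes into k1 and k2 in
		 * little-endian order; each case falls through to the
		 * next smaller one before the final mix of each half.
		 */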
		switch (len & 15) {
		case 15:
			k2 ^= ((u64)tail[14]) << 48;
			fallthrough;
		case 14:
			k2 ^= ((u64)tail[13]) << 40;
			fallthrough;
		case 13:
			k2 ^= ((u64)tail[12]) << 32;
			fallthrough;
		case 12:
			k2 ^= ((u64)tail[11]) << 24;
			fallthrough;
		case 11:
			k2 ^= ((u64)tail[10]) << 16;
			fallthrough;
		case 10:
			k2 ^= ((u64)tail[9]) << 8;
			fallthrough;
		case 9:
			k2 ^= ((u64)tail[8]) << 0;
			k2 *= c2;
			k2 = ROTL64(k2, 33);
			k2 *= c1;
			h2 ^= k2;
			fallthrough;

		case 8:
			k1 ^= ((u64)tail[7]) << 56;
			fallthrough;
		case 7:
			k1 ^= ((u64)tail[6]) << 48;
			fallthrough;
		case 6:
			k1 ^= ((u64)tail[5]) << 40;
			fallthrough;
		case 5:
			k1 ^= ((u64)tail[4]) << 32;
			fallthrough;
		case 4:
			k1 ^= ((u64)tail[3]) << 24;
			fallthrough;
		case 3:
			k1 ^= ((u64)tail[2]) << 16;
			fallthrough;
		case 2:
			k1 ^= ((u64)tail[1]) << 8;
			fallthrough;
		case 1:
			k1 ^= ((u64)tail[0]) << 0;
			k1 *= c1;
			k1 = ROTL64(k1, 31);
			k1 *= c2;
			h1 ^= k1;
			break;
		default:
			break;
		}
	}
	/* finalization */

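	/*
	 * Fold the total length into both lanes, cross-add them,
	 * avalanche each lane with fmix64(), then cross-add again so
	 * every output bit depends on every input bit.
	 */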
	h1 ^= len;
	h2 ^= len;

	h1 += h2;
	h2 += h1;

	h1 = fmix64(h1);
	h2 = fmix64(h2);

	h1 += h2;
	h2 += h1;

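	/* Emit the 128-bit digest as two little-endian u64 words. */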
	putblock64((u64 *)out, 0, h1);
	putblock64((u64 *)out, 1, h2);
}