//===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the newly proposed standard C++ interfaces for hashing
// arbitrary data and building hash functions for user-defined types. This
// interface was originally proposed in N3333[1] and is currently under review
// for inclusion in a future TR and/or standard.
//
// The primary interfaces provided comprise one type and three functions:
//
//  -- 'hash_code' class is an opaque type representing the hash code for some
//     data. It is the intended product of hashing, and can be used to implement
//     hash tables, checksumming, and other common uses of hashes. It is not an
//     integer type (although it can be converted to one) because it is risky
//     to assume much about the internals of a hash_code. In particular, each
//     execution of the program has a high probability of producing a different
//     hash_code for a given input. Thus their values are not stable to save or
//     persist, and should only be used during the execution for the
//     construction of hashing datastructures.
//
//  -- 'hash_value' is a function designed to be overloaded for each
//     user-defined type which wishes to be used within a hashing context. It
//     should be overloaded within the user-defined type's namespace and found
//     via ADL. Overloads for primitive types are provided by this library; a
//     sketch of a user-defined overload follows below.
//
//  -- 'hash_combine' and 'hash_combine_range' are functions designed to aid
//     programmers in easily and intuitively combining a set of data into
//     a single hash_code for their object. They should only logically be used
//     within the implementation of a 'hash_value' routine or similar context.
//
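// For illustration, a minimal sketch of hooking a user-defined type into this
// machinery (the 'Point' type here is hypothetical, not part of this library):
//
//   namespace geom {
//   struct Point { int x, y; };
//   // Found via ADL when Point values are hashed or combined.
//   inline llvm::hash_code hash_value(const Point &p) {
//     return llvm::hash_combine(p.x, p.y);
//   }
//   } // namespace geom
//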
// Note that 'hash_combine_range' contains very special logic for hashing
// a contiguous array of integers or pointers. This logic is *extremely* fast:
// on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, these routines
// were benchmarked at over 6.5 GiB/s for large keys, and <20 cycles/hash for
// keys under 32 bytes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_HASHING_H
#define LLVM_ADT_HASHING_H

#include "llvm/Config/abi-breaking.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstring>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

namespace llvm {
template <typename T, typename Enable> struct DenseMapInfo;

/// An opaque object representing a hash code.
///
/// This object represents the result of hashing some entity. It is intended to
/// be used to implement hashtables or other hashing-based data structures.
/// While it wraps and exposes a numeric value, this value should not be
/// trusted to be stable or predictable across processes or executions.
///
/// In order to obtain the hash_code for an object 'x':
/// \code
///   using llvm::hash_value;
///   llvm::hash_code code = hash_value(x);
/// \endcode
class hash_code {
  size_t value;

public:
  /// Default construct a hash_code.
  /// Note that this leaves the value uninitialized.
  hash_code() = default;

  /// Form a hash code directly from a numerical value.
  hash_code(size_t value) : value(value) {}

  /// Convert the hash code to its numerical value for use.
  /*explicit*/ operator size_t() const { return value; }

  friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
    return lhs.value == rhs.value;
  }
  friend bool operator!=(const hash_code &lhs, const hash_code &rhs) {
    return lhs.value != rhs.value;
  }

  /// Allow a hash_code to be directly run through hash_value.
  friend size_t hash_value(const hash_code &code) { return code.value; }
};

/// Compute a hash_code for any integer value.
///
/// Note that this function is intended to compute the same hash_code for
/// a particular value without regard to the pre-promotion type. This is in
/// contrast to hash_combine which may produce different hash_codes for
/// differing argument types even if they would implicitly promote to a common
/// type without changing the value.
template <typename T>
std::enable_if_t<is_integral_or_enum<T>::value, hash_code> hash_value(T value);

/// Compute a hash_code for a pointer's address.
///
/// N.B.: This hashes the *address*. Not the value and not the type.
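/// For example (illustrative only), two distinct objects holding equal values
/// will in general hash differently here, because their addresses differ:
/// \code
///   int x = 0, y = 0;
///   // hash_value(&x) and hash_value(&y) almost surely differ.
/// \endcode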
template <typename T> hash_code hash_value(const T *ptr);

/// Compute a hash_code for a pair of objects.
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg);

/// Compute a hash_code for a tuple.
template <typename... Ts>
hash_code hash_value(const std::tuple<Ts...> &arg);

/// Compute a hash_code for a standard string.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg);

/// Compute a hash_code for a std::optional.
template <typename T> hash_code hash_value(const std::optional<T> &arg);

// All of the implementation details of actually computing the various hash
// code values are held within this namespace. These routines are included in
// the header file mainly to allow inlining and constant propagation.
namespace hashing {
namespace detail {

inline uint64_t fetch64(const char *p) {
  uint64_t result;
  memcpy(&result, p, sizeof(result));
  if (sys::IsBigEndianHost)
    sys::swapByteOrder(result);
  return result;
}

inline uint32_t fetch32(const char *p) {
  uint32_t result;
  memcpy(&result, p, sizeof(result));
  if (sys::IsBigEndianHost)
    sys::swapByteOrder(result);
  return result;
}

/// Some primes between 2^63 and 2^64 for various uses.
static constexpr uint64_t k0 = 0xc3a5c85c97cb3127ULL;
static constexpr uint64_t k1 = 0xb492b66fbe98f273ULL;
static constexpr uint64_t k2 = 0x9ae16a3b2f90404fULL;
static constexpr uint64_t k3 = 0xc949d7c7509e6557ULL;

/// Bitwise right rotate.
/// Normally this will compile to a single instruction, especially if the
/// shift is a manifest constant.
inline uint64_t rotate(uint64_t val, size_t shift) {
  // Avoid shifting by 64: doing so yields an undefined result.
  return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
}

inline uint64_t shift_mix(uint64_t val) {
  return val ^ (val >> 47);
}

inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
  // Murmur-inspired hashing.
  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
  uint64_t a = (low ^ high) * kMul;
  a ^= (a >> 47);
  uint64_t b = (high ^ a) * kMul;
  b ^= (b >> 47);
  b *= kMul;
  return b;
}

inline uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed) {
  uint8_t a = s[0];
  uint8_t b = s[len >> 1];
  uint8_t c = s[len - 1];
  uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
  uint32_t z = static_cast<uint32_t>(len) + (static_cast<uint32_t>(c) << 2);
  return shift_mix(y * k2 ^ z * k3 ^ seed) * k2;
}

inline uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch32(s);
  return hash_16_bytes(len + (a << 3), seed ^ fetch32(s + len - 4));
}

inline uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch64(s);
  uint64_t b = fetch64(s + len - 8);
  return hash_16_bytes(seed ^ a, rotate(b + len, len)) ^ b;
}

inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch64(s) * k1;
  uint64_t b = fetch64(s + 8);
  uint64_t c = fetch64(s + len - 8) * k2;
  uint64_t d = fetch64(s + len - 16) * k0;
  return hash_16_bytes(llvm::rotr<uint64_t>(a - b, 43) +
                           llvm::rotr<uint64_t>(c ^ seed, 30) + d,
                       a + llvm::rotr<uint64_t>(b ^ k3, 20) - c + len + seed);
}

inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t z = fetch64(s + 24);
  uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0;
  uint64_t b = llvm::rotr<uint64_t>(a + z, 52);
  uint64_t c = llvm::rotr<uint64_t>(a, 37);
  a += fetch64(s + 8);
  c += llvm::rotr<uint64_t>(a, 7);
  a += fetch64(s + 16);
  uint64_t vf = a + z;
  uint64_t vs = b + llvm::rotr<uint64_t>(a, 31) + c;
  a = fetch64(s + 16) + fetch64(s + len - 32);
  z = fetch64(s + len - 8);
  b = llvm::rotr<uint64_t>(a + z, 52);
  c = llvm::rotr<uint64_t>(a, 37);
  a += fetch64(s + len - 24);
  c += llvm::rotr<uint64_t>(a, 7);
  a += fetch64(s + len - 16);
  uint64_t wf = a + z;
  uint64_t ws = b + llvm::rotr<uint64_t>(a, 31) + c;
  uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0);
  return shift_mix((seed ^ (r * k0)) + vs) * k2;
}

inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
  if (length >= 4 && length <= 8)
    return hash_4to8_bytes(s, length, seed);
  if (length > 8 && length <= 16)
    return hash_9to16_bytes(s, length, seed);
  if (length > 16 && length <= 32)
    return hash_17to32_bytes(s, length, seed);
  if (length > 32)
    return hash_33to64_bytes(s, length, seed);
  if (length != 0)
    return hash_1to3_bytes(s, length, seed);

  return k2 ^ seed;
}

/// The intermediate state used during hashing.
/// Currently, the algorithm for computing hash codes is based on CityHash and
/// keeps 56 bytes of arbitrary state.
struct hash_state {
  uint64_t h0 = 0, h1 = 0, h2 = 0, h3 = 0, h4 = 0, h5 = 0, h6 = 0;

  /// Create a new hash_state structure and initialize it based on the
  /// seed and the first 64-byte chunk.
  /// This effectively performs the initial mix.
  static hash_state create(const char *s, uint64_t seed) {
    hash_state state = {0,
                        seed,
                        hash_16_bytes(seed, k1),
                        llvm::rotr<uint64_t>(seed ^ k1, 49),
                        seed * k1,
                        shift_mix(seed),
                        0};
    state.h6 = hash_16_bytes(state.h4, state.h5);
    state.mix(s);
    return state;
  }

  /// Mix 32 bytes from the input sequence into the 16 bytes of 'a'
  /// and 'b', including whatever is already in 'a' and 'b'.
  static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
    a += fetch64(s);
    uint64_t c = fetch64(s + 24);
    b = llvm::rotr<uint64_t>(b + a + c, 21);
    uint64_t d = a;
    a += fetch64(s + 8) + fetch64(s + 16);
    b += llvm::rotr<uint64_t>(a, 44) + d;
    a += c;
  }

  /// Mix in a 64-byte buffer of data.
  /// We mix all 64 bytes even when the chunk length is smaller, but we
  /// record the actual length.
  void mix(const char *s) {
    h0 = llvm::rotr<uint64_t>(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
    h1 = llvm::rotr<uint64_t>(h1 + h4 + fetch64(s + 48), 42) * k1;
    h0 ^= h6;
    h1 += h3 + fetch64(s + 40);
    h2 = llvm::rotr<uint64_t>(h2 + h5, 33) * k1;
    h3 = h4 * k1;
    h4 = h0 + h5;
    mix_32_bytes(s, h3, h4);
    h5 = h2 + h6;
    h6 = h1 + fetch64(s + 16);
    mix_32_bytes(s + 32, h5, h6);
    std::swap(h2, h0);
  }

  /// Compute the final 64-bit hash code value based on the current
  /// state and the length of bytes hashed.
  uint64_t finalize(size_t length) {
    return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
                         hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0);
  }
};

/// In LLVM_ENABLE_ABI_BREAKING_CHECKS builds, the seed is non-deterministic
/// per process (address of a function in LLVMSupport) to prevent having users
/// depend on the particular hash values. On platforms without ASLR, this is
/// still likely non-deterministic per build.
inline uint64_t get_execution_seed() {
  // Work around x86-64 negative offset folding for old Clang -fno-pic
  // https://reviews.llvm.org/D93931
#if LLVM_ENABLE_ABI_BREAKING_CHECKS &&                                         \
    (!defined(__clang__) || __clang_major__ > 11)
  return static_cast<uint64_t>(
      reinterpret_cast<uintptr_t>(&install_fatal_error_handler));
#else
  return 0xff51afd7ed558ccdULL;
#endif
}

/// Trait to indicate whether a type's bits can be hashed directly.
///
/// A type trait which is true if we want to combine values for hashing by
/// reading the underlying data. It is false if values of this type must
/// first be passed to hash_value, and the resulting hash_codes combined.
//
// FIXME: We want to replace is_integral_or_enum and is_pointer here with
// a predicate which asserts that comparing the underlying storage of two
// values of the type for equality is equivalent to comparing the two values
// for equality. For all the platforms we care about, this holds for integers
// and pointers, but there are platforms where it doesn't and we would like to
// support user-defined types which happen to satisfy this property.
template <typename T> struct is_hashable_data
    : std::integral_constant<bool, ((is_integral_or_enum<T>::value ||
                                     std::is_pointer<T>::value) &&
                                    64 % sizeof(T) == 0)> {};
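
// For illustration (a sketch, not part of the interface): 'int' qualifies for
// direct byte hashing, while a type like 'std::string' is routed through
// hash_value first.
//
//   static_assert(is_hashable_data<int>::value, "combined as raw bytes");
//   static_assert(!is_hashable_data<std::string>::value,
//                 "combined via hash_value");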

// Special case std::pair to detect when both types are viable and when there
// is no alignment-derived padding in the pair. This is a bit of a lie because
// std::pair isn't truly POD, but it's close enough in all reasonable
// implementations for our use case of hashing the underlying data.
template <typename T, typename U> struct is_hashable_data<std::pair<T, U>>
    : std::integral_constant<bool, (is_hashable_data<T>::value &&
                                    is_hashable_data<U>::value &&
                                    (sizeof(T) + sizeof(U)) ==
                                        sizeof(std::pair<T, U>))> {};

/// Helper to get the hashable data representation for a type.
/// This variant is enabled when the type itself can be used.
template <typename T>
std::enable_if_t<is_hashable_data<T>::value, T>
get_hashable_data(const T &value) {
  return value;
}
/// Helper to get the hashable data representation for a type.
/// This variant is enabled when we must first call hash_value and use the
/// result as our data.
template <typename T>
std::enable_if_t<!is_hashable_data<T>::value, size_t>
get_hashable_data(const T &value) {
  using ::llvm::hash_value;
  return hash_value(value);
}

/// Helper to store data from a value into a buffer and advance the
/// pointer into that buffer.
///
/// This routine first checks whether there is enough space in the provided
/// buffer, and if not immediately returns false. If there is space, it
/// copies the underlying bytes of value into the buffer, advances the
/// buffer_ptr past the copied bytes, and returns true.
template <typename T>
bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T &value,
                       size_t offset = 0) {
  size_t store_size = sizeof(value) - offset;
  if (buffer_ptr + store_size > buffer_end)
    return false;
  const char *value_data = reinterpret_cast<const char *>(&value);
  memcpy(buffer_ptr, value_data + offset, store_size);
  buffer_ptr += store_size;
  return true;
}

/// Implement the combining of integral values into a hash_code.
///
/// This overload is selected when the value type of the iterator is
/// integral. Rather than computing a hash_code for each object and then
/// combining them, this (as an optimization) directly combines the integers.
template <typename InputIteratorT>
hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
  const uint64_t seed = get_execution_seed();
  char buffer[64], *buffer_ptr = buffer;
  char *const buffer_end = std::end(buffer);
  while (first != last && store_and_advance(buffer_ptr, buffer_end,
                                            get_hashable_data(*first)))
    ++first;
  if (first == last)
    return hash_short(buffer, buffer_ptr - buffer, seed);
  assert(buffer_ptr == buffer_end);

  hash_state state = state.create(buffer, seed);
  size_t length = 64;
  while (first != last) {
    // Fill up the buffer. We don't clear it, which re-mixes the last round
    // when only a partial 64-byte chunk is left.
    buffer_ptr = buffer;
    while (first != last && store_and_advance(buffer_ptr, buffer_end,
                                              get_hashable_data(*first)))
      ++first;

    // Rotate the buffer if we did a partial fill in order to simulate doing
    // a mix of the last 64 bytes. That is how the algorithm works when we
    // have a contiguous byte sequence, and we want to emulate that here.
    std::rotate(buffer, buffer_ptr, buffer_end);

    // Mix this chunk into the current state.
    state.mix(buffer);
    length += buffer_ptr - buffer;
  }

  return state.finalize(length);
}

/// Implement the combining of integral values into a hash_code.
///
/// This overload is selected when the value type of the iterator is integral
/// and when the input iterator is actually a pointer. Rather than computing
/// a hash_code for each object and then combining them, this (as an
/// optimization) directly combines the integers. Also, because the integers
/// are stored in contiguous memory, this routine avoids copying each value
/// and directly reads from the underlying memory.
template <typename ValueT>
std::enable_if_t<is_hashable_data<ValueT>::value, hash_code>
hash_combine_range_impl(ValueT *first, ValueT *last) {
  const uint64_t seed = get_execution_seed();
  const char *s_begin = reinterpret_cast<const char *>(first);
  const char *s_end = reinterpret_cast<const char *>(last);
  const size_t length = std::distance(s_begin, s_end);
  if (length <= 64)
    return hash_short(s_begin, length, seed);

  const char *s_aligned_end = s_begin + (length & ~63);
  hash_state state = state.create(s_begin, seed);
  s_begin += 64;
  while (s_begin != s_aligned_end) {
    state.mix(s_begin);
    s_begin += 64;
  }
  if (length & 63)
    state.mix(s_end - 64);

  return state.finalize(length);
}

} // namespace detail
} // namespace hashing

/// Compute a hash_code for a sequence of values.
///
/// This hashes a sequence of values. It produces the same hash_code as
/// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences
/// and is significantly faster given pointers and types which can be hashed as
/// a sequence of bytes.
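///
/// A minimal usage sketch:
/// \code
///   int data[] = {1, 2, 3, 4};
///   llvm::hash_code h =
///       llvm::hash_combine_range(std::begin(data), std::end(data));
/// \endcode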
template <typename InputIteratorT>
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) {
  return ::llvm::hashing::detail::hash_combine_range_impl(first, last);
}

// Implementation details for hash_combine.
namespace hashing {
namespace detail {

/// Helper class to manage the recursive combining of hash_combine
/// arguments.
///
/// This class exists to manage the state and various calls involved in the
/// recursive combining of arguments used in hash_combine. It is particularly
/// useful for minimizing the code in the recursive calls to ease the pain
/// caused by a lack of variadic functions.
struct hash_combine_recursive_helper {
  char buffer[64] = {};
  hash_state state;
  const uint64_t seed;

public:
  /// Construct a recursive hash combining helper.
  ///
  /// This sets up the state for a recursive hash combine, including getting
  /// the seed and buffer setup.
  hash_combine_recursive_helper()
      : seed(get_execution_seed()) {}

  /// Combine one chunk of data into the current in-flight hash.
  ///
  /// This merges one chunk of data into the hash. First it tries to buffer
  /// the data. If the buffer is full, it hashes the buffer into its
  /// hash_state, empties it, and then merges the new chunk in. This also
  /// handles cases where the data straddles the end of the buffer.
  template <typename T>
  char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end,
                     T data) {
    if (!store_and_advance(buffer_ptr, buffer_end, data)) {
      // Check for skew which prevents the buffer from being packed, and do
      // a partial store into the buffer to fill it. This is only a concern
      // with the variadic combine because that formation can have varying
      // argument types.
      size_t partial_store_size = buffer_end - buffer_ptr;
      memcpy(buffer_ptr, &data, partial_store_size);

      // If the store fails, our buffer is full and ready to hash. We have to
      // either initialize the hash state (on the first full buffer) or mix
      // this buffer into the existing hash state. Length tracks the *hashed*
      // length, not the buffered length.
      if (length == 0) {
        state = state.create(buffer, seed);
        length = 64;
      } else {
        // Mix this chunk into the current state and bump length up by 64.
        state.mix(buffer);
        length += 64;
      }
      // Reset the buffer_ptr to the head of the buffer for the next chunk of
      // data.
      buffer_ptr = buffer;

      // Try again to store into the buffer -- this cannot fail as we only
      // store types smaller than the buffer.
      if (!store_and_advance(buffer_ptr, buffer_end, data,
                             partial_store_size))
        llvm_unreachable("buffer smaller than stored type");
    }
    return buffer_ptr;
  }

  /// Recursive, variadic combining method.
  ///
  /// This function recurses through each argument, combining that argument
  /// into a single hash.
  template <typename T, typename ...Ts>
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
                    const T &arg, const Ts &...args) {
    buffer_ptr =
        combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg));

    // Recurse to the next argument.
    return combine(length, buffer_ptr, buffer_end, args...);
  }

  /// Base case for recursive, variadic combining.
  ///
  /// The base case when combining arguments recursively is reached when all
  /// arguments have been handled. It flushes the remaining buffer and
  /// constructs a hash_code.
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) {
    // Check whether the entire set of values fit in the buffer. If so, we'll
    // use the optimized short hashing routine and skip state entirely.
    if (length == 0)
      return hash_short(buffer, buffer_ptr - buffer, seed);

    // Mix the final buffer, rotating it if we did a partial fill in order to
    // simulate doing a mix of the last 64 bytes. That is how the algorithm
    // works when we have a contiguous byte sequence, and we want to emulate
    // that here.
    std::rotate(buffer, buffer_ptr, buffer_end);

    // Mix this chunk into the current state.
    state.mix(buffer);
    length += buffer_ptr - buffer;

    return state.finalize(length);
  }
};

} // namespace detail
} // namespace hashing

/// Combine values into a single hash_code.
///
/// This routine accepts a varying number of arguments of any type. It will
/// attempt to combine them into a single hash_code. For user-defined types it
/// attempts to call a \see hash_value overload (via ADL) for the type. For
/// integer and pointer types it directly combines their data into the
/// resulting hash_code.
///
/// The result is suitable for returning from a user's hash_value
/// *implementation* for their user-defined type. Consumers of a type should
/// *not* call this routine, they should instead call 'hash_value'.
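///
/// A minimal usage sketch (from inside some type's own 'hash_value'
/// implementation, where 'x', 'y', and 'z' stand for its members):
/// \code
///   return llvm::hash_combine(x, y, z);
/// \endcode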
template <typename ...Ts> hash_code hash_combine(const Ts &...args) {
  // Recursively hash each argument using a helper class.
  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
  return helper.combine(0, helper.buffer, helper.buffer + 64, args...);
}

// Implementation details for implementations of hash_value overloads provided
// here.
namespace hashing {
namespace detail {

/// Helper to hash the value of a single integer.
///
/// Overloads for smaller integer types are not provided to ensure consistent
/// behavior in the presence of integral promotions. Essentially,
/// "hash_value('4')" and "hash_value('0' + 4)" should be the same.
inline hash_code hash_integer_value(uint64_t value) {
  // Similar to hash_4to8_bytes but using a seed instead of length.
  const uint64_t seed = get_execution_seed();
  const char *s = reinterpret_cast<const char *>(&value);
  const uint64_t a = fetch32(s);
  return hash_16_bytes(seed + (a << 3), fetch32(s + 4));
}

} // namespace detail
} // namespace hashing

// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T>
std::enable_if_t<is_integral_or_enum<T>::value, hash_code> hash_value(T value) {
  return ::llvm::hashing::detail::hash_integer_value(
      static_cast<uint64_t>(value));
}

// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T> hash_code hash_value(const T *ptr) {
  return ::llvm::hashing::detail::hash_integer_value(
      reinterpret_cast<uintptr_t>(ptr));
}

// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg) {
  return hash_combine(arg.first, arg.second);
}

template <typename... Ts> hash_code hash_value(const std::tuple<Ts...> &arg) {
  return std::apply([](const auto &...xs) { return hash_combine(xs...); }, arg);
}

// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg) {
  return hash_combine_range(arg.begin(), arg.end());
}

template <typename T> hash_code hash_value(const std::optional<T> &arg) {
  return arg ? hash_combine(true, *arg) : hash_value(false);
}

template <> struct DenseMapInfo<hash_code, void> {
  static inline hash_code getEmptyKey() { return hash_code(-1); }
  static inline hash_code getTombstoneKey() { return hash_code(-2); }
  static unsigned getHashValue(hash_code val) {
    return static_cast<unsigned>(size_t(val));
  }
  static bool isEqual(hash_code LHS, hash_code RHS) { return LHS == RHS; }
};

} // namespace llvm

/// Implement std::hash so that hash_code can be used in STL containers.
namespace std {

template<>
struct hash<llvm::hash_code> {
  size_t operator()(llvm::hash_code const& Val) const {
    return Val;
  }
};
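
// For example (illustrative only), this specialization lets hash_code be used
// directly as a key in standard containers:
//
//   std::unordered_set<llvm::hash_code> seen;
//   seen.insert(llvm::hash_value(42));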

} // namespace std

#endif