//===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the newly proposed standard C++ interfaces for hashing
// arbitrary data and building hash functions for user-defined types. This
// interface was originally proposed in N3333[1] and is currently under review
// for inclusion in a future TR and/or standard.
//
// The primary interfaces provided consist of one type and three functions:
//
//  -- 'hash_code' class is an opaque type representing the hash code for some
//     data. It is the intended product of hashing, and can be used to
//     implement hash tables, checksumming, and other common uses of hashes. It
//     is not an integer type (although it can be converted to one) because it
//     is risky to assume much about the internals of a hash_code. In
//     particular, each execution of the program has a high probability of
//     producing a different hash_code for a given input. Thus hash_code values
//     are not stable from run to run and should never be saved or persisted;
//     they should only be used within a single execution to build hashing
//     data structures.
//
//  -- 'hash_value' is a function designed to be overloaded for each
//     user-defined type that wishes to be used within a hashing context. It
//     should be overloaded within the user-defined type's namespace and found
//     via ADL. Overloads for primitive types are provided by this library.
//
//  -- 'hash_combine' and 'hash_combine_range' are functions designed to aid
//     programmers in easily and intuitively combining a set of data into
//     a single hash_code for their object. They should only logically be used
//     within the implementation of a 'hash_value' routine or similar context
//     (see the example below).
//
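// For example (an illustrative sketch only; 'example' and 'Point' are
// hypothetical names, not part of this library), a user-defined type would
// typically tie these pieces together as follows:
//
//   namespace example {
//     struct Point { int x, y; };
//
//     // Found via ADL whenever an example::Point is hashed.
//     inline llvm::hash_code hash_value(const Point &p) {
//       return llvm::hash_combine(p.x, p.y);
//     }
//   }
//
//   // At a use site:
//   using llvm::hash_value;
//   llvm::hash_code code = hash_value(p);
//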
// Note that 'hash_combine_range' contains very special logic for hashing
// a contiguous array of integers or pointers. This logic is *extremely* fast:
// on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, it was
// benchmarked at over 6.5 GiB/s for large keys, and under 20 cycles/hash for
// keys under 32 bytes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_HASHING_H
#define LLVM_ADT_HASHING_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <iterator>
#include <string>
#include <utility>

// Allow detecting C++11 feature availability when building with Clang without
// breaking other compilers.
#ifndef __has_feature
# define __has_feature(x) 0
#endif

namespace llvm {

/// \brief An opaque object representing a hash code.
///
/// This object represents the result of hashing some entity. It is intended to
/// be used to implement hash tables or other hashing-based data structures.
/// While it wraps and exposes a numeric value, this value should not be
/// trusted to be stable or predictable across processes or executions.
///
/// In order to obtain the hash_code for an object 'x':
/// \code
///   using llvm::hash_value;
///   llvm::hash_code code = hash_value(x);
/// \endcode
class hash_code {
  size_t value;

public:
  /// \brief Default construct a hash_code.
  /// Note that this leaves the value uninitialized.
  hash_code() {}

  /// \brief Form a hash code directly from a numerical value.
  hash_code(size_t value) : value(value) {}

  /// \brief Convert the hash code to its numerical value for use.
  /*explicit*/ operator size_t() const { return value; }

  friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
    return lhs.value == rhs.value;
  }
  friend bool operator!=(const hash_code &lhs, const hash_code &rhs) {
    return lhs.value != rhs.value;
  }

  /// \brief Allow a hash_code to be directly run through hash_value.
  friend size_t hash_value(const hash_code &code) { return code.value; }
};

/// \brief Compute a hash_code for any integer value.
///
/// Note that this function is intended to compute the same hash_code for
/// a particular value without regard to the pre-promotion type. This is in
/// contrast to hash_combine which may produce different hash_codes for
/// differing argument types even if they would implicitly promote to a common
/// type without changing the value.
template <typename T>
typename enable_if<is_integral_or_enum<T>, hash_code>::type hash_value(T value);

/// \brief Compute a hash_code for a pointer's address.
///
/// N.B.: This hashes the *address*, not the value and not the type.
template <typename T> hash_code hash_value(const T *ptr);

/// \brief Compute a hash_code for a pair of objects.
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg);

/// \brief Compute a hash_code for a standard string.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg);


/// \brief Override the execution seed with a fixed value.
///
/// This hashing library uses a per-execution seed designed to change on each
/// run with high probability in order to ensure that the hash codes are not
/// attackable and to ensure that output which is intended to be stable does
/// not rely on the particulars of the hash codes produced.
///
/// That said, there are use cases where it is important to be able to
/// reproduce *exactly* a specific behavior. To that end, we provide a function
/// which will forcibly set the seed to a fixed value. This must be done at the
/// start of the program, before any hashes are computed. Also, it cannot be
/// undone. This makes it thread-hostile and very hard to use outside of
/// immediately on start of a simple program designed for reproducible
/// behavior.
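///
/// As a hypothetical sketch (the seed value shown is arbitrary), such a
/// program would call this once at the very top of main, before anything
/// else computes a hash:
/// \code
///   int main(int argc, char **argv) {
///     llvm::set_fixed_execution_hash_seed(42);
///     // ... all hashing happens after this point ...
///   }
/// \endcode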
void set_fixed_execution_hash_seed(size_t fixed_value);


// All of the implementation details of actually computing the various hash
// code values are held within this namespace. These routines are included in
// the header file mainly to allow inlining and constant propagation.
namespace hashing {
namespace detail {

/// \brief Read a 64-bit integer from a (potentially unaligned) buffer,
/// interpreting the bytes as little-endian.
inline uint64_t fetch64(const char *p) {
  uint64_t result;
  memcpy(&result, p, sizeof(result));
  if (sys::IsBigEndianHost)
    return sys::SwapByteOrder(result);
  return result;
}

/// \brief Read a 32-bit integer from a (potentially unaligned) buffer,
/// interpreting the bytes as little-endian.
inline uint32_t fetch32(const char *p) {
  uint32_t result;
  memcpy(&result, p, sizeof(result));
  if (sys::IsBigEndianHost)
    return sys::SwapByteOrder(result);
  return result;
}

/// Some primes between 2^63 and 2^64 for various uses.
static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
static const uint64_t k1 = 0xb492b66fbe98f273ULL;
static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
static const uint64_t k3 = 0xc949d7c7509e6557ULL;

/// \brief Bitwise right rotate.
/// Normally this will compile to a single instruction, especially if the
/// shift is a manifest constant.
inline uint64_t rotate(uint64_t val, size_t shift) {
  // Avoid shifting by 64: doing so yields an undefined result.
  return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
}

inline uint64_t shift_mix(uint64_t val) {
  return val ^ (val >> 47);
}

inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
  // Murmur-inspired hashing.
  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
  uint64_t a = (low ^ high) * kMul;
  a ^= (a >> 47);
  uint64_t b = (high ^ a) * kMul;
  b ^= (b >> 47);
  b *= kMul;
  return b;
}

inline uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed) {
  uint8_t a = s[0];
  uint8_t b = s[len >> 1];
  uint8_t c = s[len - 1];
  uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
  uint32_t z = len + (static_cast<uint32_t>(c) << 2);
  return shift_mix(y * k2 ^ z * k3 ^ seed) * k2;
}

inline uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch32(s);
  return hash_16_bytes(len + (a << 3), seed ^ fetch32(s + len - 4));
}

inline uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch64(s);
  uint64_t b = fetch64(s + len - 8);
  return hash_16_bytes(seed ^ a, rotate(b + len, len)) ^ b;
}

inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch64(s) * k1;
  uint64_t b = fetch64(s + 8);
  uint64_t c = fetch64(s + len - 8) * k2;
  uint64_t d = fetch64(s + len - 16) * k0;
  return hash_16_bytes(rotate(a - b, 43) + rotate(c ^ seed, 30) + d,
                       a + rotate(b ^ k3, 20) - c + len + seed);
}

inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t z = fetch64(s + 24);
  uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0;
  uint64_t b = rotate(a + z, 52);
  uint64_t c = rotate(a, 37);
  a += fetch64(s + 8);
  c += rotate(a, 7);
  a += fetch64(s + 16);
  uint64_t vf = a + z;
  uint64_t vs = b + rotate(a, 31) + c;
  a = fetch64(s + 16) + fetch64(s + len - 32);
  z = fetch64(s + len - 8);
  b = rotate(a + z, 52);
  c = rotate(a, 37);
  a += fetch64(s + len - 24);
  c += rotate(a, 7);
  a += fetch64(s + len - 16);
  uint64_t wf = a + z;
  uint64_t ws = b + rotate(a, 31) + c;
  uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0);
  return shift_mix((seed ^ (r * k0)) + vs) * k2;
}

/// \brief Hash a short sequence of 0 to 64 bytes, dispatching on its length.
inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
  if (length >= 4 && length <= 8)
    return hash_4to8_bytes(s, length, seed);
  if (length > 8 && length <= 16)
    return hash_9to16_bytes(s, length, seed);
  if (length > 16 && length <= 32)
    return hash_17to32_bytes(s, length, seed);
  if (length > 32)
    return hash_33to64_bytes(s, length, seed);
  if (length != 0)
    return hash_1to3_bytes(s, length, seed);

  return k2 ^ seed;
}

/// \brief The intermediate state used during hashing.
/// Currently, the algorithm for computing hash codes is based on CityHash and
/// keeps 56 bytes of arbitrary state (the seven 64-bit lanes h0 through h6).
struct hash_state {
  uint64_t h0, h1, h2, h3, h4, h5, h6;
  uint64_t seed;

  /// \brief Create a new hash_state structure and initialize it based on the
  /// seed and the first 64-byte chunk.
  /// This effectively performs the initial mix.
  static hash_state create(const char *s, uint64_t seed) {
    hash_state state = {
      0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
      seed * k1, shift_mix(seed), 0, seed };
    state.h6 = hash_16_bytes(state.h4, state.h5);
    state.mix(s);
    return state;
  }

  /// \brief Mix 32 bytes from the input sequence into the 16 bytes of 'a'
  /// and 'b', including whatever is already in 'a' and 'b'.
  static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
    a += fetch64(s);
    uint64_t c = fetch64(s + 24);
    b = rotate(b + a + c, 21);
    uint64_t d = a;
    a += fetch64(s + 8) + fetch64(s + 16);
    b += rotate(a, 44) + d;
    a += c;
  }

  /// \brief Mix in a 64-byte buffer of data.
  /// We mix all 64 bytes even when the chunk length is smaller, but we
  /// record the actual length.
  void mix(const char *s) {
    h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
    h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1;
    h0 ^= h6;
    h1 += h3 + fetch64(s + 40);
    h2 = rotate(h2 + h5, 33) * k1;
    h3 = h4 * k1;
    h4 = h0 + h5;
    mix_32_bytes(s, h3, h4);
    h5 = h2 + h6;
    h6 = h1 + fetch64(s + 16);
    mix_32_bytes(s + 32, h5, h6);
    std::swap(h2, h0);
  }

  /// \brief Compute the final 64-bit hash code value based on the current
  /// state and the length of bytes hashed.
  uint64_t finalize(size_t length) {
    return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
                         hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0);
  }
};
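
// An informative sketch only (mirroring how the routines below drive
// hash_state over a long contiguous buffer 's' of 'length' bytes); this is
// not a separate API:
//
//   hash_state state = hash_state::create(s, seed); // mix the first 64 bytes
//   for (const char *p = s + 64; p != s + (length & ~63); p += 64)
//     state.mix(p);                   // mix each whole 64-byte chunk
//   if (length & 63)
//     state.mix(s + length - 64);     // re-mix the (overlapping) final bytes
//   return state.finalize(length);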

/// \brief A global, fixed seed-override variable.
///
/// This variable can be set using the \see llvm::set_fixed_execution_hash_seed
/// function. See that function for details. Do not, under any circumstances,
/// set or read this variable directly.
extern size_t fixed_seed_override;

inline size_t get_execution_seed() {
  // FIXME: This needs to be a per-execution seed. This is just a placeholder
  // implementation. Switching to a per-execution seed is likely to flush out
  // instability bugs and so will happen as its own commit.
  //
  // However, if there is a fixed seed override set the first time this is
  // called, return that instead of the per-execution seed.
  const uint64_t seed_prime = 0xff51afd7ed558ccdULL;
  static size_t seed = fixed_seed_override ? fixed_seed_override
                                           : (size_t)seed_prime;
  return seed;
}


/// \brief Trait to indicate whether a type's bits can be hashed directly.
///
/// A type trait which is true if we want to combine values for hashing by
/// reading the underlying data. It is false if values of this type must
/// first be passed to hash_value, and the resulting hash_codes combined.
//
// FIXME: We want to replace is_integral_or_enum and is_pointer here with
// a predicate which asserts that comparing the underlying storage of two
// values of the type for equality is equivalent to comparing the two values
// for equality. For all the platforms we care about, this holds for integers
// and pointers, but there are platforms where it doesn't and we would like to
// support user-defined types which happen to satisfy this property.
template <typename T> struct is_hashable_data
  : integral_constant<bool, ((is_integral_or_enum<T>::value ||
                              is_pointer<T>::value) &&
                             64 % sizeof(T) == 0)> {};

// Special case std::pair to detect when both types are viable and when there
// is no alignment-derived padding in the pair. This is a bit of a lie because
// std::pair isn't truly POD, but it's close enough in all reasonable
// implementations for our use case of hashing the underlying data.
template <typename T, typename U> struct is_hashable_data<std::pair<T, U> >
  : integral_constant<bool, (is_hashable_data<T>::value &&
                             is_hashable_data<U>::value &&
                             (sizeof(T) + sizeof(U)) ==
                              sizeof(std::pair<T, U>))> {};
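
// Informative examples only (not an exhaustive specification): under this
// definition is_hashable_data is expected to be true for types such as 'int',
// 'char', enumerations, and 'T *', and false for types such as std::string,
// which must instead be passed through hash_value and have the resulting
// hash_code combined.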

/// \brief Helper to get the hashable data representation for a type.
/// This variant is enabled when the type itself can be used.
template <typename T>
typename enable_if<is_hashable_data<T>, T>::type
get_hashable_data(const T &value) {
  return value;
}
/// \brief Helper to get the hashable data representation for a type.
/// This variant is enabled when we must first call hash_value and use the
/// result as our data.
template <typename T>
typename enable_if_c<!is_hashable_data<T>::value, size_t>::type
get_hashable_data(const T &value) {
  using ::llvm::hash_value;
  return hash_value(value);
}

/// \brief Helper to store data from a value into a buffer and advance the
/// pointer into that buffer.
///
/// This routine first checks whether there is enough space in the provided
/// buffer, and if not immediately returns false. If there is space, it
/// copies the underlying bytes of value into the buffer, advances the
/// buffer_ptr past the copied bytes, and returns true.
template <typename T>
bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T &value,
                       size_t offset = 0) {
  size_t store_size = sizeof(value) - offset;
  if (buffer_ptr + store_size > buffer_end)
    return false;
  const char *value_data = reinterpret_cast<const char *>(&value);
  memcpy(buffer_ptr, value_data + offset, store_size);
  buffer_ptr += store_size;
  return true;
}

/// \brief Implement the combining of a sequence of values into a hash_code.
///
/// This overload handles general input iterators. Each value is converted to
/// its hashable data representation (either its raw bytes or the hash_code
/// produced by hash_value) and packed into 64-byte chunks which are then
/// mixed into the hash state, avoiding a separate hash_code per element.
template <typename InputIteratorT>
hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
  const size_t seed = get_execution_seed();
  char buffer[64], *buffer_ptr = buffer;
  char *const buffer_end = buffer_ptr + array_lengthof(buffer);
  while (first != last && store_and_advance(buffer_ptr, buffer_end,
                                            get_hashable_data(*first)))
    ++first;
  if (first == last)
    return hash_short(buffer, buffer_ptr - buffer, seed);
  assert(buffer_ptr == buffer_end);

  hash_state state = hash_state::create(buffer, seed);
  size_t length = 64;
  while (first != last) {
    // Fill up the buffer. We don't clear it, which re-mixes the last round
    // when only a partial 64-byte chunk is left.
    buffer_ptr = buffer;
    while (first != last && store_and_advance(buffer_ptr, buffer_end,
                                              get_hashable_data(*first)))
      ++first;

    // Rotate the buffer if we did a partial fill in order to simulate doing
    // a mix of the last 64-bytes. That is how the algorithm works when we
    // have a contiguous byte sequence, and we want to emulate that here.
    std::rotate(buffer, buffer_ptr, buffer_end);

    // Mix this chunk into the current state.
    state.mix(buffer);
    length += buffer_ptr - buffer;
  }

  return state.finalize(length);
}

/// \brief Implement the combining of integral values into a hash_code.
///
/// This overload is selected when the value type of the iterator is integral
/// and when the input iterator is actually a pointer. Rather than computing
/// a hash_code for each object and then combining them, this (as an
/// optimization) directly combines the integers. Also, because the integers
/// are stored in contiguous memory, this routine avoids copying each value
/// and directly reads from the underlying memory.
template <typename ValueT>
typename enable_if<is_hashable_data<ValueT>, hash_code>::type
hash_combine_range_impl(ValueT *first, ValueT *last) {
  const size_t seed = get_execution_seed();
  const char *s_begin = reinterpret_cast<const char *>(first);
  const char *s_end = reinterpret_cast<const char *>(last);
  const size_t length = std::distance(s_begin, s_end);
  if (length <= 64)
    return hash_short(s_begin, length, seed);

  const char *s_aligned_end = s_begin + (length & ~63);
  hash_state state = hash_state::create(s_begin, seed);
  s_begin += 64;
  while (s_begin != s_aligned_end) {
    state.mix(s_begin);
    s_begin += 64;
  }
  if (length & 63)
    state.mix(s_end - 64);

  return state.finalize(length);
}

} // namespace detail
} // namespace hashing


/// \brief Compute a hash_code for a sequence of values.
///
/// This hashes a sequence of values. It produces the same hash_code as
/// 'hash_combine(a, b, c, ...)', but can run over arbitrary-sized sequences
/// and is significantly faster given pointers and types that can be hashed as
/// a sequence of bytes.
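///
/// For example (a hypothetical sketch):
/// \code
///   int data[] = {1, 2, 3, 4};
///   llvm::hash_code code = llvm::hash_combine_range(data, data + 4);
/// \endcode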
template <typename InputIteratorT>
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) {
  return ::llvm::hashing::detail::hash_combine_range_impl(first, last);
}


// Implementation details for hash_combine.
namespace hashing {
namespace detail {

/// \brief Helper class to manage the recursive combining of hash_combine
/// arguments.
///
/// This class exists to manage the state and various calls involved in the
/// recursive combining of arguments used in hash_combine. It is particularly
/// useful for minimizing the code in the recursive calls to ease the pain
/// caused by a lack of variadic functions.
struct hash_combine_recursive_helper {
  char buffer[64];
  hash_state state;
  const size_t seed;

public:
  /// \brief Construct a recursive hash combining helper.
  ///
  /// This sets up the state for a recursive hash combine, including getting
  /// the seed and buffer setup.
  hash_combine_recursive_helper()
    : seed(get_execution_seed()) {}

  /// \brief Combine one chunk of data into the current in-flight hash.
  ///
  /// This merges one chunk of data into the hash. First it tries to buffer
  /// the data. If the buffer is full, it hashes the buffer into its
  /// hash_state, empties it, and then merges the new chunk in. This also
  /// handles cases where the data straddles the end of the buffer.
  template <typename T>
  char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end,
                     T data) {
    if (!store_and_advance(buffer_ptr, buffer_end, data)) {
      // Check for skew which prevents the buffer from being packed, and do
      // a partial store into the buffer to fill it. This is only a concern
      // with the variadic combine because that formation can have varying
      // argument types.
      size_t partial_store_size = buffer_end - buffer_ptr;
      memcpy(buffer_ptr, &data, partial_store_size);

      // If the store fails, our buffer is full and ready to hash. We have to
      // either initialize the hash state (on the first full buffer) or mix
      // this buffer into the existing hash state. Length tracks the *hashed*
      // length, not the buffered length.
      if (length == 0) {
        state = hash_state::create(buffer, seed);
        length = 64;
      } else {
        // Mix this chunk into the current state and bump length up by 64.
        state.mix(buffer);
        length += 64;
      }
      // Reset the buffer_ptr to the head of the buffer for the next chunk of
      // data.
      buffer_ptr = buffer;

      // Try again to store into the buffer -- this cannot fail as we only
      // store types smaller than the buffer.
      if (!store_and_advance(buffer_ptr, buffer_end, data,
                             partial_store_size))
        abort();
    }
    return buffer_ptr;
  }

#if defined(__has_feature) && __has_feature(__cxx_variadic_templates__)

  /// \brief Recursive, variadic combining method.
  ///
  /// This function recurses through each argument, combining that argument
  /// into a single hash.
  template <typename T, typename ...Ts>
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
                    const T &arg, const Ts &...args) {
    buffer_ptr = combine_data(length, buffer_ptr, buffer_end,
                              get_hashable_data(arg));

    // Recurse to the next argument.
    return combine(length, buffer_ptr, buffer_end, args...);
  }

#else
  // Manually expanded recursive combining methods. See the variadic overload
  // above for documentation.

  template <typename T1, typename T2, typename T3, typename T4, typename T5,
            typename T6>
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
                    const T1 &arg1, const T2 &arg2, const T3 &arg3,
                    const T4 &arg4, const T5 &arg5, const T6 &arg6) {
    buffer_ptr = combine_data(length, buffer_ptr, buffer_end,
                              get_hashable_data(arg1));
    return combine(length, buffer_ptr, buffer_end, arg2, arg3, arg4, arg5,
                   arg6);
  }
  template <typename T1, typename T2, typename T3, typename T4, typename T5>
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
                    const T1 &arg1, const T2 &arg2, const T3 &arg3,
                    const T4 &arg4, const T5 &arg5) {
    buffer_ptr = combine_data(length, buffer_ptr, buffer_end,
                              get_hashable_data(arg1));
    return combine(length, buffer_ptr, buffer_end, arg2, arg3, arg4, arg5);
  }
  template <typename T1, typename T2, typename T3, typename T4>
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
                    const T1 &arg1, const T2 &arg2, const T3 &arg3,
                    const T4 &arg4) {
    buffer_ptr = combine_data(length, buffer_ptr, buffer_end,
                              get_hashable_data(arg1));
    return combine(length, buffer_ptr, buffer_end, arg2, arg3, arg4);
  }
  template <typename T1, typename T2, typename T3>
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
                    const T1 &arg1, const T2 &arg2, const T3 &arg3) {
    buffer_ptr = combine_data(length, buffer_ptr, buffer_end,
                              get_hashable_data(arg1));
    return combine(length, buffer_ptr, buffer_end, arg2, arg3);
  }
  template <typename T1, typename T2>
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
                    const T1 &arg1, const T2 &arg2) {
    buffer_ptr = combine_data(length, buffer_ptr, buffer_end,
                              get_hashable_data(arg1));
    return combine(length, buffer_ptr, buffer_end, arg2);
  }
  template <typename T1>
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
                    const T1 &arg1) {
    buffer_ptr = combine_data(length, buffer_ptr, buffer_end,
                              get_hashable_data(arg1));
    return combine(length, buffer_ptr, buffer_end);
  }

#endif

  /// \brief Base case for recursive, variadic combining.
  ///
  /// The base case when combining arguments recursively is reached when all
  /// arguments have been handled. It flushes the remaining buffer and
  /// constructs a hash_code.
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) {
    // Check whether the entire set of values fit in the buffer. If so, we'll
    // use the optimized short hashing routine and skip state entirely.
    if (length == 0)
      return hash_short(buffer, buffer_ptr - buffer, seed);

    // Mix the final buffer, rotating it if we did a partial fill in order to
    // simulate doing a mix of the last 64-bytes. That is how the algorithm
    // works when we have a contiguous byte sequence, and we want to emulate
    // that here.
    std::rotate(buffer, buffer_ptr, buffer_end);

    // Mix this chunk into the current state.
    state.mix(buffer);
    length += buffer_ptr - buffer;

    return state.finalize(length);
  }
};

} // namespace detail
} // namespace hashing


#if __has_feature(__cxx_variadic_templates__)

/// \brief Combine values into a single hash_code.
///
/// This routine accepts a varying number of arguments of any type. It will
/// attempt to combine them into a single hash_code. For user-defined types it
/// attempts to call a \see hash_value overload (via ADL) for the type. For
/// integer and pointer types it directly combines their data into the
/// resulting hash_code.
///
/// The result is suitable for returning from a user's hash_value
/// *implementation* for their user-defined type. Consumers of a type should
/// *not* call this routine; they should instead call 'hash_value'.
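///
/// As a hypothetical sketch ('Buffer', 'Kind', and 'Data' are illustrative
/// names), a hash_value implementation may also combine previously computed
/// hash_codes, such as the result of hash_combine_range, with other members:
/// \code
///   hash_code hash_value(const Buffer &b) {
///     return hash_combine(b.Kind, hash_combine_range(b.Data.begin(),
///                                                    b.Data.end()));
///   }
/// \endcode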
template <typename ...Ts> hash_code hash_combine(const Ts &...args) {
  // Recursively hash each argument using a helper class.
  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
  return helper.combine(0, helper.buffer, helper.buffer + 64, args...);
}

#else

// What follows are manually exploded overloads for each argument width. See
// the above variadic definition for documentation and specification.

template <typename T1, typename T2, typename T3, typename T4, typename T5,
          typename T6>
hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
                       const T4 &arg4, const T5 &arg5, const T6 &arg6) {
  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
  return helper.combine(0, helper.buffer, helper.buffer + 64,
                        arg1, arg2, arg3, arg4, arg5, arg6);
}
template <typename T1, typename T2, typename T3, typename T4, typename T5>
hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
                       const T4 &arg4, const T5 &arg5) {
  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
  return helper.combine(0, helper.buffer, helper.buffer + 64,
                        arg1, arg2, arg3, arg4, arg5);
}
template <typename T1, typename T2, typename T3, typename T4>
hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3,
                       const T4 &arg4) {
  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
  return helper.combine(0, helper.buffer, helper.buffer + 64,
                        arg1, arg2, arg3, arg4);
}
template <typename T1, typename T2, typename T3>
hash_code hash_combine(const T1 &arg1, const T2 &arg2, const T3 &arg3) {
  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
  return helper.combine(0, helper.buffer, helper.buffer + 64, arg1, arg2, arg3);
}
template <typename T1, typename T2>
hash_code hash_combine(const T1 &arg1, const T2 &arg2) {
  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
  return helper.combine(0, helper.buffer, helper.buffer + 64, arg1, arg2);
}
template <typename T1>
hash_code hash_combine(const T1 &arg1) {
  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
  return helper.combine(0, helper.buffer, helper.buffer + 64, arg1);
}

#endif


// Implementation details for implementations of hash_value overloads provided
// here.
namespace hashing {
namespace detail {

/// \brief Helper to hash the value of a single integer.
///
/// Overloads for smaller integer types are not provided to ensure consistent
/// behavior in the presence of integral promotions. Essentially,
/// "hash_value('4')" and "hash_value('0' + 4)" should be the same.
inline hash_code hash_integer_value(uint64_t value) {
  // Similar to hash_4to8_bytes but using a seed instead of length.
  const uint64_t seed = get_execution_seed();
  const char *s = reinterpret_cast<const char *>(&value);
  const uint64_t a = fetch32(s);
  return hash_16_bytes(seed + (a << 3), fetch32(s + 4));
}

} // namespace detail
} // namespace hashing

// Declared and documented above, but defined here so that all of the hashing
// infrastructure is available.
template <typename T>
typename enable_if<is_integral_or_enum<T>, hash_code>::type
hash_value(T value) {
  return ::llvm::hashing::detail::hash_integer_value(value);
}

// Declared and documented above, but defined here so that all of the hashing
// infrastructure is available.
template <typename T> hash_code hash_value(const T *ptr) {
  return ::llvm::hashing::detail::hash_integer_value(
    reinterpret_cast<uintptr_t>(ptr));
}

// Declared and documented above, but defined here so that all of the hashing
// infrastructure is available.
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg) {
  return hash_combine(arg.first, arg.second);
}

// Declared and documented above, but defined here so that all of the hashing
// infrastructure is available.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg) {
  return hash_combine_range(arg.begin(), arg.end());
}

} // namespace llvm

#endif