1 //===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file defines the DenseMap class. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #ifndef LLVM_ADT_DENSEMAP_H 15 #define LLVM_ADT_DENSEMAP_H 16 17 #include "llvm/ADT/DenseMapInfo.h" 18 #include "llvm/ADT/EpochTracker.h" 19 #include "llvm/Support/AlignOf.h" 20 #include "llvm/Support/Compiler.h" 21 #include "llvm/Support/MathExtras.h" 22 #include "llvm/Support/type_traits.h" 23 #include <algorithm> 24 #include <cassert> 25 #include <cstddef> 26 #include <cstring> 27 #include <iterator> 28 #include <limits> 29 #include <new> 30 #include <utility> 31 32 namespace llvm { 33 34 namespace detail { 35 36 // We extend a pair to allow users to override the bucket type with their own 37 // implementation without requiring two members. 
// DenseMapPair extends std::pair only to expose the getFirst()/getSecond()
// accessor interface that DenseMap's internals use; a user-supplied bucket
// type must provide these same four accessors.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
};

} // end namespace detail

// Forward declaration; defined at the bottom of this file.
template <
    typename KeyT, typename ValueT, typename KeyInfoT = DenseMapInfo<KeyT>,
    typename Bucket = detail::DenseMapPair<KeyT, ValueT>, bool IsConst = false>
class DenseMapIterator;

/// DenseMapBase - CRTP base class providing the shared implementation for
/// DenseMap and SmallDenseMap.  DerivedT supplies the storage via
/// getBuckets()/getNumBuckets()/getNumEntries()/getNumTombstones() and the
/// grow()/shrink_and_clear() reallocation hooks; this class implements the
/// probing, insertion, and erasure logic on top of them.  Deriving from
/// DebugEpochBase lets iterators detect invalidation in debug builds.
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
class DenseMapBase : public DebugEpochBase {
  template <typename T>
  using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;

public:
  typedef unsigned size_type;
  typedef KeyT key_type;
  typedef ValueT mapped_type;
  typedef BucketT value_type;

  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT> iterator;
  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>
      const_iterator;

  inline iterator begin() {
    // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
    return empty() ? end() : iterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline iterator end() {
    return iterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }
  inline const_iterator begin() const {
    return empty() ? end()
                   : const_iterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline const_iterator end() const {
    return const_iterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }

  LLVM_NODISCARD bool empty() const {
    return getNumEntries() == 0;
  }
  unsigned size() const { return getNumEntries(); }

  /// Grow the densemap so that it can contain at least \p NumEntries items
  /// before resizing again.
  void reserve(size_type NumEntries) {
    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
    // Reserving may move buckets, so invalidate outstanding iterators
    // unconditionally (the epoch is bumped even if no growth happens).
    incrementEpoch();
    if (NumBuckets > getNumBuckets())
      grow(NumBuckets);
  }

  /// Remove all entries.  Keeps the current bucket array (unless it is
  /// disproportionately large), destroying values and resetting every key
  /// to the empty marker.
  void clear() {
    incrementEpoch();
    if (getNumEntries() == 0 && getNumTombstones() == 0) return;

    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
      shrink_and_clear();
      return;
    }

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    unsigned NumEntries = getNumEntries();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
        if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
          // Occupied bucket: destroy the value; the key is overwritten below.
          P->getSecond().~ValueT();
          --NumEntries;
        }
        P->getFirst() = EmptyKey;
      }
    }
    assert(NumEntries == 0 && "Node count imbalance!");
    setNumEntries(0);
    setNumTombstones(0);
  }

  /// Return 1 if the specified key is in the map, 0 otherwise.
  size_type count(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    return LookupBucketFor(Val, TheBucket) ? 1 : 0;
  }

  iterator find(const_arg_type_t<KeyT> Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }
  const_iterator find(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template<class LookupKeyT>
  iterator find_as(const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }
  template<class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return TheBucket->getSecond();
    return ValueT();
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    return try_emplace(KV.first, KV.second);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    return try_emplace(std::move(KV.first), std::move(KV.second));
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket =
        InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                          true);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                          true);
  }

  /// Alternate version of insert() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <typename LookupKeyT>
  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
                                      const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
                                           std::move(KV.second), Val);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                          true);
  }

  /// insert - Range insertion of pairs.
  template<typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }

  /// Erase \p Val if present.  Returns true if an entry was removed.  The
  /// bucket is not freed; its key is overwritten with the tombstone marker so
  /// that probe chains passing through it stay intact.
  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false; // not in map.

    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }
  /// Erase the entry \p I points at.  \p I must be a valid, dereferenceable
  /// iterator into this map.
  void erase(iterator I) {
    BucketT *TheBucket = &*I;
    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }

  /// Return the bucket for \p Key, default-constructing a value in place if
  /// the key is not already present.  Backing implementation of operator[].
  value_type& FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, Key);
  }

  ValueT &operator[](const KeyT &Key) {
    return FindAndConstruct(Key).second;
  }

  /// Rvalue-key overload: the key is moved into the bucket on insertion.
  value_type& FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, std::move(Key));
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(std::move(Key)).second;
  }

  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array.  In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
  const void *getPointerIntoBucketsArray() const { return getBuckets(); }

protected:
  DenseMapBase() = default;

  /// Run destructors on every key, and on the value of every occupied bucket.
  /// Does not deallocate storage and does not reset the counters; callers
  /// (derived-class destructors, copyFrom, clear) handle that themselves.
  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
        P->getSecond().~ValueT();
      // Every bucket holds a constructed key (empty/tombstone markers
      // included), so the key destructor runs unconditionally.
      P->getFirst().~KeyT();
    }
  }

  /// Reset counters and placement-new the empty-key marker into every bucket.
  /// Assumes the bucket storage is raw (keys not yet constructed).
  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      ::new (&B->getFirst()) KeyT(EmptyKey);
  }

  /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without need to grow().
  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
    // Ensure that "NumEntries * 4 < NumBuckets * 3"
    if (NumEntries == 0)
      return 0;
    // +1 is required because of the strict inequality: insertion grows the
    // table once load reaches 3/4.  For example, for NumEntries == 48,
    // 48*4/3 == 64 buckets would sit exactly at the grow threshold, so we
    // request 65, which NextPowerOf2 rounds up to 128.
    return NextPowerOf2(NumEntries * 4 / 3 + 1);
  }

  /// Re-insert every live entry from [OldBucketsBegin, OldBucketsEnd) into
  /// the (freshly initEmpty'ed) current bucket array, re-hashing each key,
  /// and destroy the old buckets' keys and values as they are consumed.
  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        DestBucket->getFirst() = std::move(B->getFirst());
        ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
        incrementNumEntries();

        // Free the value.
        B->getSecond().~ValueT();
      }
      B->getFirst().~KeyT();
    }
  }

  /// Copy every bucket from \p other into this map.  Requires that this map
  /// already owns an equally-sized, raw (unconstructed) bucket array.
  template <typename OtherBaseT>
  void copyFrom(
      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
    assert(&other != this);
    assert(getNumBuckets() == other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
      // POD-like buckets can be block-copied, markers and all.
      memcpy(getBuckets(), other.getBuckets(),
             getNumBuckets() * sizeof(BucketT));
    else
      for (size_t i = 0; i < getNumBuckets(); ++i) {
        // Copy-construct every key; copy-construct values only for occupied
        // buckets (empty/tombstone buckets have no constructed value).
        ::new (&getBuckets()[i].getFirst())
            KeyT(other.getBuckets()[i].getFirst());
        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
          ::new (&getBuckets()[i].getSecond())
              ValueT(other.getBuckets()[i].getSecond());
      }
  }

  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  template<typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  static const KeyT getEmptyKey() {
    static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
                  "Must pass the derived type to this template!");
    return KeyInfoT::getEmptyKey();
  }
  static const KeyT getTombstoneKey() {
    return KeyInfoT::getTombstoneKey();
  }

private:
  // CRTP dispatch: counters and storage live in DerivedT; these thin
  // wrappers forward to the derived class's implementations.
  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }
  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }
  void incrementNumEntries() {
    setNumEntries(getNumEntries() + 1);
  }
  void decrementNumEntries() {
    setNumEntries(getNumEntries() - 1);
  }
  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }
  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }
  void incrementNumTombstones() {
    setNumTombstones(getNumTombstones() + 1);
  }
  void decrementNumTombstones() {
    setNumTombstones(getNumTombstones() - 1);
  }
  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }
  BucketT *getBuckets() {
    return static_cast<DerivedT *>(this)->getBuckets();
  }
  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }
  BucketT *getBucketsEnd() {
    return getBuckets() + getNumBuckets();
  }
  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) {
    static_cast<DerivedT *>(this)->grow(AtLeast);
  }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }

  /// Place (Key, ValueT(Values...)) into the map.  \p TheBucket is the
  /// insertion slot previously found by LookupBucketFor; it may be replaced
  /// by InsertIntoBucketImpl if the table grows.
  template <typename KeyArg, typename... ValueArgs>
  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
                            ValueArgs &&... Values) {
    // The key is passed twice: once as the key to store, once as the lookup
    // key used to re-find the slot after a potential grow().
    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);

    TheBucket->getFirst() = std::forward<KeyArg>(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
    return TheBucket;
  }

  /// Variant of InsertIntoBucket where hashing/re-probing is done with a
  /// separate (possibly cheaper) lookup key.  Used by insert_as().
  template <typename LookupKeyT>
  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
                                      ValueT &&Value, LookupKeyT &Lookup) {
    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);

    TheBucket->getFirst() = std::move(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
    return TheBucket;
  }

  /// Shared insertion bookkeeping: grow if needed, bump the entry count, and
  /// account for any tombstone being overwritten.  Returns the (possibly
  /// relocated) destination bucket; the caller writes the key/value.
  template <typename LookupKeyT>
  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
                                BucketT *TheBucket) {
    incrementEpoch();

    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The latter case is tricky.  For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket.  If the
    // table were completely filled with tombstones, no lookup would ever
    // succeed, causing infinite loops in lookup.
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
      this->grow(NumBuckets * 2);
      // Growing moved every bucket; re-probe for the destination slot.
      LookupBucketFor(Lookup, TheBucket);
      NumBuckets = getNumBuckets();
    } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
                             NumBuckets/8)) {
      // Same bucket count: rehashing in place just flushes the tombstones.
      this->grow(NumBuckets);
      LookupBucketFor(Lookup, TheBucket);
    }
    assert(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }

  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket.  If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template<typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    // NumBuckets is a power of two, so masking implements the modulo.
    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
    unsigned ProbeAmt = 1;
    while (true) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket?  If so, return it.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the map; the
      // caller may insert it into the returned bucket.
      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it.  If Val ends up not in the map, we
      // prefer to return it than something that would require more probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket;  // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets-1);
    }
  }

  /// Non-const overload; forwards to the const implementation above.
  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)
      ->LookupBucketFor(Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }

public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// are not included.
  size_t getMemorySize() const {
    return getNumBuckets() * sizeof(BucketT);
  }
};

/// DenseMap - the standard heap-allocated incarnation of DenseMapBase.
/// Owns a single `operator new`-allocated bucket array plus three counters.
template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
                                     KeyT, ValueT, KeyInfoT, BucketT> {
  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  BucketT *Buckets;
  unsigned NumEntries;
  unsigned NumTombstones;
  unsigned NumBuckets;

public:
  /// Create a DenseMap with an optional \p InitialReserve that guarantees
  /// that this number of elements can be inserted in the map without grow().
  explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }

  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  DenseMap(DenseMap &&other) : BaseT() {
    // Start out empty, then steal other's state via swap.
    init(0);
    swap(other);
  }

  template<typename InputIt>
  DenseMap(const InputIt &I, const InputIt &E) {
    init(std::distance(I, E));
    this->insert(I, E);
  }

  ~DenseMap() {
    this->destroyAll();
    operator delete(Buckets);
  }

  void swap(DenseMap& RHS) {
    // Swapping invalidates iterators into both maps.
    this->incrementEpoch();
    RHS.incrementEpoch();
    std::swap(Buckets, RHS.Buckets);
    std::swap(NumEntries, RHS.NumEntries);
    std::swap(NumTombstones, RHS.NumTombstones);
    std::swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap& operator=(const DenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  DenseMap& operator=(DenseMap &&other) {
    // Tear down our current contents, reset to empty, then steal other's.
    this->destroyAll();
    operator delete(Buckets);
    init(0);
    swap(other);
    return *this;
  }

  /// Replace our contents with a copy of \p other, reallocating to match its
  /// bucket count (BaseT::copyFrom requires equal-sized tables).
  void copyFrom(const DenseMap& other) {
    this->destroyAll();
    operator delete(Buckets);
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  /// Allocate and empty-initialize enough buckets for \p InitNumEntries
  /// entries.  A zero request leaves the map bucket-less (lazy allocation).
  void init(unsigned InitNumEntries) {
    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  /// Reallocate to at least \p AtLeast buckets (minimum 64, rounded up to a
  /// power of two) and re-hash all live entries into the new array.
  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
    assert(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);

    // Free the old table.
    operator delete(OldBuckets);
  }

  void shrink_and_clear() {
    unsigned OldNumEntries = NumEntries;
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldNumEntries)
      NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
    if (NewNumBuckets == NumBuckets) {
      // Same size: just reinitialize in place, no reallocation needed.
      this->BaseT::initEmpty();
      return;
    }

    operator delete(Buckets);
    init(NewNumBuckets);
  }

private:
  // Storage hooks consumed by DenseMapBase through CRTP.
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  BucketT *getBuckets() const {
    return Buckets;
  }

  unsigned getNumBuckets() const {
    return NumBuckets;
  }

  /// Allocate raw (unconstructed) storage for \p Num buckets and record the
  /// new count.  Returns false — with Buckets set to null — when Num is 0.
  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }

    Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
    return true;
  }
};

/// SmallDenseMap - DenseMap variant that stores up to \p InlineBuckets
/// buckets inline (no heap allocation) and falls back to a heap-allocated
/// LargeRep beyond that; the `Small` bit discriminates the two states.
template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class SmallDenseMap
    : public DenseMapBase<
          SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
          ValueT, KeyInfoT, BucketT> {
  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  static_assert(isPowerOf2_64(InlineBuckets),
                "InlineBuckets must be a power of 2.");

  unsigned Small : 1;
  unsigned NumEntries : 31;
  unsigned NumTombstones;

  struct LargeRep {
    BucketT *Buckets;
    unsigned NumBuckets;
  };

  /// A "union" of an inline bucket array and the struct representing
  /// a large bucket. This union will be discriminated by the 'Small' bit.
  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;

public:
  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
    init(NumInitBuckets);
  }

  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
    // Start out empty, then steal other's state via swap.
    init(0);
    swap(other);
  }

  template<typename InputIt>
  SmallDenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  ~SmallDenseMap() {
    this->destroyAll();
    deallocateBuckets();
  }

  void swap(SmallDenseMap& RHS) {
    // NumEntries is a bitfield, so std::swap can't bind a reference to it;
    // swap it manually.
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = this->getEmptyKey();
    const KeyT TombstoneKey = this->getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any asymmetry: only the side that held
        // a live value gets it moved across; the other side has no value to
        // move back.
        std::swap(LHSB->getFirst(), RHSB->getFirst());
        if (hasLHSValue) {
          ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
          LHSB->getSecond().~ValueT();
        } else if (hasRHSValue) {
          ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
          RHSB->getSecond().~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      // Both heap-allocated: just exchange the large reps.
      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
      return;
    }

    // Mixed case: one side is inline, the other heap-allocated.
    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
      OldB->getFirst().~KeyT();
      if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
        ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
        OldB->getSecond().~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
  }

  SmallDenseMap& operator=(const SmallDenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  SmallDenseMap& operator=(SmallDenseMap &&other) {
    // Tear down our current contents, reset to empty, then steal other's.
    this->destroyAll();
    deallocateBuckets();
    init(0);
    swap(other);
    return *this;
  }

  /// Replace our contents with a copy of \p other, matching its bucket count
  /// and small/large representation (BaseT::copyFrom requires equal sizes).
  void copyFrom(const SmallDenseMap& other) {
    this->destroyAll();
    deallocateBuckets();
    Small = true;
    if (other.getNumBuckets() > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
    }
    this->BaseT::copyFrom(other);
  }

  /// Set up an empty map with at least \p InitBuckets buckets, choosing the
  /// inline representation when the request fits.
  void init(unsigned InitBuckets) {
    Small = true;
    if (InitBuckets > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
    }
    this->BaseT::initEmpty();
  }

  void grow(unsigned AtLeast) {
    if (AtLeast >= InlineBuckets)
      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));

    if (Small) {
      if (AtLeast < InlineBuckets)
        return; // Nothing to do.

      // First move the inline buckets into a temporary storage.
      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
      BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
      BucketT *TmpEnd = TmpBegin;

      // Loop over the buckets, moving non-empty, non-tombstones into the
      // temporary storage. Have the loop move the TmpEnd forward as it goes.
      const KeyT EmptyKey = this->getEmptyKey();
      const KeyT TombstoneKey = this->getTombstoneKey();
      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
            !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
                 "Too many inline buckets!");
          ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
          ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
          ++TmpEnd;
          P->getSecond().~ValueT();
        }
        // All keys are constructed (markers included); destroy each one so
        // the inline storage can be reused for the LargeRep below.
        P->getFirst().~KeyT();
      }

      // Now make this map use the large rep, and move all the entries back
      // into it.
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
      this->moveFromOldBuckets(TmpBegin, TmpEnd);
      return;
    }

    // Already large: stash the old rep, allocate (or shrink back to inline),
    // then re-hash everything from the old heap array.
    LargeRep OldRep = std::move(*getLargeRep());
    getLargeRep()->~LargeRep();
    if (AtLeast <= InlineBuckets) {
      Small = true;
    } else {
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
    }

    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);

    // Free the old table.
    operator delete(OldRep.Buckets);
  }

  void shrink_and_clear() {
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      // Any heap allocation is at least 64 buckets; smaller requests stay
      // inline or get rounded up.
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
        NewNumBuckets = 64;
    }
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      // Representation is unchanged: reinitialize in place.
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }

private:
  // Storage hooks consumed by DenseMapBase through CRTP.
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    // NumEntries is hardcoded to be 31 bits wide.
    assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage.buffer' static type is 'char *'.
    return reinterpret_cast<const BucketT *>(storage.buffer);
  }
  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }
  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(storage.buffer);
  }
  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
        const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }

  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }
  BucketT *getBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getBuckets());
  }
  unsigned getNumBuckets() const {
    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  }

  /// Free the heap bucket array if we are in the large representation.
  /// Does not touch the buckets' contents — run destroyAll() first.
  void deallocateBuckets() {
    if (Small)
      return;

    operator delete(getLargeRep()->Buckets);
    getLargeRep()->~LargeRep();
  }

  /// Heap-allocate raw storage for \p Num buckets and return the LargeRep
  /// describing it.  The caller placement-news it into `storage`.
  LargeRep allocateBuckets(unsigned Num) {
    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
    LargeRep Rep = {
      static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
    };
    return Rep;
  }
};

template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
          bool IsConst>
class DenseMapIterator : DebugEpochBase::HandleBase {
  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true> ConstIterator;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;

public:
  typedef ptrdiff_t difference_type;
  typedef typename std::conditional<IsConst, const Bucket, Bucket>::type
      value_type;
  typedef value_type *pointer;
  typedef value_type &reference;
  typedef std::forward_iterator_tag iterator_category;

private:
  pointer Ptr, End;

public:
  DenseMapIterator() : Ptr(nullptr), End(nullptr) {}

  DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
                   bool NoAdvance = false)
      : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
    assert(isHandleInSync() && "invalid construction!");
    if (!NoAdvance) AdvancePastEmptyBuckets();
  }

  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
  // for const iterator destinations so it doesn't end up as a user defined copy
  // constructor.
1068 template <bool IsConstSrc, 1069 typename = typename std::enable_if<!IsConstSrc && IsConst>::type> 1070 DenseMapIterator( 1071 const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I) 1072 : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {} 1073 1074 reference operator*() const { 1075 assert(isHandleInSync() && "invalid iterator access!"); 1076 return *Ptr; 1077 } 1078 pointer operator->() const { 1079 assert(isHandleInSync() && "invalid iterator access!"); 1080 return Ptr; 1081 } 1082 1083 bool operator==(const ConstIterator &RHS) const { 1084 assert((!Ptr || isHandleInSync()) && "handle not in sync!"); 1085 assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!"); 1086 assert(getEpochAddress() == RHS.getEpochAddress() && 1087 "comparing incomparable iterators!"); 1088 return Ptr == RHS.Ptr; 1089 } 1090 bool operator!=(const ConstIterator &RHS) const { 1091 assert((!Ptr || isHandleInSync()) && "handle not in sync!"); 1092 assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!"); 1093 assert(getEpochAddress() == RHS.getEpochAddress() && 1094 "comparing incomparable iterators!"); 1095 return Ptr != RHS.Ptr; 1096 } 1097 1098 inline DenseMapIterator& operator++() { // Preincrement 1099 assert(isHandleInSync() && "invalid iterator access!"); 1100 ++Ptr; 1101 AdvancePastEmptyBuckets(); 1102 return *this; 1103 } 1104 DenseMapIterator operator++(int) { // Postincrement 1105 assert(isHandleInSync() && "invalid iterator access!"); 1106 DenseMapIterator tmp = *this; ++*this; return tmp; 1107 } 1108 1109 private: 1110 void AdvancePastEmptyBuckets() { 1111 const KeyT Empty = KeyInfoT::getEmptyKey(); 1112 const KeyT Tombstone = KeyInfoT::getTombstoneKey(); 1113 1114 while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) || 1115 KeyInfoT::isEqual(Ptr->getFirst(), Tombstone))) 1116 ++Ptr; 1117 } 1118 }; 1119 1120 template<typename KeyT, typename ValueT, typename KeyInfoT> 1121 static inline size_t 1122 
capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) { 1123 return X.getMemorySize(); 1124 } 1125 1126 } // end namespace llvm 1127 1128 #endif // LLVM_ADT_DENSEMAP_H 1129