/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_LIBARTBASE_BASE_HASH_SET_H_
#define ART_LIBARTBASE_BASE_HASH_SET_H_

#include <stdint.h>

#include <functional>
#include <iterator>
#include <memory>
#include <type_traits>
#include <utility>

#include <android-base/logging.h>

#include "bit_utils.h"
#include "macros.h"

namespace art {

// Default empty function: makes an item empty by value-initializing it, and
// considers an item empty if it compares equal to a value-initialized T().
template <class T>
class DefaultEmptyFn {
 public:
  void MakeEmpty(T& item) const {
    item = T();
  }
  bool IsEmpty(const T& item) const {
    return item == T();
  }
};

// Specialization for pointer types: nullptr is the empty value.
template <class T>
class DefaultEmptyFn<T*> {
 public:
  void MakeEmpty(T*& item) const {
    item = nullptr;
  }
  bool IsEmpty(T* const& item) const {
    return item == nullptr;
  }
};

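// Where the value-initialized T() is itself a valid element, the default empty
// function above cannot be used. A minimal sketch of a custom EmptyFn that uses
// a sentinel value instead (MinIntEmptyFn is hypothetical; requires <limits>):
//
//   class MinIntEmptyFn {
//    public:
//     void MakeEmpty(int& item) const { item = std::numeric_limits<int>::min(); }
//     bool IsEmpty(const int& item) const {
//       return item == std::numeric_limits<int>::min();
//     }
//   };
//   HashSet<int, MinIntEmptyFn> int_set;  // Can now store 0 safely.
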
// Low memory version of a hash set, uses less memory than std::unordered_set since elements aren't
// boxed. Uses linear probing to resolve collisions.
// EmptyFn needs to implement two functions: MakeEmpty(T& item) and IsEmpty(const T& item).
// TODO: We could get rid of this requirement by using a bitmap, though maybe this would be slower
// and more complicated.
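// A minimal usage sketch (illustrative only). Note that with DefaultEmptyFn the
// value-initialized T(), here the empty string, marks free slots and therefore
// must not be inserted:
//
//   HashSet<std::string> set;
//   set.Insert("foo");
//   auto it = set.Find("foo");
//   if (it != set.end()) {
//     // *it is the stored element; set.Erase(it) would remove it.
//   }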
template <class T, class EmptyFn = DefaultEmptyFn<T>, class HashFn = std::hash<T>,
    class Pred = std::equal_to<T>, class Alloc = std::allocator<T>>
class HashSet {
  template <class Elem, class HashSetType>
  class BaseIterator {
   public:
    // Declare the iterator typedefs directly instead of deriving from std::iterator:
    // the (implicitly private) inheritance hid the nested typedefs from
    // std::iterator_traits, and std::iterator is deprecated in C++17.
    using iterator_category = std::forward_iterator_tag;
    using value_type = Elem;
    using difference_type = ptrdiff_t;
    using pointer = Elem*;
    using reference = Elem&;

    BaseIterator(const BaseIterator&) = default;
    BaseIterator(BaseIterator&&) = default;
    BaseIterator(HashSetType* hash_set, size_t index) : index_(index), hash_set_(hash_set) {
    }
    BaseIterator& operator=(const BaseIterator&) = default;
    BaseIterator& operator=(BaseIterator&&) = default;

    bool operator==(const BaseIterator& other) const {
      return hash_set_ == other.hash_set_ && this->index_ == other.index_;
    }

    bool operator!=(const BaseIterator& other) const {
      return !(*this == other);
    }

    BaseIterator operator++() {  // Value after modification.
      this->index_ = this->NextNonEmptySlot(this->index_, hash_set_);
      return *this;
    }

    BaseIterator operator++(int) {
      BaseIterator temp = *this;
      this->index_ = this->NextNonEmptySlot(this->index_, hash_set_);
      return temp;
    }

    Elem& operator*() const {
      DCHECK(!hash_set_->IsFreeSlot(this->index_));
      return hash_set_->ElementForIndex(this->index_);
    }

    Elem* operator->() const {
      return &**this;
    }

    // TODO: Operator -- --(int)  (and use std::bidirectional_iterator_tag)

   private:
    size_t index_;
    HashSetType* hash_set_;

    size_t NextNonEmptySlot(size_t index, const HashSet* hash_set) const {
      const size_t num_buckets = hash_set->NumBuckets();
      DCHECK_LT(index, num_buckets);
      do {
        ++index;
      } while (index < num_buckets && hash_set->IsFreeSlot(index));
      return index;
    }

    friend class HashSet;
  };

 public:
  using value_type = T;
  using allocator_type = Alloc;
  using reference = T&;
  using const_reference = const T&;
  using pointer = T*;
  using const_pointer = const T*;
  using iterator = BaseIterator<T, HashSet>;
  using const_iterator = BaseIterator<const T, const HashSet>;
  using size_type = size_t;
  using difference_type = ptrdiff_t;

  static constexpr double kDefaultMinLoadFactor = 0.4;
  static constexpr double kDefaultMaxLoadFactor = 0.7;
  static constexpr size_t kMinBuckets = 1000;

  // Frees the backing storage (if owned) and makes the hash set empty;
  // storage is allocated again on the next insertion.
  void Clear() {
    DeallocateStorage();
    num_elements_ = 0;
    elements_until_expand_ = 0;
  }

  HashSet() : HashSet(kDefaultMinLoadFactor, kDefaultMaxLoadFactor) {}

  HashSet(double min_load_factor, double max_load_factor) noexcept
      : num_elements_(0u),
        num_buckets_(0u),
        elements_until_expand_(0u),
        owns_data_(false),
        data_(nullptr),
        min_load_factor_(min_load_factor),
        max_load_factor_(max_load_factor) {
    DCHECK_GT(min_load_factor, 0.0);
    DCHECK_LT(max_load_factor, 1.0);
  }

  explicit HashSet(const allocator_type& alloc) noexcept
      : allocfn_(alloc),
        hashfn_(),
        emptyfn_(),
        pred_(),
        num_elements_(0u),
        num_buckets_(0u),
        elements_until_expand_(0u),
        owns_data_(false),
        data_(nullptr),
        min_load_factor_(kDefaultMinLoadFactor),
        max_load_factor_(kDefaultMaxLoadFactor) {
  }

  HashSet(const HashSet& other) noexcept
      : allocfn_(other.allocfn_),
        hashfn_(other.hashfn_),
        emptyfn_(other.emptyfn_),
        pred_(other.pred_),
        num_elements_(other.num_elements_),
        num_buckets_(0),
        elements_until_expand_(other.elements_until_expand_),
        owns_data_(false),
        data_(nullptr),
        min_load_factor_(other.min_load_factor_),
        max_load_factor_(other.max_load_factor_) {
    AllocateStorage(other.NumBuckets());
    for (size_t i = 0; i < num_buckets_; ++i) {
      ElementForIndex(i) = other.data_[i];
    }
  }

  // noexcept required so that the move constructor is used instead of the copy constructor.
  // b/27860101
  HashSet(HashSet&& other) noexcept
      : allocfn_(std::move(other.allocfn_)),
        hashfn_(std::move(other.hashfn_)),
        emptyfn_(std::move(other.emptyfn_)),
        pred_(std::move(other.pred_)),
        num_elements_(other.num_elements_),
        num_buckets_(other.num_buckets_),
        elements_until_expand_(other.elements_until_expand_),
        owns_data_(other.owns_data_),
        data_(other.data_),
        min_load_factor_(other.min_load_factor_),
        max_load_factor_(other.max_load_factor_) {
    other.num_elements_ = 0u;
    other.num_buckets_ = 0u;
    other.elements_until_expand_ = 0u;
    other.owns_data_ = false;
    other.data_ = nullptr;
  }

  // Construct from existing data.
  // Reads from a block of memory; if make_copy_of_data is false, then data_ points into the
  // passed-in ptr.
  HashSet(const uint8_t* ptr, bool make_copy_of_data, size_t* read_count) noexcept {
    uint64_t temp;
    size_t offset = 0;
    offset = ReadFromBytes(ptr, offset, &temp);
    num_elements_ = static_cast<size_t>(temp);
    offset = ReadFromBytes(ptr, offset, &temp);
    num_buckets_ = static_cast<size_t>(temp);
    CHECK_LE(num_elements_, num_buckets_);
    offset = ReadFromBytes(ptr, offset, &temp);
    elements_until_expand_ = static_cast<size_t>(temp);
    offset = ReadFromBytes(ptr, offset, &min_load_factor_);
    offset = ReadFromBytes(ptr, offset, &max_load_factor_);
    if (!make_copy_of_data) {
      owns_data_ = false;
      data_ = const_cast<T*>(reinterpret_cast<const T*>(ptr + offset));
      offset += sizeof(*data_) * num_buckets_;
    } else {
      AllocateStorage(num_buckets_);
      // Read elements. Note that this may not be safe for cross compilation if the elements are
      // pointer sized.
      for (size_t i = 0; i < num_buckets_; ++i) {
        offset = ReadFromBytes(ptr, offset, &data_[i]);
      }
    }
    // Caller is responsible for alignment.
    *read_count = offset;
  }

  // Returns how large the table is after being written. If ptr is null, then no writing happens
  // but the size is still returned. ptr must be 8-byte aligned.
  size_t WriteToMemory(uint8_t* ptr) const {
    size_t offset = 0;
    offset = WriteToBytes(ptr, offset, static_cast<uint64_t>(num_elements_));
    offset = WriteToBytes(ptr, offset, static_cast<uint64_t>(num_buckets_));
    offset = WriteToBytes(ptr, offset, static_cast<uint64_t>(elements_until_expand_));
    offset = WriteToBytes(ptr, offset, min_load_factor_);
    offset = WriteToBytes(ptr, offset, max_load_factor_);
    // Write elements. Note that this may not be safe for cross compilation if the elements are
    // pointer sized.
    for (size_t i = 0; i < num_buckets_; ++i) {
      offset = WriteToBytes(ptr, offset, data_[i]);
    }
    // Caller is responsible for alignment.
    return offset;
  }

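  // A serialization round-trip sketch (illustrative only; assumes the buffer
  // allocation happens to be 8-byte aligned, which std::vector does not guarantee):
  //
  //   const size_t byte_size = set.WriteToMemory(nullptr);  // Size query only.
  //   std::vector<uint8_t> buffer(byte_size);
  //   set.WriteToMemory(buffer.data());
  //   size_t read_count;
  //   HashSet<T> copy(buffer.data(), /*make_copy_of_data=*/true, &read_count);
  //   DCHECK_EQ(read_count, byte_size);
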
  ~HashSet() {
    DeallocateStorage();
  }

  HashSet& operator=(HashSet&& other) noexcept {
    HashSet(std::move(other)).swap(*this);  // NOLINT [runtime/explicit] [5]
    return *this;
  }

  HashSet& operator=(const HashSet& other) noexcept {
    HashSet(other).swap(*this);  // NOLINT(runtime/explicit) - a case of lint gone mad.
    return *this;
  }

  // Lower case for C++11 range-based for loops.
  iterator begin() {
    iterator ret(this, 0);
    if (num_buckets_ != 0 && IsFreeSlot(ret.index_)) {
      ++ret;  // Skip all the empty slots.
    }
    return ret;
  }

  // Lower case for C++11 range-based for loops. const version.
  const_iterator begin() const {
    const_iterator ret(this, 0);
    if (num_buckets_ != 0 && IsFreeSlot(ret.index_)) {
      ++ret;  // Skip all the empty slots.
    }
    return ret;
  }

  // Lower case for C++11 range-based for loops.
  iterator end() {
    return iterator(this, NumBuckets());
  }

  // Lower case for C++11 range-based for loops. const version.
  const_iterator end() const {
    return const_iterator(this, NumBuckets());
  }

  bool Empty() const {
    return Size() == 0;
  }

  // Return true if the hash set has ownership of the underlying data.
  bool OwnsData() const {
    return owns_data_;
  }

  // Erase algorithm:
  // Make an empty slot where the iterator is pointing.
  // Scan forwards until we hit another empty slot.
  // If an element in between doesn't rehash to the range from the current empty slot to the
  // scan position, it must have been probed from before the empty slot; in that case we can
  // move it to the empty slot and make the slot we just moved from the new empty slot.
  // Relies on maintaining the invariant that there are no empty slots from the 'ideal' index
  // of an element to its actual location/index.
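  //
  // An erase-while-iterating sketch (illustrative only; ShouldRemove is a
  // hypothetical predicate):
  //
  //   for (auto it = set.begin(); it != set.end(); ) {
  //     if (ShouldRemove(*it)) {
  //       it = set.Erase(it);  // Returns an iterator positioned for the next test.
  //     } else {
  //       ++it;
  //     }
  //   }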
  iterator Erase(iterator it) {
    // empty_index is the index that will become empty.
    size_t empty_index = it.index_;
    DCHECK(!IsFreeSlot(empty_index));
    size_t next_index = empty_index;
    bool filled = false;  // True if we filled the empty index.
    while (true) {
      next_index = NextIndex(next_index);
      T& next_element = ElementForIndex(next_index);
      // If the next element is empty, we are done. Make sure to clear the current empty index.
      if (emptyfn_.IsEmpty(next_element)) {
        emptyfn_.MakeEmpty(ElementForIndex(empty_index));
        break;
      }
      // Otherwise try to see if the next element can fill the current empty index.
      const size_t next_hash = hashfn_(next_element);
      // Calculate the ideal index; if it is within empty_index + 1 to next_index then there is
      // nothing we can do.
      size_t next_ideal_index = IndexForHash(next_hash);
      // Loop around if needed for our check.
      size_t unwrapped_next_index = next_index;
      if (unwrapped_next_index < empty_index) {
        unwrapped_next_index += NumBuckets();
      }
      // Loop around if needed for our check.
      size_t unwrapped_next_ideal_index = next_ideal_index;
      if (unwrapped_next_ideal_index < empty_index) {
        unwrapped_next_ideal_index += NumBuckets();
      }
      if (unwrapped_next_ideal_index <= empty_index ||
          unwrapped_next_ideal_index > unwrapped_next_index) {
        // If the target index isn't within our current range it must have been probed from before
        // the empty index.
        ElementForIndex(empty_index) = std::move(next_element);
        filled = true;  // TODO: Optimize
        empty_index = next_index;
      }
    }
    --num_elements_;
    // If we didn't fill the slot then we need to go to the next non-free slot.
    if (!filled) {
      ++it;
    }
    return it;
  }

  // Find an element, returns end() if not found.
  // Allows custom key (K) types; an example of when this is useful: a set of Class* keyed by
  // name, where we want to look up a class by name without having to allocate a dummy object
  // on the heap just for the lookup.
  template <typename K>
  iterator Find(const K& key) {
    return FindWithHash(key, hashfn_(key));
  }

  template <typename K>
  const_iterator Find(const K& key) const {
    return FindWithHash(key, hashfn_(key));
  }

  template <typename K>
  iterator FindWithHash(const K& key, size_t hash) {
    return iterator(this, FindIndex(key, hash));
  }

  template <typename K>
  const_iterator FindWithHash(const K& key, size_t hash) const {
    return const_iterator(this, FindIndex(key, hash));
  }

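  // A heterogeneous-lookup sketch (illustrative only; CStrHash and CStrEq are
  // hypothetical functors that must hash and compare const char* keys and stored
  // std::string elements consistently):
  //
  //   HashSet<std::string, DefaultEmptyFn<std::string>, CStrHash, CStrEq> set;
  //   auto it = set.Find("key");  // Looks up by const char*, no temporary string.
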
  // Insert an element, allows duplicates.
  template <typename U, typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
  void Insert(U&& element) {
    InsertWithHash(std::forward<U>(element), hashfn_(element));
  }

  template <typename U, typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
  void InsertWithHash(U&& element, size_t hash) {
    DCHECK_EQ(hash, hashfn_(element));
    if (num_elements_ >= elements_until_expand_) {
      Expand();
      DCHECK_LT(num_elements_, elements_until_expand_);
    }
    const size_t index = FirstAvailableSlot(IndexForHash(hash));
    data_[index] = std::forward<U>(element);
    ++num_elements_;
  }

  size_t Size() const {
    return num_elements_;
  }

  void swap(HashSet& other) {
    // Use argument-dependent lookup with fall-back to std::swap() for function objects.
    using std::swap;
    swap(allocfn_, other.allocfn_);
    swap(hashfn_, other.hashfn_);
    swap(emptyfn_, other.emptyfn_);
    swap(pred_, other.pred_);
    std::swap(data_, other.data_);
    std::swap(num_buckets_, other.num_buckets_);
    std::swap(num_elements_, other.num_elements_);
    std::swap(elements_until_expand_, other.elements_until_expand_);
    std::swap(min_load_factor_, other.min_load_factor_);
    std::swap(max_load_factor_, other.max_load_factor_);
    std::swap(owns_data_, other.owns_data_);
  }

  allocator_type get_allocator() const {
    return allocfn_;
  }

  void ShrinkToMaximumLoad() {
    Resize(Size() / max_load_factor_);
  }

  // Reserve enough room to insert until Size() == num_elements without needing to grow the hash
  // set. No-op if the hash set is already large enough to do this.
  void Reserve(size_t num_elements) {
    size_t num_buckets = num_elements / max_load_factor_;
    // Deal with rounding errors by adding buckets until the computed capacity is large enough.
    while (static_cast<size_t>(num_buckets * max_load_factor_) <= num_elements + 1u) {
      ++num_buckets;
    }
    if (num_buckets > NumBuckets()) {
      Resize(num_buckets);
    }
  }

  // Total distance that inserted elements were probed. Used for measuring how good hash functions
  // are.
  size_t TotalProbeDistance() const {
    size_t total = 0;
    for (size_t i = 0; i < NumBuckets(); ++i) {
      const T& element = ElementForIndex(i);
      if (!emptyfn_.IsEmpty(element)) {
        size_t ideal_location = IndexForHash(hashfn_(element));
        if (ideal_location > i) {
          total += i + NumBuckets() - ideal_location;
        } else {
          total += i - ideal_location;
        }
      }
    }
    return total;
  }

  // Calculate the current load factor and return it.
  double CalculateLoadFactor() const {
    return static_cast<double>(Size()) / static_cast<double>(NumBuckets());
  }

  // Make sure that everything reinserts in the right spot. Returns the number of errors.
  size_t Verify() NO_THREAD_SAFETY_ANALYSIS {
    size_t errors = 0;
    for (size_t i = 0; i < num_buckets_; ++i) {
      T& element = data_[i];
      if (!emptyfn_.IsEmpty(element)) {
        T temp;
        emptyfn_.MakeEmpty(temp);
        std::swap(temp, element);
        size_t first_slot = FirstAvailableSlot(IndexForHash(hashfn_(temp)));
        if (i != first_slot) {
          LOG(ERROR) << "Element " << i << " should be in slot " << first_slot;
          ++errors;
        }
        std::swap(temp, element);
      }
    }
    return errors;
  }

  double GetMinLoadFactor() const {
    return min_load_factor_;
  }

  double GetMaxLoadFactor() const {
    return max_load_factor_;
  }

  // Change the load factors of the hash set. If the current load factor is greater than the new
  // maximum, then we resize the hash table storage.
  void SetLoadFactor(double min_load_factor, double max_load_factor) {
    DCHECK_LT(min_load_factor, max_load_factor);
    DCHECK_GT(min_load_factor, 0.0);
    DCHECK_LT(max_load_factor, 1.0);
    min_load_factor_ = min_load_factor;
    max_load_factor_ = max_load_factor;
    elements_until_expand_ = NumBuckets() * max_load_factor_;
    // If the current load factor exceeds the new maximum, resize so that the load factor becomes
    // the mean of the minimum and maximum load factors.
    const double load_factor = CalculateLoadFactor();
    if (load_factor > max_load_factor_) {
      Resize(Size() / ((min_load_factor_ + max_load_factor_) * 0.5));
    }
  }

  // The hash set expands when Size() reaches ElementsUntilExpand().
  size_t ElementsUntilExpand() const {
    return elements_until_expand_;
  }

  size_t NumBuckets() const {
    return num_buckets_;
  }

 private:
  T& ElementForIndex(size_t index) {
    DCHECK_LT(index, NumBuckets());
    DCHECK(data_ != nullptr);
    return data_[index];
  }

  const T& ElementForIndex(size_t index) const {
    DCHECK_LT(index, NumBuckets());
    DCHECK(data_ != nullptr);
    return data_[index];
  }

  size_t IndexForHash(size_t hash) const {
    // Protect against undefined behavior (division by zero).
    if (UNLIKELY(num_buckets_ == 0)) {
      return 0;
    }
    return hash % num_buckets_;
  }

  size_t NextIndex(size_t index) const {
    if (UNLIKELY(++index >= num_buckets_)) {
      DCHECK_EQ(index, NumBuckets());
      return 0;
    }
    return index;
  }

  // Find the hash table slot for an element, or return NumBuckets() if not found.
  // This value for not found is important so that iterator(this, FindIndex(...)) == end().
  template <typename K>
  size_t FindIndex(const K& element, size_t hash) const {
    // Guard against failing to get an element for a non-existing index.
    if (UNLIKELY(NumBuckets() == 0)) {
      return 0;
    }
    DCHECK_EQ(hashfn_(element), hash);
    size_t index = IndexForHash(hash);
    while (true) {
      const T& slot = ElementForIndex(index);
      if (emptyfn_.IsEmpty(slot)) {
        return NumBuckets();
      }
      if (pred_(slot, element)) {
        return index;
      }
      index = NextIndex(index);
    }
  }

  bool IsFreeSlot(size_t index) const {
    return emptyfn_.IsEmpty(ElementForIndex(index));
  }

  // Allocate a number of buckets.
  void AllocateStorage(size_t num_buckets) {
    num_buckets_ = num_buckets;
    data_ = allocfn_.allocate(num_buckets_);
    owns_data_ = true;
    for (size_t i = 0; i < num_buckets_; ++i) {
      allocfn_.construct(allocfn_.address(data_[i]));
      emptyfn_.MakeEmpty(data_[i]);
    }
  }

  void DeallocateStorage() {
    if (owns_data_) {
      for (size_t i = 0; i < NumBuckets(); ++i) {
        allocfn_.destroy(allocfn_.address(data_[i]));
      }
      if (data_ != nullptr) {
        allocfn_.deallocate(data_, NumBuckets());
      }
      owns_data_ = false;
    }
    data_ = nullptr;
    num_buckets_ = 0;
  }

  // Expand the set based on the load factors.
  void Expand() {
    // Resize to the smallest bucket count that respects the minimum load factor.
    const size_t min_num_buckets = static_cast<size_t>(Size() / min_load_factor_);
    Resize(min_num_buckets);
  }

  // Expand / shrink the table to the new specified size.
  void Resize(size_t new_size) {
    if (new_size < kMinBuckets) {
      new_size = kMinBuckets;
    }
    DCHECK_GE(new_size, Size());
    T* const old_data = data_;
    size_t old_num_buckets = num_buckets_;
    // Reinsert all of the old elements.
    const bool owned_data = owns_data_;
    AllocateStorage(new_size);
    for (size_t i = 0; i < old_num_buckets; ++i) {
      T& element = old_data[i];
      if (!emptyfn_.IsEmpty(element)) {
        data_[FirstAvailableSlot(IndexForHash(hashfn_(element)))] = std::move(element);
      }
      if (owned_data) {
        allocfn_.destroy(allocfn_.address(element));
      }
    }
    if (owned_data) {
      allocfn_.deallocate(old_data, old_num_buckets);
    }

    // When we hit elements_until_expand_, we are at the max load factor and must expand again.
    elements_until_expand_ = NumBuckets() * max_load_factor_;
  }

  ALWAYS_INLINE size_t FirstAvailableSlot(size_t index) const {
    DCHECK_LT(index, NumBuckets());  // Don't try to get a slot out of range.
    size_t non_empty_count = 0;
    while (!emptyfn_.IsEmpty(data_[index])) {
      index = NextIndex(index);
      non_empty_count++;
      DCHECK_LE(non_empty_count, NumBuckets());  // Don't loop forever.
    }
    return index;
  }

  // Return new offset.
  template <typename Elem>
  static size_t WriteToBytes(uint8_t* ptr, size_t offset, Elem n) {
    DCHECK_ALIGNED(ptr + offset, sizeof(n));
    if (ptr != nullptr) {
      *reinterpret_cast<Elem*>(ptr + offset) = n;
    }
    return offset + sizeof(n);
  }

  template <typename Elem>
  static size_t ReadFromBytes(const uint8_t* ptr, size_t offset, Elem* out) {
    DCHECK(ptr != nullptr);
    DCHECK_ALIGNED(ptr + offset, sizeof(*out));
    *out = *reinterpret_cast<const Elem*>(ptr + offset);
    return offset + sizeof(*out);
  }

  Alloc allocfn_;  // Allocator function.
  HashFn hashfn_;  // Hashing function.
  EmptyFn emptyfn_;  // IsEmpty/MakeEmpty function.
  Pred pred_;  // Equals function.
  size_t num_elements_;  // Number of inserted elements.
  size_t num_buckets_;  // Number of hash table buckets.
  size_t elements_until_expand_;  // Maximum number of elements until we expand the table.
  bool owns_data_;  // If we own data_ and are responsible for freeing it.
  T* data_;  // Backing storage.
  double min_load_factor_;
  double max_load_factor_;

  ART_FRIEND_TEST(InternTableTest, CrossHash);
};

template <class T, class EmptyFn, class HashFn, class Pred, class Alloc>
void swap(HashSet<T, EmptyFn, HashFn, Pred, Alloc>& lhs,
          HashSet<T, EmptyFn, HashFn, Pred, Alloc>& rhs) {
  lhs.swap(rhs);
}

}  // namespace art

#endif  // ART_LIBARTBASE_BASE_HASH_SET_H_