/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_
#define ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_

#include <sys/mman.h>  // For the PROT_* and MAP_* constants.

#include <algorithm>
#include <memory>
#include <string>

#include <android-base/logging.h>

#include "base/atomic.h"
#include "base/macros.h"
#include "mem_map.h"
#include "stack_reference.h"

// This implements a double-ended queue (deque) with various flavors of PushBack operations,
// as well as PopBack and PopFront operations. We expect that all calls are performed
// by a single thread (normally the GC). There is one exception, which accounts for the
// name:
// - Multiple calls to AtomicPushBack*() and AtomicBumpBack() may be made concurrently,
// provided no other calls are made at the same time.
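//
// A minimal usage sketch (illustrative only; the stack name, the sizes, and 'obj' below are
// assumptions, not taken from this file):
//
//   std::unique_ptr<accounting::ObjectStack> stack(
//       accounting::ObjectStack::Create("example mark stack",
//                                       /* growth_limit */ 16 * 1024,
//                                       /* capacity */ 16 * 1024));
//   if (!stack->AtomicPushBack(obj)) {
//     // Growth limit reached; the caller must handle the overflow.
//   }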

namespace art {
namespace gc {
namespace accounting {

// Internal representation is StackReference<T>, so this only works with mirror::Object or its
// subclasses.
template <typename T>
class AtomicStack {
 public:
  class ObjectComparator {
   public:
    // These two comparators are for std::binary_search.
    bool operator()(const T* a, const StackReference<T>& b) const NO_THREAD_SAFETY_ANALYSIS {
      return a < b.AsMirrorPtr();
    }
    bool operator()(const StackReference<T>& a, const T* b) const NO_THREAD_SAFETY_ANALYSIS {
      return a.AsMirrorPtr() < b;
    }
    // This comparator is for std::sort.
    bool operator()(const StackReference<T>& a, const StackReference<T>& b) const
        NO_THREAD_SAFETY_ANALYSIS {
      return a.AsMirrorPtr() < b.AsMirrorPtr();
    }
  };

  // Capacity is how many elements we can store in the stack.
  static AtomicStack* Create(const std::string& name, size_t growth_limit, size_t capacity) {
    std::unique_ptr<AtomicStack> mark_stack(new AtomicStack(name, growth_limit, capacity));
    mark_stack->Init();
    return mark_stack.release();
  }

  ~AtomicStack() {}

  void Reset() {
    DCHECK(mem_map_.get() != nullptr);
    DCHECK(begin_ != nullptr);
    front_index_.StoreRelaxed(0);
    back_index_.StoreRelaxed(0);
    debug_is_sorted_ = true;
    mem_map_->MadviseDontNeedAndZero();
  }

  // Beware: Mixing atomic pushes and atomic pops will cause an ABA problem.

  // Returns false if we overflowed the stack.
  bool AtomicPushBackIgnoreGrowthLimit(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
    return AtomicPushBackInternal(value, capacity_);
  }

  // Returns false if we overflowed the stack.
  bool AtomicPushBack(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
    return AtomicPushBackInternal(value, growth_limit_);
  }

  // Atomically bump the back index by the given number of slots. Returns false if we
  // overflowed the stack.
  bool AtomicBumpBack(size_t num_slots, StackReference<T>** start_address,
                      StackReference<T>** end_address)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index;
    int32_t new_index;
    do {
      index = back_index_.LoadRelaxed();
      new_index = index + num_slots;
      if (UNLIKELY(static_cast<size_t>(new_index) >= growth_limit_)) {
        // Stack overflow.
        return false;
      }
    } while (!back_index_.CompareAndSetWeakRelaxed(index, new_index));
    *start_address = begin_ + index;
    *end_address = begin_ + new_index;
    if (kIsDebugBuild) {
      // Sanity check that the memory is zero.
      for (int32_t i = index; i < new_index; ++i) {
        DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr))
            << "i=" << i << " index=" << index << " new_index=" << new_index;
      }
    }
    return true;
  }
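
  // A hedged usage sketch for AtomicBumpBack: one thread reserves a block of slots and then
  // fills it locally ('refs', a buffer of T* pointers, is illustrative, not part of this file):
  //
  //   StackReference<T>* start;
  //   StackReference<T>* end;
  //   if (stack->AtomicBumpBack(refs.size(), &start, &end)) {
  //     for (size_t i = 0; i < refs.size(); ++i) {
  //       start[i].Assign(refs[i]);
  //     }
  //   }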

  void AssertAllZero() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      for (size_t i = 0; i < capacity_; ++i) {
        DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr)) << "i=" << i;
      }
    }
  }

  void PushBack(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    const int32_t index = back_index_.LoadRelaxed();
    DCHECK_LT(static_cast<size_t>(index), growth_limit_);
    back_index_.StoreRelaxed(index + 1);
    begin_[index].Assign(value);
  }

  T* PopBack() REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK_GT(back_index_.LoadRelaxed(), front_index_.LoadRelaxed());
    // Decrement the back index non-atomically.
    back_index_.StoreRelaxed(back_index_.LoadRelaxed() - 1);
    return begin_[back_index_.LoadRelaxed()].AsMirrorPtr();
  }

  // Take an item from the front of the stack.
  T* PopFront() {
    int32_t index = front_index_.LoadRelaxed();
    DCHECK_LT(index, back_index_.LoadRelaxed());
    front_index_.StoreRelaxed(index + 1);
    return begin_[index].AsMirrorPtr();
  }

  // Pop a number of elements.
  void PopBackCount(int32_t n) {
    DCHECK_GE(Size(), static_cast<size_t>(n));
    back_index_.StoreRelaxed(back_index_.LoadRelaxed() - n);
  }

  bool IsEmpty() const {
    return Size() == 0;
  }

  bool IsFull() const {
    return Size() == growth_limit_;
  }

  size_t Size() const {
    DCHECK_LE(front_index_.LoadRelaxed(), back_index_.LoadRelaxed());
    return back_index_.LoadRelaxed() - front_index_.LoadRelaxed();
  }

  StackReference<T>* Begin() const {
    return begin_ + front_index_.LoadRelaxed();
  }
  StackReference<T>* End() const {
    return begin_ + back_index_.LoadRelaxed();
  }

  size_t Capacity() const {
    return capacity_;
  }

  // Will clear the stack.
  void Resize(size_t new_capacity) {
    capacity_ = new_capacity;
    growth_limit_ = new_capacity;
    Init();
  }

  void Sort() {
    int32_t start_back_index = back_index_.LoadRelaxed();
    int32_t start_front_index = front_index_.LoadRelaxed();
    std::sort(Begin(), End(), ObjectComparator());
    CHECK_EQ(start_back_index, back_index_.LoadRelaxed());
    CHECK_EQ(start_front_index, front_index_.LoadRelaxed());
    if (kIsDebugBuild) {
      debug_is_sorted_ = true;
    }
  }

  bool ContainsSorted(const T* value) const REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(debug_is_sorted_);
    return std::binary_search(Begin(), End(), value, ObjectComparator());
  }

  bool Contains(const T* value) const REQUIRES_SHARED(Locks::mutator_lock_) {
    for (auto cur = Begin(), end = End(); cur != end; ++cur) {
      if (cur->AsMirrorPtr() == value) {
        return true;
      }
    }
    return false;
  }

 private:
  AtomicStack(const std::string& name, size_t growth_limit, size_t capacity)
      : name_(name),
        back_index_(0),
        front_index_(0),
        begin_(nullptr),
        growth_limit_(growth_limit),
        capacity_(capacity),
        debug_is_sorted_(true) {
  }

  // Returns false if we overflowed the stack.
  bool AtomicPushBackInternal(T* value, size_t limit) ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index;
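    // Reserve a slot by atomically advancing back_index_; retry if another pusher wins the
    // race. Once the slot is reserved, the value is published into it below.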
    do {
      index = back_index_.LoadRelaxed();
      if (UNLIKELY(static_cast<size_t>(index) >= limit)) {
        // Stack overflow.
        return false;
      }
    } while (!back_index_.CompareAndSetWeakRelaxed(index, index + 1));
    begin_[index].Assign(value);
    return true;
  }

  // Allocates the backing memory; capacity_ is a count of elements, not bytes.
  void Init() {
    std::string error_msg;
    mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), nullptr, capacity_ * sizeof(begin_[0]),
                                        PROT_READ | PROT_WRITE, false, false, &error_msg));
    CHECK(mem_map_.get() != nullptr) << "couldn't allocate mark stack.\n" << error_msg;
    uint8_t* addr = mem_map_->Begin();
    CHECK(addr != nullptr);
    debug_is_sorted_ = true;
    begin_ = reinterpret_cast<StackReference<T>*>(addr);
    Reset();
  }

  // Name of the mark stack.
  std::string name_;
  // Memory mapping of the atomic stack.
  std::unique_ptr<MemMap> mem_map_;
  // Back index (index after the last element pushed).
  AtomicInteger back_index_;
  // Front index, used for implementing PopFront.
  AtomicInteger front_index_;
  // Base of the atomic stack.
  StackReference<T>* begin_;
  // Current maximum which we can push back to, must be <= capacity_.
  size_t growth_limit_;
  // Maximum number of elements.
  size_t capacity_;
  // Whether or not the stack is sorted, only updated in debug mode to avoid performance overhead.
  bool debug_is_sorted_;

  DISALLOW_COPY_AND_ASSIGN(AtomicStack);
};

typedef AtomicStack<mirror::Object> ObjectStack;

}  // namespace accounting
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_