/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_
#define ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_

#include <sys/mman.h>

#include <algorithm>
#include <string>

#include "atomic_integer.h"
#include "base/logging.h"
#include "base/macros.h"
#include "UniquePtr.h"
#include "mem_map.h"
#include "utils.h"

namespace art {
namespace gc {
namespace accounting {

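// A bounded stack of elements backed by an anonymous memory mapping. PushBack, PopBack and
// PopFront are non-atomic and require exclusive access; AtomicPushBack reserves a slot lock-free
// via a compare-and-swap on the back index, so multiple threads may push concurrently.
//
// Illustrative usage sketch (names such as 'obj' and 'Visit' are placeholders, not part of this
// file):
//
//   UniquePtr<ObjectStack> stack(ObjectStack::Create("mark stack", 16384));
//   stack->PushBack(obj);                        // Exclusive access.
//   if (!stack->AtomicPushBack(obj)) {           // Concurrent pushers; false on overflow.
//     LOG(FATAL) << "mark stack overflow";
//   }
//   while (!stack->IsEmpty()) {
//     Visit(stack->PopBack());
//   }
//   stack->Reset();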
template <typename T>
class AtomicStack {
 public:
  // Capacity is how many elements we can store in the stack.
  static AtomicStack* Create(const std::string& name, size_t capacity) {
    UniquePtr<AtomicStack> mark_stack(new AtomicStack(name, capacity));
    mark_stack->Init();
    return mark_stack.release();
  }

  ~AtomicStack() {}

  void Reset() {
    DCHECK(mem_map_.get() != NULL);
    DCHECK(begin_ != NULL);
    front_index_ = 0;
    back_index_ = 0;
    debug_is_sorted_ = true;
    // Release the backing pages so that a cleared stack does not keep holding physical memory.
    int result = madvise(begin_, sizeof(T) * capacity_, MADV_DONTNEED);
    if (result == -1) {
      PLOG(WARNING) << "madvise failed";
    }
  }

  // Beware: Mixing atomic pushes and atomic pops will cause the ABA problem.
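  // (Illustration of the hazard: a pusher reads back_index_, is preempted while other threads
  //  pop and push so that the index returns to the value it read, and its compare_and_swap then
  //  succeeds even though the stack contents changed underneath it. Between synchronization
  //  points, concurrent callers should therefore stick to one kind of atomic operation.)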

  // Returns false if we overflowed the stack.
  bool AtomicPushBack(const T& value) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index;
    do {
      index = back_index_;
      if (UNLIKELY(static_cast<size_t>(index) >= capacity_)) {
        // Stack overflow.
        return false;
      }
      // Reserve the slot at 'index' by atomically bumping back_index_; retry if another thread
      // won the race.
    } while (!back_index_.compare_and_swap(index, index + 1));
    begin_[index] = value;
    return true;
  }

  // Non-atomic push; the caller must have exclusive access to the stack.
  void PushBack(const T& value) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index = back_index_;
    DCHECK_LT(static_cast<size_t>(index), capacity_);
    back_index_ = index + 1;
    begin_[index] = value;
  }

  // Non-atomic pop off the back of the stack; requires exclusive access.
  T PopBack() {
    DCHECK_GT(back_index_, front_index_);
    // Decrement the back index non-atomically.
    back_index_ = back_index_ - 1;
    return begin_[back_index_];
  }

  // Take an item from the front of the stack.
  T PopFront() {
    int32_t index = front_index_;
    DCHECK_LT(index, back_index_.load());
    front_index_ = front_index_ + 1;
    return begin_[index];
  }

  // Pop n elements off the back of the stack.
  void PopBackCount(int32_t n) {
    DCHECK_GE(Size(), static_cast<size_t>(n));
    back_index_.fetch_sub(n);
  }

  bool IsEmpty() const {
    return Size() == 0;
  }

  size_t Size() const {
    DCHECK_LE(front_index_, back_index_);
    return back_index_ - front_index_;
  }

  T* Begin() const {
    return const_cast<T*>(begin_ + front_index_);
  }

  T* End() const {
    return const_cast<T*>(begin_ + back_index_);
  }

  size_t Capacity() const {
    return capacity_;
  }

  // Resize the stack to the given capacity. Note: this clears (resets) the current contents.
  void Resize(size_t new_capacity) {
    capacity_ = new_capacity;
    Init();
  }

  // Sort the stack contents in place; the stack must not be modified concurrently.
  void Sort() {
    int32_t start_back_index = back_index_.load();
    int32_t start_front_index = front_index_.load();
    std::sort(Begin(), End());
    CHECK_EQ(start_back_index, back_index_.load());
    CHECK_EQ(start_front_index, front_index_.load());
    if (kIsDebugBuild) {
      debug_is_sorted_ = true;
    }
  }

  // Binary search for value; only valid after the stack has been sorted with Sort().
  bool ContainsSorted(const T& value) const {
    DCHECK(debug_is_sorted_);
    return std::binary_search(Begin(), End(), value);
  }

  // Linear search for value.
  bool Contains(const T& value) const {
    return std::find(Begin(), End(), value) != End();
  }

 private:
  AtomicStack(const std::string& name, const size_t capacity)
      : name_(name),
        back_index_(0),
        front_index_(0),
        begin_(NULL),
        capacity_(capacity),
        debug_is_sorted_(true) {
  }

  // Allocate the backing memory mapping; capacity_ is a count of elements, not bytes.
  void Init() {
    mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), NULL, capacity_ * sizeof(T),
                                        PROT_READ | PROT_WRITE));
    CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack";
    byte* addr = mem_map_->Begin();
    CHECK(addr != NULL);
    debug_is_sorted_ = true;
    begin_ = reinterpret_cast<T*>(addr);
    Reset();
  }

  // Name of the mark stack.
  std::string name_;

  // Memory mapping of the atomic stack.
  UniquePtr<MemMap> mem_map_;

  // Back index (index after the last element pushed).
  AtomicInteger back_index_;

  // Front index, used for implementing PopFront.
  AtomicInteger front_index_;

  // Base of the atomic stack.
  T* begin_;

  // Maximum number of elements.
  size_t capacity_;

  // Whether or not the stack is sorted, only updated in debug mode to avoid performance overhead.
  bool debug_is_sorted_;

  DISALLOW_COPY_AND_ASSIGN(AtomicStack);
};

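// Stack of object references; used by the garbage collector, for example as the mark stack.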
typedef AtomicStack<mirror::Object*> ObjectStack;

}  // namespace accounting
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_