/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ROOT_H_
#define ART_RUNTIME_GC_ROOT_H_

#include <ostream>  // For std::ostream in RootInfo::Describe().
#include <string>   // For RootInfo::ToString().

#include "base/macros.h"
#include "base/mutex.h"       // For Locks::mutator_lock_.
#include "mirror/object_reference.h"
#include "read_barrier_option.h"

namespace art {
class ArtField;
class ArtMethod;
template<class MirrorType> class ObjPtr;

namespace mirror {
class Object;
}  // namespace mirror

template <size_t kBufferSize>
class BufferedRootVisitor;

// Dependent on the pointer size so that we don't have frames that are too big on 64-bit.
// This works out to 128 entries on 64-bit and 256 entries on 32-bit, i.e. a 1 KiB buffer
// of pointers either way.
static const size_t kDefaultBufferedRootCount = 1024 / sizeof(void*);

enum RootType {
  kRootUnknown = 0,
  kRootJNIGlobal,
  kRootJNILocal,
  kRootJavaFrame,
  kRootNativeStack,
  kRootStickyClass,
  kRootThreadBlock,
  kRootMonitorUsed,
  kRootThreadObject,
  kRootInternedString,
  kRootFinalizing,  // used for HPROF's conversion to HprofHeapTag
  kRootDebugger,
  kRootReferenceCleanup,  // used for HPROF's conversion to HprofHeapTag
  kRootVMInternal,
  kRootJNIMonitor,
};
std::ostream& operator<<(std::ostream& os, const RootType& root_type);

// Metadata describing where a root came from. Currently only used by hprof;
// thread_id_ and type_ exist solely for hprof's benefit.
class RootInfo {
 public:
  // Thread id 0 is for non-thread roots.
  explicit RootInfo(RootType type, uint32_t thread_id = 0)
      : type_(type), thread_id_(thread_id) {
  }
  RootInfo(const RootInfo&) = default;
  virtual ~RootInfo() {
  }
  RootType GetType() const {
    return type_;
  }
  uint32_t GetThreadId() const {
    return thread_id_;
  }
  virtual void Describe(std::ostream& os) const {
    os << "Type=" << type_ << " thread_id=" << thread_id_;
  }
  std::string ToString() const;

 private:
  const RootType type_;
  const uint32_t thread_id_;
};

inline std::ostream& operator<<(std::ostream& os, const RootInfo& root_info) {
  root_info.Describe(os);
  return os;
}
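
// A minimal usage sketch (illustrative only; the exact text produced depends on the generated
// operator<< for RootType):
//
//   RootInfo info(kRootJNIGlobal, /* thread_id= */ 5);
//   std::string s = info.ToString();  // e.g. "Type=kRootJNIGlobal thread_id=5".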

// Not all combinations of flags are valid. You may not request both all roots and only the new
// roots (there is no logical reason to do so), nor both start and stop logging new roots in the
// same visit.
//
// The precise flag ensures that more metadata is supplied. An example is vreg data for compiled
// method frames.
enum VisitRootFlags : uint8_t {
  kVisitRootFlagAllRoots = 0x1,
  kVisitRootFlagNewRoots = 0x2,
  kVisitRootFlagStartLoggingNewRoots = 0x4,
  kVisitRootFlagStopLoggingNewRoots = 0x8,
  kVisitRootFlagClearRootLog = 0x10,
  kVisitRootFlagClassLoader = 0x20,
  kVisitRootFlagPrecise = 0x80,
};
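
// Illustrative example: a typical "visit everything, with precise metadata" request. Flags
// combine with plain bitwise-or (the cast is needed because or-ing promotes to int); per the
// comment above, pairing kVisitRootFlagAllRoots with kVisitRootFlagNewRoots, or the start/stop
// logging flags with each other, is invalid.
//
//   VisitRootFlags flags = static_cast<VisitRootFlags>(
//       kVisitRootFlagAllRoots | kVisitRootFlagPrecise);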

class RootVisitor {
 public:
  virtual ~RootVisitor() { }

  // Single root version, not overridable.
  ALWAYS_INLINE void VisitRoot(mirror::Object** root, const RootInfo& info)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    VisitRoots(&root, 1, info);
  }

  // Single root version, not overridable.
  ALWAYS_INLINE void VisitRootIfNonNull(mirror::Object** root, const RootInfo& info)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (*root != nullptr) {
      VisitRoot(root, info);
    }
  }

  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                          const RootInfo& info)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
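
// Illustrative sketch of a concrete visitor (hypothetical, not part of the runtime): both batch
// overloads must be overridden; this one merely counts the roots it is shown.
//
//   class CountingRootVisitor : public RootVisitor {
//    public:
//     void VisitRoots(mirror::Object***, size_t count, const RootInfo&) OVERRIDE
//         REQUIRES_SHARED(Locks::mutator_lock_) {
//       count_ += count;
//     }
//     void VisitRoots(mirror::CompressedReference<mirror::Object>**, size_t count,
//                     const RootInfo&) OVERRIDE
//         REQUIRES_SHARED(Locks::mutator_lock_) {
//       count_ += count;
//     }
//     size_t count_ = 0;
//   };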

// Visits roots one at a time and does not support updating roots. Used when performance is not
// critical.
class SingleRootVisitor : public RootVisitor {
 private:
  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      VisitRoot(*roots[i], info);
    }
  }

  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                  const RootInfo& info) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      VisitRoot(roots[i]->AsMirrorPtr(), info);
    }
  }

  virtual void VisitRoot(mirror::Object* root, const RootInfo& info) = 0;
};
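
// Illustrative sketch (hypothetical): a subclass only implements the single-root hook; the
// LOG macro is assumed to come from base/logging.h.
//
//   class DumpRootVisitor : public SingleRootVisitor {
//     void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE {
//       LOG(INFO) << "Root " << root << ": " << info;
//     }
//   };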

class GcRootSource {
 public:
  GcRootSource()
      : field_(nullptr), method_(nullptr) {
  }
  explicit GcRootSource(ArtField* field)
      : field_(field), method_(nullptr) {
  }
  explicit GcRootSource(ArtMethod* method)
      : field_(nullptr), method_(method) {
  }
  ArtField* GetArtField() const {
    return field_;
  }
  ArtMethod* GetArtMethod() const {
    return method_;
  }
  bool HasArtField() const {
    return field_ != nullptr;
  }
  bool HasArtMethod() const {
    return method_ != nullptr;
  }

 private:
  ArtField* const field_;
  ArtMethod* const method_;

  DISALLOW_COPY_AND_ASSIGN(GcRootSource);
};

template<class MirrorType>
class GcRoot {
 public:
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ALWAYS_INLINE MirrorType* Read(GcRootSource* gc_root_source = nullptr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitRoot(RootVisitor* visitor, const RootInfo& info) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(!IsNull());
    mirror::CompressedReference<mirror::Object>* roots[1] = { &root_ };
    visitor->VisitRoots(roots, 1u, info);
    DCHECK(!IsNull());
  }

  void VisitRootIfNonNull(RootVisitor* visitor, const RootInfo& info) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!IsNull()) {
      VisitRoot(visitor, info);
    }
  }

  ALWAYS_INLINE mirror::CompressedReference<mirror::Object>* AddressWithoutBarrier() {
    return &root_;
  }

  ALWAYS_INLINE bool IsNull() const {
    // It's safe to null-check it without a read barrier.
    return root_.IsNull();
  }

  ALWAYS_INLINE GcRoot() {}
  explicit ALWAYS_INLINE GcRoot(MirrorType* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  explicit ALWAYS_INLINE GcRoot(ObjPtr<MirrorType> ref)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  // Root visitors take pointers to root_ and place them in CompressedReference** arrays. We use a
  // CompressedReference<mirror::Object> here since it violates strict aliasing requirements to
  // cast CompressedReference<MirrorType>* to CompressedReference<mirror::Object>*.
  mutable mirror::CompressedReference<mirror::Object> root_;

  template <size_t kBufferSize> friend class BufferedRootVisitor;
};
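
// Illustrative usage sketch (the Holder class is hypothetical): a GcRoot member keeps an object
// visible to the GC only as long as the holder reports it to root visitors.
//
//   class Holder {
//    public:
//     mirror::Object* Get() REQUIRES_SHARED(Locks::mutator_lock_) {
//       return root_.Read();  // Performs a read barrier by default.
//     }
//     void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
//       root_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
//     }
//
//    private:
//     // Assigned elsewhere, e.g. root_ = GcRoot<mirror::Object>(obj);
//     GcRoot<mirror::Object> root_;
//   };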

// Simple data structure for buffered root visiting that avoids per-root virtual dispatch
// overhead. Currently only handles CompressedReferences, since these are more common than the
// Object** roots, which are used only for thread-local roots.
template <size_t kBufferSize>
class BufferedRootVisitor {
 public:
  BufferedRootVisitor(RootVisitor* visitor, const RootInfo& root_info)
      : visitor_(visitor), root_info_(root_info), buffer_pos_(0) {
  }

  ~BufferedRootVisitor() {
    Flush();
  }

  template <class MirrorType>
  ALWAYS_INLINE void VisitRootIfNonNull(GcRoot<MirrorType>& root)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root.IsNull()) {
      VisitRoot(root);
    }
  }

  template <class MirrorType>
  ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  template <class MirrorType>
  void VisitRoot(GcRoot<MirrorType>& root) REQUIRES_SHARED(Locks::mutator_lock_) {
    VisitRoot(root.AddressWithoutBarrier());
  }

  template <class MirrorType>
  void VisitRoot(mirror::CompressedReference<MirrorType>* root)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(buffer_pos_ >= kBufferSize)) {
      Flush();
    }
    roots_[buffer_pos_++] = root;
  }

  void Flush() REQUIRES_SHARED(Locks::mutator_lock_) {
    visitor_->VisitRoots(roots_, buffer_pos_, root_info_);
    buffer_pos_ = 0;
  }

 private:
  RootVisitor* const visitor_;
  RootInfo root_info_;
  mirror::CompressedReference<mirror::Object>* roots_[kBufferSize];
  size_t buffer_pos_;
};
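
// Illustrative usage sketch (hypothetical function; assumes <vector> is available at the call
// site): many roots are funneled through one virtual VisitRoots call per kBufferSize entries,
// and the destructor flushes whatever is left in the buffer.
//
//   void VisitAll(RootVisitor* visitor, std::vector<GcRoot<mirror::Object>>& roots)
//       REQUIRES_SHARED(Locks::mutator_lock_) {
//     BufferedRootVisitor<kDefaultBufferedRootCount> buffered(visitor,
//                                                             RootInfo(kRootVMInternal));
//     for (GcRoot<mirror::Object>& root : roots) {
//       buffered.VisitRootIfNonNull(root);
//     }
//   }  // ~BufferedRootVisitor flushes the remainder here.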

class UnbufferedRootVisitor {
 public:
  UnbufferedRootVisitor(RootVisitor* visitor, const RootInfo& root_info)
      : visitor_(visitor), root_info_(root_info) {}

  template <class MirrorType>
  ALWAYS_INLINE void VisitRootIfNonNull(GcRoot<MirrorType>& root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root.IsNull()) {
      VisitRoot(root);
    }
  }

  template <class MirrorType>
  ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  template <class MirrorType>
  void VisitRoot(GcRoot<MirrorType>& root) const REQUIRES_SHARED(Locks::mutator_lock_) {
    VisitRoot(root.AddressWithoutBarrier());
  }

  template <class MirrorType>
  void VisitRoot(mirror::CompressedReference<MirrorType>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    visitor_->VisitRoots(&root, 1, root_info_);
  }

 private:
  RootVisitor* const visitor_;
  RootInfo root_info_;
};
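
// Illustrative sketch (visitor and some_root are hypothetical): same interface shape as
// BufferedRootVisitor, but each root is forwarded immediately as a batch of one, so any update
// made by the underlying visitor is visible right away.
//
//   UnbufferedRootVisitor unbuffered(visitor, RootInfo(kRootVMInternal));
//   unbuffered.VisitRootIfNonNull(some_root);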

}  // namespace art

#endif  // ART_RUNTIME_GC_ROOT_H_