/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_
#define ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_

#include <stdint.h>

#include <iosfwd>
#include <limits>
#include <string>

#include <android-base/logging.h>

#include "base/bit_utils.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "read_barrier_option.h"

namespace art {

class RootInfo;

namespace mirror {
class Object;
}  // namespace mirror

class MemMap;

// Maintain a table of indirect references. Used for local/global JNI references.
//
// The table contains object references, where the strong (local/global) references are part of the
// GC root set (but not the weak global references). When an object is added we return an
// IndirectRef that is not a valid pointer but can be used to find the original value in O(1) time.
// Conversions to and from indirect references are performed on upcalls and downcalls, so they need
// to be very fast.
//
// To be efficient for JNI local variable storage, we need to provide operations that allow us to
// operate on segments of the table, where segments are pushed and popped as if on a stack. For
// example, deletion of an entry should only succeed if it appears in the current segment, and we
// want to be able to strip off the current segment quickly when a method returns. Additions to the
// table must be made in the current segment even if space is available in an earlier area.
//
// A new segment is created when we call into native code from interpreted code, or when we handle
// the JNI PushLocalFrame function.
//
// The GC must be able to scan the entire table quickly.
//
// In summary, these must be very fast:
//  - adding or removing a segment
//  - adding references to a new segment
//  - converting an indirect reference back to an Object
// These can be a little slower, but must still be pretty quick:
//  - adding references to a "mature" segment
//  - removing individual references
//  - scanning the entire table straight through
//
// If there's more than one segment, we don't guarantee that the table will fill completely before
// we fail due to lack of space. We do ensure that the current segment will pack tightly, which
// should satisfy JNI requirements (e.g. EnsureLocalCapacity).
//
// Only SynchronizedGet is synchronized.

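// As an illustration of segment lifetimes from the JNI user's side (a sketch using standard JNI
// calls, not this class's API): PushLocalFrame opens a new segment and PopLocalFrame strips it
// off again, discarding every local reference created inside it.
//
//   void Example(JNIEnv* env, jobject obj) {
//     if (env->PushLocalFrame(16) == JNI_OK) {    // New local-ref segment.
//       jclass klass = env->GetObjectClass(obj);  // Local ref goes into the new segment.
//       // ... use klass ...
//       env->PopLocalFrame(nullptr);              // Whole segment is stripped off at once.
//     }
//   }
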
// Indirect reference definition. This must be interchangeable with JNI's jobject, and it's
// convenient to let null be null, so we use void*.
//
// We need a (potentially) large table index and a 2-bit reference type (global, local, weak
// global). We also reserve some bits to be used to detect stale indirect references: we put a
// serial number in the extra bits, and keep a copy of the serial number in the table. This requires
// more memory and additional memory accesses on add/get, but is moving-GC safe. It will catch
// additional problems, e.g.: create iref1 for obj, delete iref1, create iref2 for same obj,
// lookup iref1. A pattern based on object bits will miss this.
typedef void* IndirectRef;

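// The resulting bit layout of an IndirectRef, from most to least significant (a sketch derived
// from the encoding helpers in IndirectReferenceTable below; the widths are compile-time
// constants):
//
//   +------------------+-------------+-----------+
//   |   table index    |   serial    |   kind    |
//   +------------------+-------------+-----------+
//     remaining bits     kSerialBits   kKindBits
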
// Indirect reference kind, used as the two low bits of IndirectRef.
//
// For convenience these match up with enum jobjectRefType from jni.h.
enum IndirectRefKind {
  kHandleScopeOrInvalid = 0,           // <<stack indirect reference table or invalid reference>>
  kLocal                = 1,           // <<local reference>>
  kGlobal               = 2,           // <<global reference>>
  kWeakGlobal           = 3,           // <<weak global reference>>
  kLastKind             = kWeakGlobal
};
std::ostream& operator<<(std::ostream& os, const IndirectRefKind& rhs);
const char* GetIndirectRefKindString(const IndirectRefKind& kind);

// Table definition.
//
// For the global reference table, the expected common operations are adding a new entry and
// removing a recently-added entry (usually the most-recently-added entry). For JNI local
// references, the common operations are adding a new entry and removing an entire table segment.
//
// If we delete entries from the middle of the list, we will be left with "holes". We track the
// number of holes so that, when adding new elements, we can quickly decide to do a trivial append
// or go slot-hunting.
//
// When the top-most entry is removed, any holes immediately below it are also removed. Thus,
// deletion of an entry may reduce "top_index" by more than one.
//
// To get the desired behavior for JNI locals, we need to know the bottom and top of the current
// "segment". The top is managed internally, and the bottom is passed in as a function argument.
// When we call a native method or push a local frame, the current top index gets pushed on, and
// serves as the new bottom. When we pop a frame off, the value from the stack becomes the new top
// index, and the value stored in the previous frame becomes the new bottom.
//
// Holes are cached locally for the current segment; otherwise we would have to pass both the
// bottom index and the number of holes, which would restrict us to 16 bits for the top index. The
// cached value lives within the table. To avoid extra code in generated JNI transitions, which
// implicitly form segments, the code for adding and removing references needs to detect a change
// of segment. Helper fields are used for this detection.
//
// Common alternative implementation: make IndirectRef a pointer to the actual reference slot.
// Instead of getting a table and doing a lookup, the lookup can be done instantly. Operations like
// determining the type and deleting the reference are more expensive because the table must be
// hunted for (i.e. you have to do a pointer comparison to see which table it's in), you can't move
// the table when expanding it (so realloc() is out), and tricks like serial number checking to
// detect stale references aren't possible (though we may be able to get similar benefits with
// other approaches).
//
// TODO: consider a "lastDeleteIndex" for quick hole-filling when an add immediately follows a
// delete; it must be invalidated after a segment pop. Might be worth only using it for JNI
// globals.
//
// TODO: may want completely different add/remove algorithms for global and local refs to improve
// performance. A large circular buffer might reduce the amortized cost of adding global
// references.

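// A hypothetical walk-through of the segment mechanism (indices and names invented for
// illustration):
//
//   // segment_state_.top_index == 3: entries 0..2 are live.
//   cookie = GetSegmentState();       // Caller saves {top_index = 3} as the segment bottom.
//   Add(cookie, obj_a, &error_msg);   // Fills entry 3; top_index becomes 4.
//   Add(cookie, obj_b, &error_msg);   // Fills entry 4; top_index becomes 5.
//   Remove(cookie, iref_3);           // Entry 3 is not topmost; it becomes a hole.
//   Remove(cookie, iref_4);           // Topmost entry removed; the hole below is reclaimed
//                                     // too, so top_index drops back to 3, not 4.
//   SetSegmentState(cookie);          // Pop: everything above the saved bottom is discarded.
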
// The state of the current segment. We only store the index. Splitting it for index and hole
// count restricts the range too much.
struct IRTSegmentState {
  uint32_t top_index;
};

// Use as initial value for "cookie", and when table has only one segment.
static constexpr IRTSegmentState kIRTFirstSegment = { 0 };

// Try to choose kIRTPrevCount so that sizeof(IrtEntry) is a power of 2.
// Each entry contains multiple reference slots but only one active one; this helps us detect
// use-after-free errors, since the serial stored in the indirect ref won't match.
static constexpr size_t kIRTPrevCount = kIsDebugBuild ? 7 : 3;

class IrtEntry {
 public:
  void Add(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  GcRoot<mirror::Object>* GetReference() {
    DCHECK_LT(serial_, kIRTPrevCount);
    return &references_[serial_];
  }

  const GcRoot<mirror::Object>* GetReference() const {
    DCHECK_LT(serial_, kIRTPrevCount);
    return &references_[serial_];
  }

  uint32_t GetSerial() const {
    return serial_;
  }

  void SetReference(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  uint32_t serial_;
  GcRoot<mirror::Object> references_[kIRTPrevCount];
};
static_assert(sizeof(IrtEntry) == (1 + kIRTPrevCount) * sizeof(uint32_t),
              "Unexpected sizeof(IrtEntry)");
static_assert(IsPowerOfTwo(sizeof(IrtEntry)), "Unexpected sizeof(IrtEntry)");
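// For concreteness: in debug builds kIRTPrevCount is 7, so sizeof(IrtEntry) is
// (1 + 7) * sizeof(uint32_t) = 32 bytes; in release builds it is (1 + 3) * 4 = 16 bytes. Both
// are powers of two, as the static_asserts above verify.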

class IrtIterator {
 public:
  IrtIterator(IrtEntry* table, size_t i, size_t capacity) REQUIRES_SHARED(Locks::mutator_lock_)
      : table_(table), i_(i), capacity_(capacity) {
    // capacity_ is only used on some targets; avoid an unused-member warning elsewhere.
    UNUSED(capacity_);
  }

  IrtIterator& operator++() REQUIRES_SHARED(Locks::mutator_lock_) {
    ++i_;
    return *this;
  }

  GcRoot<mirror::Object>* operator*() REQUIRES_SHARED(Locks::mutator_lock_) {
    // This does not have a read barrier as this is used to visit roots.
    return table_[i_].GetReference();
  }

  bool equals(const IrtIterator& rhs) const {
    return (i_ == rhs.i_ && table_ == rhs.table_);
  }

 private:
  IrtEntry* const table_;
  size_t i_;
  const size_t capacity_;
};

inline bool operator==(const IrtIterator& lhs, const IrtIterator& rhs) {
  return lhs.equals(rhs);
}

inline bool operator!=(const IrtIterator& lhs, const IrtIterator& rhs) {
  return !lhs.equals(rhs);
}

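// A minimal sketch of walking the table with the iterator (hypothetical caller; in practice
// IndirectReferenceTable::VisitRoots does this while reporting roots to the GC):
//
//   for (auto it = irt.begin(), end = irt.end(); it != end; ++it) {
//     GcRoot<mirror::Object>* root = *it;  // No read barrier: only suitable for root visiting.
//     // ... report *root ...
//   }
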
class IndirectReferenceTable {
 public:
  enum class ResizableCapacity {
    kNo,
    kYes
  };

  // WARNING: Construction of the IndirectReferenceTable may fail. error_msg must not be null.
  // If error_msg is set by the constructor, then construction has failed and the
  // IndirectReferenceTable will be in an invalid state. Use IsValid to check whether the object
  // is usable.
  IndirectReferenceTable(size_t max_count,
                         IndirectRefKind kind,
                         ResizableCapacity resizable,
                         std::string* error_msg);

  ~IndirectReferenceTable();

  /*
   * Checks whether construction of the IndirectReferenceTable succeeded.
   *
   * This object must only be used if IsValid() returns true. It is safe to
   * call IsValid from multiple threads without locking or other explicit
   * synchronization.
   */
  bool IsValid() const;

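  // A minimal construction sketch (variable names are illustrative only):
  //
  //   std::string error_msg;
  //   IndirectReferenceTable irt(/* max_count= */ 512,
  //                              kGlobal,
  //                              IndirectReferenceTable::ResizableCapacity::kNo,
  //                              &error_msg);
  //   if (!irt.IsValid()) {
  //     LOG(FATAL) << "Could not create global reference table: " << error_msg;
  //   }
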
  // Add a new entry. "obj" must be a valid non-null object reference. Returns null if an error
  // occurred (with an appropriate error message set).
  IndirectRef Add(IRTSegmentState previous_state,
                  ObjPtr<mirror::Object> obj,
                  std::string* error_msg)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given an IndirectRef in the table, return the Object it refers to.
  //
  // This function may abort under error conditions.
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<mirror::Object> Get(IndirectRef iref) const REQUIRES_SHARED(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Synchronized get which reads a reference, acquiring a lock if necessary.
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<mirror::Object> SynchronizedGet(IndirectRef iref) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return Get<kReadBarrierOption>(iref);
  }

  // Updates an existing indirect reference to point to a new object.
  void Update(IndirectRef iref, ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Remove an existing entry.
  //
  // If the entry is not between the current top index and the bottom index
  // specified by the cookie, we don't remove anything. This is the behavior
  // required by JNI's DeleteLocalRef function.
  //
  // Returns "false" if nothing was removed.
  bool Remove(IRTSegmentState previous_state, IndirectRef iref);

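  // A sketch of the cookie protocol that Add/Remove expect (roughly what JNI transitions do
  // around a native call; names are illustrative):
  //
  //   const IRTSegmentState cookie = irt.GetSegmentState();  // Current top becomes the bottom.
  //   IndirectRef ref = irt.Add(cookie, obj, &error_msg);    // Lands in the new segment.
  //   ...
  //   irt.Remove(cookie, ref);      // Succeeds only for entries above the cookie's top_index.
  //   irt.SetSegmentState(cookie);  // Pop the segment when the native call returns.
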
  void AssertEmpty() REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  // Return the number of entries in the entire table. This includes holes, and
  // so may be larger than the actual number of "live" entries.
  size_t Capacity() const {
    return segment_state_.top_index;
  }

  // Ensure that at least free_capacity elements are available, or return false.
  bool EnsureFreeCapacity(size_t free_capacity, std::string* error_msg)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // See the implementation of EnsureFreeCapacity. We only report how much is trivially free,
  // without recovering holes. Thus this is a conservative estimate.
  size_t FreeCapacity() const;

  // Note IrtIterator does not have a read barrier as it's used to visit roots.
  IrtIterator begin() {
    return IrtIterator(table_, 0, Capacity());
  }

  IrtIterator end() {
    return IrtIterator(table_, Capacity(), Capacity());
  }

  void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
      REQUIRES_SHARED(Locks::mutator_lock_);

  IRTSegmentState GetSegmentState() const {
    return segment_state_;
  }

  void SetSegmentState(IRTSegmentState new_state);

  static Offset SegmentStateOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
    // Note: Currently segment_state_ is at offset 0. We're testing the expected value in
    //       jni_internal_test to make sure it stays correct. It is not OFFSETOF_MEMBER, as that
    //       is not pointer-size-safe.
    return Offset(0);
  }

  // Release pages past the end of the table that may have previously held references.
  void Trim() REQUIRES_SHARED(Locks::mutator_lock_);

  // Determine what kind of indirect reference this is. Opposite of EncodeIndirectRefKind.
  ALWAYS_INLINE static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) {
    return DecodeIndirectRefKind(reinterpret_cast<uintptr_t>(iref));
  }

 private:
  static constexpr size_t kSerialBits = MinimumBitsToStore(kIRTPrevCount);
  static constexpr uint32_t kShiftedSerialMask = (1u << kSerialBits) - 1;

  static constexpr size_t kKindBits = MinimumBitsToStore(
      static_cast<uint32_t>(IndirectRefKind::kLastKind));
  static constexpr uint32_t kKindMask = (1u << kKindBits) - 1;

  static constexpr uintptr_t EncodeIndex(uint32_t table_index) {
    static_assert(sizeof(IndirectRef) == sizeof(uintptr_t), "Unexpected IndirectRef size");
    DCHECK_LE(MinimumBitsToStore(table_index), BitSizeOf<uintptr_t>() - kSerialBits - kKindBits);
    return (static_cast<uintptr_t>(table_index) << kKindBits << kSerialBits);
  }
  static constexpr uint32_t DecodeIndex(uintptr_t uref) {
    return static_cast<uint32_t>((uref >> kKindBits) >> kSerialBits);
  }

  static constexpr uintptr_t EncodeIndirectRefKind(IndirectRefKind kind) {
    return static_cast<uintptr_t>(kind);
  }
  static constexpr IndirectRefKind DecodeIndirectRefKind(uintptr_t uref) {
    return static_cast<IndirectRefKind>(uref & kKindMask);
  }

  static constexpr uintptr_t EncodeSerial(uint32_t serial) {
    DCHECK_LE(MinimumBitsToStore(serial), kSerialBits);
    return serial << kKindBits;
  }
  static constexpr uint32_t DecodeSerial(uintptr_t uref) {
    return static_cast<uint32_t>(uref >> kKindBits) & kShiftedSerialMask;
  }

  constexpr uintptr_t EncodeIndirectRef(uint32_t table_index, uint32_t serial) const {
    DCHECK_LT(table_index, max_entries_);
    return EncodeIndex(table_index) | EncodeSerial(serial) | EncodeIndirectRefKind(kind_);
  }
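
  // Worked example, release config (kIRTPrevCount = 3): kSerialBits = MinimumBitsToStore(3) = 2
  // and kKindBits = MinimumBitsToStore(kLastKind = 3) = 2. Encoding table_index 5, serial 2,
  // kind kLocal:
  //   EncodeIndex(5)           = 5 << 2 << 2 = 0x50
  //   EncodeSerial(2)          = 2 << 2      = 0x08
  //   EncodeIndirectRefKind(1) =               0x01
  //   iref                     = 0x50 | 0x08 | 0x01 = 0x59
  // The Decode* helpers invert this by shifting and masking.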

  static void ConstexprChecks();

  // Extract the table index from an indirect reference.
  ALWAYS_INLINE static uint32_t ExtractIndex(IndirectRef iref) {
    return DecodeIndex(reinterpret_cast<uintptr_t>(iref));
  }

  IndirectRef ToIndirectRef(uint32_t table_index) const {
    DCHECK_LT(table_index, max_entries_);
    uint32_t serial = table_[table_index].GetSerial();
    return reinterpret_cast<IndirectRef>(EncodeIndirectRef(table_index, serial));
  }

  // Resize the backing table. Currently the new size must be larger than the current size.
  bool Resize(size_t new_size, std::string* error_msg);

  void RecoverHoles(IRTSegmentState from);

  // Abort if check_jni is not enabled. Otherwise, just log as an error.
  static void AbortIfNoCheckJNI(const std::string& msg);

  // Extra debugging checks.
  bool GetChecked(IndirectRef) const REQUIRES_SHARED(Locks::mutator_lock_);
  bool CheckEntry(const char*, IndirectRef, uint32_t) const;

  // Semi-public: read/written by JNI down calls.
  IRTSegmentState segment_state_;

  // Mem map where we store the indirect refs.
  std::unique_ptr<MemMap> table_mem_map_;
  // Bottom of the stack. Do not directly access the object references in this table, as they are
  // roots; use Get(), which has a read barrier.
  IrtEntry* table_;
  // Bit mask, ORed into all irefs.
  const IndirectRefKind kind_;

  // Maximum number of entries allowed (modulo resizing).
  size_t max_entries_;

  // Some values to retain old behavior with holes. A description of the algorithm is in the .cc
  // file.
  // TODO: Consider other data structures for compact tables, e.g., free lists.
  size_t current_num_holes_;
  IRTSegmentState last_known_previous_state_;

  // Whether the table's capacity may be resized. As there are no locks used, it is the caller's
  // responsibility to ensure thread-safety.
  ResizableCapacity resizable_;
};

}  // namespace art

#endif  // ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_