/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_DEX_CACHE_H_
#define ART_RUNTIME_MIRROR_DEX_CACHE_H_

#include "array.h"
#include "base/bit_utils.h"
#include "dex_file_types.h"
#include "object.h"
#include "object_array.h"

namespace art {

class ArtField;
class ArtMethod;
struct DexCacheOffsets;
class DexFile;
class ImageWriter;
union JValue;
class LinearAlloc;
class Thread;

namespace mirror {

class CallSite;
class Class;
class MethodType;
class String;

template <typename T> struct PACKED(8) DexCachePair {
  GcRoot<T> object;
  uint32_t index;
  // The array is initially [ {0,0}, {0,0}, {0,0} ... ].
  // We maintain the invariant that once a dex cache entry is populated,
  // the pointer is always non-null. Any given entry is therefore either
  // {non-null, non-zero} or {0,0}.
  //
  // It is then generally sufficient to check whether the lookup index
  // matches the stored index (for a non-zero lookup index), because a
  // match implies that the pointer is also non-null.
  //
  // The 0th entry is a special case: its value is either {0,0} (initial
  // state) or {non-null, 0}, the latter indicating that a valid object is
  // stored at that slot for a dex section id of 0.
  //
  // As an optimization, we want to avoid branching on the object pointer,
  // since it is always non-null when the index check succeeds (except for
  // the 0th index). We therefore initialize the 0th entry to {0,1}, which
  // is guaranteed to fail the "lookup index == stored index" check.
  DexCachePair(ObjPtr<T> object, uint32_t index)
      : object(object),
        index(index) {}
  DexCachePair() : index(0) {}
  DexCachePair(const DexCachePair<T>&) = default;
  DexCachePair& operator=(const DexCachePair<T>&) = default;

  static void Initialize(std::atomic<DexCachePair<T>>* dex_cache) {
    DexCachePair<T> first_elem;
    first_elem.object = GcRoot<T>(nullptr);
    first_elem.index = InvalidIndexForSlot(0);
    dex_cache[0].store(first_elem, std::memory_order_relaxed);
  }

  static uint32_t InvalidIndexForSlot(uint32_t slot) {
    // Since the cache size is a power of two, 0 will always map to slot 0.
    // Use 1 for slot 0 and 0 for all other slots.
    return (slot == 0) ? 1u : 0u;
  }

  T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (idx != index) {
      return nullptr;
    }
    DCHECK(!object.IsNull());
    return object.Read();
  }
};
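
// A minimal lookup sketch for the scheme described above (illustrative only;
// the real lookups live in DexCache below and in dex_cache-inl.h), assuming
// entries are slotted by index modulo a power-of-two cache size:
//   std::atomic<DexCachePair<T>>* cache = ...;  // `cache_size` slots
//   DexCachePair<T> pair = cache[idx % cache_size].load(std::memory_order_relaxed);
//   T* obj = pair.GetObjectForIndex(idx);       // nullptr signals a cache miss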

template <typename T> struct PACKED(2 * __SIZEOF_POINTER__) NativeDexCachePair {
  T* object;
  size_t index;
  // This is similar to DexCachePair except that we're storing a native pointer
  // instead of a GC root. See DexCachePair for the details.
  NativeDexCachePair(T* object, uint32_t index)
      : object(object),
        index(index) {}
  NativeDexCachePair() : object(nullptr), index(0u) { }
  NativeDexCachePair(const NativeDexCachePair<T>&) = default;
  NativeDexCachePair& operator=(const NativeDexCachePair<T>&) = default;

  static void Initialize(std::atomic<NativeDexCachePair<T>>* dex_cache, PointerSize pointer_size);

  static uint32_t InvalidIndexForSlot(uint32_t slot) {
    // Since the cache size is a power of two, 0 will always map to slot 0.
    // Use 1 for slot 0 and 0 for all other slots.
    return (slot == 0) ? 1u : 0u;
  }

  T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (idx != index) {
      return nullptr;
    }
    DCHECK(object != nullptr);
    return object;
  }
};

using TypeDexCachePair = DexCachePair<Class>;
using TypeDexCacheType = std::atomic<TypeDexCachePair>;

using StringDexCachePair = DexCachePair<String>;
using StringDexCacheType = std::atomic<StringDexCachePair>;

using FieldDexCachePair = NativeDexCachePair<ArtField>;
using FieldDexCacheType = std::atomic<FieldDexCachePair>;

using MethodTypeDexCachePair = DexCachePair<MethodType>;
using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>;

// C++ mirror of java.lang.DexCache.
class MANAGED DexCache FINAL : public Object {
 public:
  // Size of java.lang.DexCache.class.
  static uint32_t ClassSize(PointerSize pointer_size);

  // Size of type dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheTypeCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheTypeCacheSize),
                "Type dex cache size is not a power of 2.");

  // Size of string dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheStringCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheStringCacheSize),
                "String dex cache size is not a power of 2.");

  // Size of field dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheFieldCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheFieldCacheSize),
                "Field dex cache size is not a power of 2.");

  // Size of method type dex cache. Needs to be a power of 2 for entrypoint assumptions
  // to hold.
  static constexpr size_t kDexCacheMethodTypeCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheMethodTypeCacheSize),
                "MethodType dex cache size is not a power of 2.");
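
  // Since the sizes above are powers of two, an entrypoint can presumably
  // reduce an index to its cache slot with a single AND, e.g.
  //   slot = string_idx & (kDexCacheStringCacheSize - 1u);
  // which is equivalent to string_idx % kDexCacheStringCacheSize.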

  static constexpr size_t StaticTypeSize() {
    return kDexCacheTypeCacheSize;
  }

  static constexpr size_t StaticStringSize() {
    return kDexCacheStringCacheSize;
  }

  static constexpr size_t StaticArtFieldSize() {
    return kDexCacheFieldCacheSize;
  }

  static constexpr size_t StaticMethodTypeSize() {
    return kDexCacheMethodTypeCacheSize;
  }

  // Size of an instance of java.lang.DexCache not including referenced values.
  static constexpr uint32_t InstanceSize() {
    return sizeof(DexCache);
  }

  static void InitializeDexCache(Thread* self,
                                 ObjPtr<mirror::DexCache> dex_cache,
                                 ObjPtr<mirror::String> location,
                                 const DexFile* dex_file,
                                 LinearAlloc* linear_alloc,
                                 PointerSize image_pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::dex_lock_);

  void Fixup(ArtMethod* trampoline, PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
  void FixupStrings(StringDexCacheType* dest, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
  void FixupResolvedTypes(TypeDexCacheType* dest, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
  void FixupResolvedMethodTypes(MethodTypeDexCacheType* dest, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
  void FixupResolvedCallSites(GcRoot<mirror::CallSite>* dest, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  String* GetLocation() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
  }

  static MemberOffset StringsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, strings_);
  }

  static MemberOffset ResolvedTypesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_);
  }

  static MemberOffset ResolvedFieldsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_fields_);
  }

  static MemberOffset ResolvedMethodsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_);
  }

  static MemberOffset ResolvedMethodTypesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_method_types_);
  }

  static MemberOffset ResolvedCallSitesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_call_sites_);
  }

  static MemberOffset NumStringsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_strings_);
  }

  static MemberOffset NumResolvedTypesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_types_);
  }

  static MemberOffset NumResolvedFieldsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_fields_);
  }

  static MemberOffset NumResolvedMethodsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_methods_);
  }

  static MemberOffset NumResolvedMethodTypesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_method_types_);
  }

  static MemberOffset NumResolvedCallSitesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_call_sites_);
  }

  String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedString(dex::StringIndex string_idx, ObjPtr<mirror::String> resolved) ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Clear the string for the given string_idx. Used to undo string intern
  // transactions so that the string is not kept live.
  void ClearString(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  Class* GetResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx,
                                       ArtMethod* resolved,
                                       PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Pointer sized variant, used for patching.
  ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Pointer sized variant, used for patching.
  ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);
  ALWAYS_INLINE void ClearResolvedField(uint32_t idx, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  MethodType* GetResolvedMethodType(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedMethodType(uint32_t proto_idx, MethodType* resolved)
      REQUIRES_SHARED(Locks::mutator_lock_);

  CallSite* GetResolvedCallSite(uint32_t call_site_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  // Attempts to bind |call_site_idx| to the call site |resolved|. The
  // caller must use the return value in place of |resolved|: multiple
  // threads can each invoke the bootstrap method and produce a call site,
  // but the method handle invocation must go through a single commonly
  // agreed-upon call site.
  CallSite* SetResolvedCallSite(uint32_t call_site_idx, CallSite* resolved) WARN_UNUSED
      REQUIRES_SHARED(Locks::mutator_lock_);
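
  // Illustrative caller pattern (a sketch, not code from this header): always
  // adopt the returned call site, since another thread may have already bound
  // the index to a different instance:
  //   call_site = dex_cache->SetResolvedCallSite(call_site_idx, call_site);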

  StringDexCacheType* GetStrings() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr64<StringDexCacheType*>(StringsOffset());
  }

  void SetStrings(StringDexCacheType* strings) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(StringsOffset(), strings);
  }

  TypeDexCacheType* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<TypeDexCacheType*>(ResolvedTypesOffset());
  }

  void SetResolvedTypes(TypeDexCacheType* resolved_types)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedTypesOffset(), resolved_types);
  }

  ArtMethod** GetResolvedMethods() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<ArtMethod**>(ResolvedMethodsOffset());
  }

  void SetResolvedMethods(ArtMethod** resolved_methods)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedMethodsOffset(), resolved_methods);
  }

  FieldDexCacheType* GetResolvedFields() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<FieldDexCacheType*>(ResolvedFieldsOffset());
  }

  void SetResolvedFields(FieldDexCacheType* resolved_fields)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedFieldsOffset(), resolved_fields);
  }

  MethodTypeDexCacheType* GetResolvedMethodTypes()
      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr64<MethodTypeDexCacheType*>(ResolvedMethodTypesOffset());
  }

  void SetResolvedMethodTypes(MethodTypeDexCacheType* resolved_method_types)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedMethodTypesOffset(), resolved_method_types);
  }

  GcRoot<CallSite>* GetResolvedCallSites()
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<GcRoot<CallSite>*>(ResolvedCallSitesOffset());
  }

  void SetResolvedCallSites(GcRoot<CallSite>* resolved_call_sites)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedCallSitesOffset(), resolved_call_sites);
  }

  size_t NumStrings() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumStringsOffset());
  }

  size_t NumResolvedTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumResolvedTypesOffset());
  }

  size_t NumResolvedMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumResolvedMethodsOffset());
  }

  size_t NumResolvedFields() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumResolvedFieldsOffset());
  }

  size_t NumResolvedMethodTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumResolvedMethodTypesOffset());
  }

  size_t NumResolvedCallSites() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumResolvedCallSitesOffset());
  }

  const DexFile* GetDexFile() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_));
  }

  void SetDexFile(const DexFile* dex_file) REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
  }

  void SetLocation(ObjPtr<String> location) REQUIRES_SHARED(Locks::mutator_lock_);

  // NOTE: Get/SetElementPtrSize() are intended for working with ArtMethod** and ArtField**
  // provided by GetResolvedMethods/Fields() and ArtMethod::GetDexCacheResolvedMethods(),
  // so they need to be public.

  template <typename PtrType>
  static PtrType GetElementPtrSize(PtrType* ptr_array, size_t idx, PointerSize ptr_size);

  template <typename PtrType>
  static void SetElementPtrSize(PtrType* ptr_array, size_t idx, PtrType ptr, PointerSize ptr_size);
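
  // A hedged usage sketch (the real callers are in dex_cache-inl.h): reading
  // a resolved method out of the pointer-size-aware array might look like
  //   ArtMethod* method =
  //       GetElementPtrSize(GetResolvedMethods(), method_idx, ptr_size);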

  template <typename T>
  static NativeDexCachePair<T> GetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
                                                    size_t idx,
                                                    PointerSize ptr_size);

  template <typename T>
  static void SetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
                                   size_t idx,
                                   NativeDexCachePair<T> pair,
                                   PointerSize ptr_size);

  uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
  uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
  uint32_t FieldSlotIndex(uint32_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
  uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
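
  // The SlotIndex helpers presumably fold a dex index into the power-of-two
  // caches above, along the lines of (a sketch, assuming plain modulo
  // slotting):
  //   uint32_t slot = string_idx.index_ % kDexCacheStringCacheSize;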

 private:
  void Init(const DexFile* dex_file,
            ObjPtr<String> location,
            StringDexCacheType* strings,
            uint32_t num_strings,
            TypeDexCacheType* resolved_types,
            uint32_t num_resolved_types,
            ArtMethod** resolved_methods,
            uint32_t num_resolved_methods,
            FieldDexCacheType* resolved_fields,
            uint32_t num_resolved_fields,
            MethodTypeDexCacheType* resolved_method_types,
            uint32_t num_resolved_method_types,
            GcRoot<CallSite>* resolved_call_sites,
            uint32_t num_resolved_call_sites,
            PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // std::pair<> is not trivially copyable and as such it is unsuitable for atomic operations,
  // so we use a custom pair class for loading and storing the NativeDexCachePair<>.
  template <typename IntType>
  struct PACKED(2 * sizeof(IntType)) ConversionPair {
    ConversionPair(IntType f, IntType s) : first(f), second(s) { }
    ConversionPair(const ConversionPair&) = default;
    ConversionPair& operator=(const ConversionPair&) = default;
    IntType first;
    IntType second;
  };
  using ConversionPair32 = ConversionPair<uint32_t>;
  using ConversionPair64 = ConversionPair<uint64_t>;
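
  // Assumed conversion pattern (a sketch; the actual implementation lives in
  // dex_cache-inl.h): a 64-bit NativeDexCachePair<T> is presumably
  // reinterpreted as a ConversionPair64 so it can go through the 16-byte
  // atomic routines below, e.g.
  //   auto* array = reinterpret_cast<std::atomic<ConversionPair64>*>(pair_array);
  //   ConversionPair64 v = AtomicLoadRelaxed16B(&array[idx]);
  //   NativeDexCachePair<T> pair(reinterpret_cast<T*>(v.first),
  //                              static_cast<size_t>(v.second));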

  // Visit instance fields of the dex cache as well as its associated arrays.
  template <bool kVisitNativeRoots,
            VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
            typename Visitor>
  void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);

  // Due to lack of 16-byte atomics support on some architectures, we use
  // hand-crafted routines.
#if defined(__aarch64__)
  // 16-byte atomics are supported on aarch64.
  ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
      std::atomic<ConversionPair64>* target) {
    return target->load(std::memory_order_relaxed);
  }

  ALWAYS_INLINE static void AtomicStoreRelease16B(
      std::atomic<ConversionPair64>* target, ConversionPair64 value) {
    target->store(value, std::memory_order_release);
  }
#elif defined(__x86_64__)
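  // With rdx:rax = rcx:rbx = 0, lock cmpxchg16b either rewrites an all-zero
  // value with zero or leaves memory unchanged; either way it loads the old
  // 16-byte value into rdx:rax, so it doubles as an atomic 16-byte load.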
  ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
      std::atomic<ConversionPair64>* target) {
    uint64_t first, second;
    __asm__ __volatile__(
        "lock cmpxchg16b (%2)"
        : "=&a"(first), "=&d"(second)
        : "r"(target), "a"(0), "d"(0), "b"(0), "c"(0)
        : "cc");
    return ConversionPair64(first, second);
  }

  ALWAYS_INLINE static void AtomicStoreRelease16B(
      std::atomic<ConversionPair64>* target, ConversionPair64 value) {
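    // Load the current 16-byte value into rdx:rax, then loop on lock
    // cmpxchg16b until the swap to |value| succeeds. The locked instruction
    // also provides the release ordering promised by the name.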
    uint64_t first, second;
    __asm__ __volatile__ (
        "movq (%2), %%rax\n\t"
        "movq 8(%2), %%rdx\n\t"
        "1:\n\t"
        "lock cmpxchg16b (%2)\n\t"
        "jnz 1b"
        : "=&a"(first), "=&d"(second)
        : "r"(target), "b"(value.first), "c"(value.second)
        : "cc");
  }
#else
  static ConversionPair64 AtomicLoadRelaxed16B(std::atomic<ConversionPair64>* target);
  static void AtomicStoreRelease16B(std::atomic<ConversionPair64>* target, ConversionPair64 value);
#endif

  HeapReference<String> location_;
  // Number of elements in the resolved_call_sites_ array. Note that this
  // appears here because of our packing logic for 32-bit fields.
  uint32_t num_resolved_call_sites_;

  uint64_t dex_file_;               // const DexFile*
  uint64_t resolved_call_sites_;    // GcRoot<CallSite>* array with num_resolved_call_sites_
                                    // elements.
  uint64_t resolved_fields_;        // std::atomic<FieldDexCachePair>*, array with
                                    // num_resolved_fields_ elements.
  uint64_t resolved_method_types_;  // std::atomic<MethodTypeDexCachePair>* array with
                                    // num_resolved_method_types_ elements.
  uint64_t resolved_methods_;       // ArtMethod**, array with num_resolved_methods_ elements.
  uint64_t resolved_types_;         // TypeDexCacheType*, array with num_resolved_types_ elements.
  uint64_t strings_;                // std::atomic<StringDexCachePair>*, array with num_strings_
                                    // elements.

  uint32_t num_resolved_fields_;        // Number of elements in the resolved_fields_ array.
  uint32_t num_resolved_method_types_;  // Number of elements in the resolved_method_types_ array.
  uint32_t num_resolved_methods_;       // Number of elements in the resolved_methods_ array.
  uint32_t num_resolved_types_;         // Number of elements in the resolved_types_ array.
  uint32_t num_strings_;                // Number of elements in the strings_ array.

  friend struct art::DexCacheOffsets;  // for verifying offset information
  friend class Object;  // For VisitReferences
  DISALLOW_IMPLICIT_CONSTRUCTORS(DexCache);
};

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_DEX_CACHE_H_
    547