/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_DEX_CACHE_H_
#define ART_RUNTIME_MIRROR_DEX_CACHE_H_

#include "array.h"
#include "base/bit_utils.h"
#include "base/mutex.h"
#include "dex/dex_file_types.h"
#include "object.h"
#include "object_array.h"

namespace art {

class ArtField;
class ArtMethod;
struct DexCacheOffsets;
class DexFile;
class ImageWriter;
union JValue;
class LinearAlloc;
class Thread;

namespace mirror {

class CallSite;
class Class;
class MethodType;
class String;

template <typename T> struct PACKED(8) DexCachePair {
  GcRoot<T> object;
  uint32_t index;
  // The array is initially [ {0,0}, {0,0}, {0,0} ... ].
  // We maintain the invariant that once a dex cache entry is populated,
  // the pointer is always non-0.
  // Any given entry would thus be:
  // {non-0, non-0} OR {0,0}.
  //
  // It is then generally sufficient to check whether the lookup index
  // matches the stored index (for a >0 lookup index), because if it does,
  // the pointer is also non-null.
  //
  // The 0th entry is a special case: its value is either
  // {0,0} (initial state) or {non-0, 0}, the latter indicating that a
  // valid object is stored at that index for a dex section id of 0.
  //
  // As an optimization, we want to avoid branching on the object pointer since
  // it's always non-null if the id branch succeeds (except for the 0th id).
  // We therefore set the initial state of the 0th entry to {0,1}, which is
  // guaranteed to fail the lookup id == stored id branch.
  DexCachePair(ObjPtr<T> object, uint32_t index)
      : object(object),
        index(index) {}
  DexCachePair() : index(0) {}
  DexCachePair(const DexCachePair<T>&) = default;
  DexCachePair& operator=(const DexCachePair<T>&) = default;

  static void Initialize(std::atomic<DexCachePair<T>>* dex_cache) {
    DexCachePair<T> first_elem;
    first_elem.object = GcRoot<T>(nullptr);
    first_elem.index = InvalidIndexForSlot(0);
    dex_cache[0].store(first_elem, std::memory_order_relaxed);
  }

  static uint32_t InvalidIndexForSlot(uint32_t slot) {
    // Since the cache size is a power of two, 0 will always map to slot 0.
    // Use 1 for slot 0 and 0 for all other slots.
    return (slot == 0) ? 1u : 0u;
  }

  T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (idx != index) {
      return nullptr;
    }
    DCHECK(!object.IsNull());
    return object.Read();
  }
};
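// An illustrative sketch of how a DexCachePair array is typically probed
// (hypothetical names; the real slot hashing lives in DexCache::*SlotIndex()):
//
//   uint32_t slot = string_idx % kCacheSize;  // kCacheSize is a power of two.
//   DexCachePair<String> pair = cache[slot].load(std::memory_order_relaxed);
//   String* resolved = pair.GetObjectForIndex(string_idx);  // nullptr on miss.
//
// The index comparison subsumes the null check for every slot except slot 0,
// where the sentinel index from InvalidIndexForSlot(0) makes it safe as well.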

template <typename T> struct PACKED(2 * __SIZEOF_POINTER__) NativeDexCachePair {
  T* object;
  size_t index;
  // This is similar to DexCachePair except that we're storing a native pointer
  // instead of a GC root. See DexCachePair for the details.
  NativeDexCachePair(T* object, uint32_t index)
      : object(object),
        index(index) {}
  NativeDexCachePair() : object(nullptr), index(0u) { }
  NativeDexCachePair(const NativeDexCachePair<T>&) = default;
  NativeDexCachePair& operator=(const NativeDexCachePair<T>&) = default;

  static void Initialize(std::atomic<NativeDexCachePair<T>>* dex_cache, PointerSize pointer_size);

  static uint32_t InvalidIndexForSlot(uint32_t slot) {
    // Since the cache size is a power of two, 0 will always map to slot 0.
    // Use 1 for slot 0 and 0 for all other slots.
    return (slot == 0) ? 1u : 0u;
  }

  T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (idx != index) {
      return nullptr;
    }
    DCHECK(object != nullptr);
    return object;
  }
};

using TypeDexCachePair = DexCachePair<Class>;
using TypeDexCacheType = std::atomic<TypeDexCachePair>;

using StringDexCachePair = DexCachePair<String>;
using StringDexCacheType = std::atomic<StringDexCachePair>;

using FieldDexCachePair = NativeDexCachePair<ArtField>;
using FieldDexCacheType = std::atomic<FieldDexCachePair>;

using MethodDexCachePair = NativeDexCachePair<ArtMethod>;
using MethodDexCacheType = std::atomic<MethodDexCachePair>;

using MethodTypeDexCachePair = DexCachePair<MethodType>;
using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>;

// C++ mirror of java.lang.DexCache.
class MANAGED DexCache FINAL : public Object {
 public:
  // Size of java.lang.DexCache.class.
  static uint32_t ClassSize(PointerSize pointer_size);

  // Size of type dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheTypeCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheTypeCacheSize),
                "Type dex cache size is not a power of 2.");

  // Size of string dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheStringCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheStringCacheSize),
                "String dex cache size is not a power of 2.");

  // Size of field dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheFieldCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheFieldCacheSize),
                "Field dex cache size is not a power of 2.");

  // Size of method dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheMethodCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheMethodCacheSize),
                "Method dex cache size is not a power of 2.");

  // Size of method type dex cache. Needs to be a power of 2 for entrypoint assumptions
  // to hold.
  static constexpr size_t kDexCacheMethodTypeCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheMethodTypeCacheSize),
                "MethodType dex cache size is not a power of 2.");
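  // Illustrative note (an assumption about the entrypoints, not code in this
  // file): a power-of-two size lets slot hashing be a bit mask rather than a
  // division, e.g.
  //
  //   uint32_t slot = idx & (kDexCacheStringCacheSize - 1u);  // == idx % size
  //
  // which is the assumption the static_asserts above protect.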

  static constexpr size_t StaticTypeSize() {
    return kDexCacheTypeCacheSize;
  }

  static constexpr size_t StaticStringSize() {
    return kDexCacheStringCacheSize;
  }

  static constexpr size_t StaticArtFieldSize() {
    return kDexCacheFieldCacheSize;
  }

  static constexpr size_t StaticMethodSize() {
    return kDexCacheMethodCacheSize;
  }

  static constexpr size_t StaticMethodTypeSize() {
    return kDexCacheMethodTypeCacheSize;
  }

  // Size of an instance of java.lang.DexCache not including referenced values.
  static constexpr uint32_t InstanceSize() {
    return sizeof(DexCache);
  }

  static void InitializeDexCache(Thread* self,
                                 ObjPtr<mirror::DexCache> dex_cache,
                                 ObjPtr<mirror::String> location,
                                 const DexFile* dex_file,
                                 LinearAlloc* linear_alloc,
                                 PointerSize image_pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::dex_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
  void FixupStrings(StringDexCacheType* dest, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
  void FixupResolvedTypes(TypeDexCacheType* dest, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
  void FixupResolvedMethodTypes(MethodTypeDexCacheType* dest, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
  void FixupResolvedCallSites(GcRoot<mirror::CallSite>* dest, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  String* GetLocation() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
  }

  static MemberOffset StringsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, strings_);
  }

  static MemberOffset ResolvedTypesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_);
  }

  static MemberOffset ResolvedFieldsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_fields_);
  }

  static MemberOffset ResolvedMethodsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_);
  }

  static MemberOffset ResolvedMethodTypesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_method_types_);
  }

  static MemberOffset ResolvedCallSitesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_call_sites_);
  }

  static MemberOffset NumStringsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_strings_);
  }

  static MemberOffset NumResolvedTypesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_types_);
  }

  static MemberOffset NumResolvedFieldsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_fields_);
  }

  static MemberOffset NumResolvedMethodsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_methods_);
  }

  static MemberOffset NumResolvedMethodTypesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_method_types_);
  }

  static MemberOffset NumResolvedCallSitesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_call_sites_);
  }

  String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedString(dex::StringIndex string_idx, ObjPtr<mirror::String> resolved) ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Clear the string cached for string_idx. Used to undo string intern
  // transactions so that the string is not kept live.
  void ClearString(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  Class* GetResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx,
                                       ArtMethod* resolved,
                                       PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);
  ALWAYS_INLINE void ClearResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Pointer sized variant, used for patching.
  ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Pointer sized variant, used for patching.
  ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);
  ALWAYS_INLINE void ClearResolvedField(uint32_t idx, PointerSize ptr_size)
      REQUIRES_SHARED(Locks::mutator_lock_);

  MethodType* GetResolvedMethodType(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedMethodType(uint32_t proto_idx, MethodType* resolved)
      REQUIRES_SHARED(Locks::mutator_lock_);

  CallSite* GetResolvedCallSite(uint32_t call_site_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  // Attempts to bind |call_site_idx| to the call site |resolved|. The
  // caller must use the return value in place of |resolved|: multiple
  // threads may invoke the bootstrap method concurrently, each producing
  // its own call site, but the method handle invocation must operate on
  // a single agreed-upon call site.
  CallSite* SetResolvedCallSite(uint32_t call_site_idx, CallSite* resolved) WARN_UNUSED
      REQUIRES_SHARED(Locks::mutator_lock_);
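  // Illustrative caller pattern (a sketch, not code from this file): since
  // another thread may win the race to publish the call site, callers must
  // continue with the returned pointer rather than their own candidate.
  //
  //   CallSite* call_site = RunBootstrapMethod(...);  // hypothetical helper
  //   call_site = dex_cache->SetResolvedCallSite(call_site_idx, call_site);
  //   // Invoke through call_site, the value all racing threads agree on.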

  StringDexCacheType* GetStrings() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr64<StringDexCacheType*>(StringsOffset());
  }

  void SetStrings(StringDexCacheType* strings) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(StringsOffset(), strings);
  }

  TypeDexCacheType* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<TypeDexCacheType*>(ResolvedTypesOffset());
  }

  void SetResolvedTypes(TypeDexCacheType* resolved_types)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedTypesOffset(), resolved_types);
  }

  MethodDexCacheType* GetResolvedMethods() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<MethodDexCacheType*>(ResolvedMethodsOffset());
  }

  void SetResolvedMethods(MethodDexCacheType* resolved_methods)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedMethodsOffset(), resolved_methods);
  }

  FieldDexCacheType* GetResolvedFields() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<FieldDexCacheType*>(ResolvedFieldsOffset());
  }

  void SetResolvedFields(FieldDexCacheType* resolved_fields)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedFieldsOffset(), resolved_fields);
  }

  MethodTypeDexCacheType* GetResolvedMethodTypes()
      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr64<MethodTypeDexCacheType*>(ResolvedMethodTypesOffset());
  }

  void SetResolvedMethodTypes(MethodTypeDexCacheType* resolved_method_types)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedMethodTypesOffset(), resolved_method_types);
  }

  GcRoot<CallSite>* GetResolvedCallSites()
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<GcRoot<CallSite>*>(ResolvedCallSitesOffset());
  }

  void SetResolvedCallSites(GcRoot<CallSite>* resolved_call_sites)
      ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(ResolvedCallSitesOffset(), resolved_call_sites);
  }

  size_t NumStrings() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumStringsOffset());
  }

  size_t NumResolvedTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumResolvedTypesOffset());
  }

  size_t NumResolvedMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumResolvedMethodsOffset());
  }

  size_t NumResolvedFields() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumResolvedFieldsOffset());
  }

  size_t NumResolvedMethodTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumResolvedMethodTypesOffset());
  }

  size_t NumResolvedCallSites() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32(NumResolvedCallSitesOffset());
  }

  const DexFile* GetDexFile() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_));
  }

  void SetDexFile(const DexFile* dex_file) REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
  }

  void SetLocation(ObjPtr<String> location) REQUIRES_SHARED(Locks::mutator_lock_);

  template <typename T>
  static NativeDexCachePair<T> GetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
                                                    size_t idx,
                                                    PointerSize ptr_size);

  template <typename T>
  static void SetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
                                   size_t idx,
                                   NativeDexCachePair<T> pair,
                                   PointerSize ptr_size);

  uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
  uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
  uint32_t FieldSlotIndex(uint32_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
  uint32_t MethodSlotIndex(uint32_t method_idx) REQUIRES_SHARED(Locks::mutator_lock_);
  uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  void Init(const DexFile* dex_file,
            ObjPtr<String> location,
            StringDexCacheType* strings,
            uint32_t num_strings,
            TypeDexCacheType* resolved_types,
            uint32_t num_resolved_types,
            MethodDexCacheType* resolved_methods,
            uint32_t num_resolved_methods,
            FieldDexCacheType* resolved_fields,
            uint32_t num_resolved_fields,
            MethodTypeDexCacheType* resolved_method_types,
            uint32_t num_resolved_method_types,
            GcRoot<CallSite>* resolved_call_sites,
            uint32_t num_resolved_call_sites)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // std::pair<> is not trivially copyable and as such it is unsuitable for atomic operations,
  // so we use a custom pair class for loading and storing the NativeDexCachePair<>.
  template <typename IntType>
  struct PACKED(2 * sizeof(IntType)) ConversionPair {
    ConversionPair(IntType f, IntType s) : first(f), second(s) { }
    ConversionPair(const ConversionPair&) = default;
    ConversionPair& operator=(const ConversionPair&) = default;
    IntType first;
    IntType second;
  };
  using ConversionPair32 = ConversionPair<uint32_t>;
  using ConversionPair64 = ConversionPair<uint64_t>;
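  // For reference, an illustrative check of the property relied on above
  // (a sketch, not code from this file): std::atomic<T> requires a trivially
  // copyable T, which std::pair does not guarantee but ConversionPair does.
  //
  //   static_assert(std::is_trivially_copyable<ConversionPair64>::value,
  //                 "ConversionPair64 must be usable with std::atomic");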

  // Visit instance fields of the dex cache as well as its associated arrays.
  template <bool kVisitNativeRoots,
            VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
            typename Visitor>
  void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);

  // On targets without native 16-byte atomics support, we use hand-crafted routines.
#if defined(__aarch64__) || defined(__mips__)
  // 16-byte atomics are supported on aarch64, mips and mips64.
  ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
      std::atomic<ConversionPair64>* target) {
    return target->load(std::memory_order_relaxed);
  }

  ALWAYS_INLINE static void AtomicStoreRelease16B(
      std::atomic<ConversionPair64>* target, ConversionPair64 value) {
    target->store(value, std::memory_order_release);
  }
#elif defined(__x86_64__)
  ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
      std::atomic<ConversionPair64>* target) {
    uint64_t first, second;
    // Atomic 16-byte load: cmpxchg16b with an expected value of 0 either
    // swaps 0 for 0 or fails the comparison, and in both cases leaves the
    // current memory contents in rdx:rax.
    __asm__ __volatile__(
        "lock cmpxchg16b (%2)"
        : "=&a"(first), "=&d"(second)
        : "r"(target), "a"(0), "d"(0), "b"(0), "c"(0)
        : "cc");
    return ConversionPair64(first, second);
  }

  ALWAYS_INLINE static void AtomicStoreRelease16B(
      std::atomic<ConversionPair64>* target, ConversionPair64 value) {
    uint64_t first, second;
    // Atomic 16-byte store: seed rdx:rax with the current value, then loop
    // on cmpxchg16b (which refreshes rdx:rax on failure) until the new
    // value held in rcx:rbx is swapped in.
    __asm__ __volatile__ (
        "movq (%2), %%rax\n\t"
        "movq 8(%2), %%rdx\n\t"
        "1:\n\t"
        "lock cmpxchg16b (%2)\n\t"
        "jnz 1b"
        : "=&a"(first), "=&d"(second)
        : "r"(target), "b"(value.first), "c"(value.second)
        : "cc");
  }
#else
  static ConversionPair64 AtomicLoadRelaxed16B(std::atomic<ConversionPair64>* target);
  static void AtomicStoreRelease16B(std::atomic<ConversionPair64>* target, ConversionPair64 value);
#endif
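  // Illustrative sketch of how these helpers are expected to be used by the
  // 64-bit path of GetNativePairPtrSize() (an assumption about the -inl.h
  // implementation, not code in this header):
  //
  //   auto* array = reinterpret_cast<std::atomic<ConversionPair64>*>(pair_array);
  //   ConversionPair64 value = AtomicLoadRelaxed16B(&array[idx]);
  //   return NativeDexCachePair<T>(reinterpret_cast<T*>(value.first),
  //                                static_cast<size_t>(value.second));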

  HeapReference<String> location_;
  // Number of elements in the resolved_call_sites_ array. Note that this
  // appears here because of our packing logic for 32-bit fields.
  uint32_t num_resolved_call_sites_;

  uint64_t dex_file_;               // const DexFile*
  uint64_t resolved_call_sites_;    // GcRoot<CallSite>* array with num_resolved_call_sites_
                                    // elements.
  uint64_t resolved_fields_;        // std::atomic<FieldDexCachePair>*, array with
                                    // num_resolved_fields_ elements.
  uint64_t resolved_method_types_;  // std::atomic<MethodTypeDexCachePair>* array with
                                    // num_resolved_method_types_ elements.
  uint64_t resolved_methods_;       // std::atomic<MethodDexCachePair>*, array with
                                    // num_resolved_methods_ elements.
  uint64_t resolved_types_;         // TypeDexCacheType*, array with num_resolved_types_ elements.
  uint64_t strings_;                // std::atomic<StringDexCachePair>*, array with num_strings_
                                    // elements.

  uint32_t num_resolved_fields_;        // Number of elements in the resolved_fields_ array.
  uint32_t num_resolved_method_types_;  // Number of elements in the resolved_method_types_ array.
  uint32_t num_resolved_methods_;       // Number of elements in the resolved_methods_ array.
  uint32_t num_resolved_types_;         // Number of elements in the resolved_types_ array.
  uint32_t num_strings_;                // Number of elements in the strings_ array.

  friend struct art::DexCacheOffsets;  // for verifying offset information
  friend class Object;  // For VisitReferences
  DISALLOW_IMPLICIT_CONSTRUCTORS(DexCache);
};

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_DEX_CACHE_H_