/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_OBJECT_READBARRIER_INL_H_
#define ART_RUNTIME_MIRROR_OBJECT_READBARRIER_INL_H_

#include "object.h"

#include "base/atomic.h"
#include "heap_poisoning.h"
#include "lock_word-inl.h"
#include "object_reference-inl.h"
#include "read_barrier.h"
#include "runtime.h"

namespace art {
namespace mirror {

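// Read this object's lock word from the `monitor_` field, either as a plain
// load or (`as_volatile == true`) as a volatile load with stronger ordering
// guarantees.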
template<VerifyObjectFlags kVerifyFlags>
inline LockWord Object::GetLockWord(bool as_volatile) {
  if (as_volatile) {
    return LockWord(GetField32Volatile<kVerifyFlags>(MonitorOffset()));
  }
  return LockWord(GetField32<kVerifyFlags>(MonitorOffset()));
}

template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline bool Object::CasField32(MemberOffset field_offset,
                               int32_t old_value,
                               int32_t new_value,
                               CASMode mode,
                               std::memory_order memory_order) {
  if (kCheckTransaction) {
    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
  }
  if (kTransactionActive) {
    Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true);
  }
  if (kVerifyFlags & kVerifyThis) {
    VerifyObject(this);
  }
  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
  AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);

  return atomic_addr->CompareAndSet(old_value, new_value, mode, memory_order);
}

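// A minimal usage sketch (hypothetical `obj` and `kFlagOffset`): attempt to
// flip a 32-bit flag field from 0 to 1 outside of a transaction, with a
// strong CAS and sequentially consistent ordering.
//
//   bool success = obj->CasField32</*kTransactionActive=*/false,
//                                  /*kCheckTransaction=*/true>(
//       kFlagOffset, /*old_value=*/0, /*new_value=*/1,
//       CASMode::kStrong, std::memory_order_seq_cst);
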
inline bool Object::CasLockWord(LockWord old_val,
                                LockWord new_val,
                                CASMode mode,
                                std::memory_order memory_order) {
  // Force use of non-transactional mode and do not check.
  return CasField32<false, false>(MonitorOffset(),
                                  old_val.GetValue(),
                                  new_val.GetValue(),
                                  mode,
                                  memory_order);
}

inline uint32_t Object::GetReadBarrierState(uintptr_t* fake_address_dependency) {
  if (!kUseBakerReadBarrier) {
    LOG(FATAL) << "Unreachable";
    UNREACHABLE();
  }
#if defined(__arm__)
  uintptr_t obj = reinterpret_cast<uintptr_t>(this);
  uintptr_t result;
  DCHECK_EQ(OFFSETOF_MEMBER(Object, monitor_), 4U);
  // Use inline assembly to prevent the compiler from optimizing away the false dependency.
  __asm__ __volatile__(
      "ldr %[result], [%[obj], #4]\n\t"
      // This instruction is enough to "fool the compiler and the CPU" by having `fad` always be
      // null, without them being able to assume that fact.
      "eor %[fad], %[result], %[result]\n\t"
      : [result] "+r" (result), [fad] "=r" (*fake_address_dependency)
      : [obj] "r" (obj));
  DCHECK_EQ(*fake_address_dependency, 0U);
  LockWord lw(static_cast<uint32_t>(result));
  uint32_t rb_state = lw.ReadBarrierState();
  return rb_state;
#elif defined(__aarch64__)
  uintptr_t obj = reinterpret_cast<uintptr_t>(this);
  uintptr_t result;
  DCHECK_EQ(OFFSETOF_MEMBER(Object, monitor_), 4U);
  // Use inline assembly to prevent the compiler from optimizing away the false dependency.
  __asm__ __volatile__(
      "ldr %w[result], [%[obj], #4]\n\t"
      // This instruction is enough to "fool the compiler and the CPU" by having `fad` always be
      // null, without them being able to assume that fact.
      "eor %[fad], %[result], %[result]\n\t"
      : [result] "+r" (result), [fad] "=r" (*fake_address_dependency)
      : [obj] "r" (obj));
  DCHECK_EQ(*fake_address_dependency, 0U);
  LockWord lw(static_cast<uint32_t>(result));
  uint32_t rb_state = lw.ReadBarrierState();
  return rb_state;
#elif defined(__i386__) || defined(__x86_64__)
  LockWord lw = GetLockWord(false);
  // i386/x86_64 don't need fake address dependency. Use a compiler fence to avoid compiler
  // reordering.
  *fake_address_dependency = 0;
  std::atomic_signal_fence(std::memory_order_acquire);
  uint32_t rb_state = lw.ReadBarrierState();
  return rb_state;
#else
  // MIPS32/MIPS64: use a memory barrier to prevent load-load reordering.
  LockWord lw = GetLockWord(false);
  *fake_address_dependency = 0;
  std::atomic_thread_fence(std::memory_order_acquire);
  uint32_t rb_state = lw.ReadBarrierState();
  return rb_state;
#endif
}

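// A sketch of the intended consumption pattern (hypothetical `obj`, `offset`,
// and `Ref`): the always-zero `fad` is folded into the field address, creating
// an address dependency so the CPU cannot hoist the reference-field load above
// the lock word load.
//
//   uintptr_t fad;
//   uint32_t rb_state = obj->GetReadBarrierState(&fad);
//   // `fad` is always zero, so OR-ing it in is a no-op that the compiler and
//   // CPU must nevertheless treat as a real data dependency.
//   Ref** ref_addr = reinterpret_cast<Ref**>(
//       (reinterpret_cast<uintptr_t>(obj) + offset.SizeValue()) | fad);
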
inline uint32_t Object::GetReadBarrierState() {
  if (!kUseBakerReadBarrier) {
    LOG(FATAL) << "Unreachable";
    UNREACHABLE();
  }
  LockWord lw(GetFieldPrimitive<uint32_t, /*kIsVolatile=*/false>(MonitorOffset()));
  uint32_t rb_state = lw.ReadBarrierState();
  DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
  return rb_state;
}

inline uint32_t Object::GetReadBarrierStateAcquire() {
  if (!kUseBakerReadBarrier) {
    LOG(FATAL) << "Unreachable";
    UNREACHABLE();
  }
  LockWord lw(GetFieldAcquire<uint32_t>(MonitorOffset()));
  uint32_t rb_state = lw.ReadBarrierState();
  DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
  return rb_state;
}

template<std::memory_order kMemoryOrder>
inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state) {
  if (!kUseBakerReadBarrier) {
    LOG(FATAL) << "Unreachable";
    UNREACHABLE();
  }
  DCHECK(ReadBarrier::IsValidReadBarrierState(expected_rb_state)) << expected_rb_state;
  DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
  LockWord expected_lw;
  LockWord new_lw;
  do {
    LockWord lw = GetLockWord(false);
    if (UNLIKELY(lw.ReadBarrierState() != expected_rb_state)) {
      // Lost the race.
      return false;
    }
    expected_lw = lw;
    expected_lw.SetReadBarrierState(expected_rb_state);
    new_lw = lw;
    new_lw.SetReadBarrierState(rb_state);
    // ConcurrentCopying::ProcessMarkStackRef calls this with
    // `kMemoryOrder` == `std::memory_order_release`. The release CAS ensures
    // that when the GC updates all the fields of an object and then changes it
    // from gray to non-gray, those field stores become visible together with
    // the state change (they cannot be reordered past this CAS).
  } while (!CasLockWord(expected_lw, new_lw, CASMode::kWeak, kMemoryOrder));
  return true;
}

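// A minimal GC-side sketch (assuming `ReadBarrier::GrayState()` and
// `ReadBarrier::NonGrayState()` as in read_barrier.h): after scanning all of
// `obj`'s fields, publish those stores by turning the object non-gray with a
// release CAS.
//
//   bool won = obj->AtomicSetReadBarrierState<std::memory_order_release>(
//       ReadBarrier::GrayState(), ReadBarrier::NonGrayState());
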
inline bool Object::AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit) {
  LockWord expected_lw;
  LockWord new_lw;
  do {
    LockWord lw = GetLockWord(false);
    if (UNLIKELY(lw.MarkBitState() != expected_mark_bit)) {
      // Lost the race.
      return false;
    }
    expected_lw = lw;
    new_lw = lw;
    new_lw.SetMarkBitState(mark_bit);
    // Since this is only set from the mutator, we can use the non-release CAS.
  } while (!CasLockWord(expected_lw, new_lw, CASMode::kWeak, std::memory_order_relaxed));
  return true;
}

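// A minimal mutator-side sketch (hypothetical `obj`): try to set the mark bit
// (0 -> 1); `false` means another thread marked the object first.
//
//   bool marked =
//       obj->AtomicSetMarkBit(/*expected_mark_bit=*/0u, /*mark_bit=*/1u);
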
}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_OBJECT_READBARRIER_INL_H_