/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_

#include "concurrent_copying.h"

#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/region_space.h"
#include "gc/verification.h"
#include "lock_word.h"
#include "mirror/object-readbarrier-inl.h"

namespace art {
namespace gc {
namespace collector {

inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion(
    mirror::Object* ref, accounting::ContinuousSpaceBitmap* bitmap) {
  // For the Baker-style RB, in a rare case, we could incorrectly change the object from white
  // to gray even though the object has already been marked through. This happens if a mutator
  // thread gets preempted before the AtomicSetReadBarrierState below, GC marks through the
  // object (changes it from white to gray and back to white), and the thread runs and
  // incorrectly changes it from white to gray. If this happens, the object will get added to the
  // mark stack again and get changed back to white after it is processed.
  if (kUseBakerReadBarrier) {
    // Test the bitmap first; most of the time this avoids graying an object that has already
    // been marked through.
    if (bitmap->Test(ref)) {
      return ref;
    }
  }
  // This may or may not succeed, which is ok because the object may already be gray.
  bool success = false;
  if (kUseBakerReadBarrier) {
    // The GC will mark the bitmap when popping from the mark stack. If only the GC is touching
    // the bitmap, we can avoid an expensive CAS.
    // For the Baker case, an object is marked if either the mark bit is set or the bitmap bit is
    // set.
    success = ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::WhiteState(),
                                             /* rb_state */ ReadBarrier::GrayState());
  } else {
    success = !bitmap->AtomicTestAndSet(ref);
  }
  if (success) {
    // Newly marked.
    if (kUseBakerReadBarrier) {
      DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
    }
    PushOntoMarkStack(ref);
  }
  return ref;
}

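// Editorial note (illustrative sketch, an assumption rather than the runtime's actual code): the
// white-to-gray transition attempted above via AtomicSetReadBarrierState() is conceptually a
// compare-and-swap on the read barrier bits stored in the object's lock word, along the lines of:
//
//   LockWord lw = ref->GetLockWord(false);
//   if (lw.ReadBarrierState() != ReadBarrier::WhiteState()) return false;  // Already gray/marked.
//   LockWord new_lw = lw;
//   new_lw.SetReadBarrierState(ReadBarrier::GrayState());
//   return CasLockWord(ref, lw, new_lw);  // Hypothetical CAS helper, retried on spurious failure.
//
// Only the thread that wins this transition pushes the object onto the mark stack, which is why a
// failed CAS above is benign: some other thread has already queued (or processed) the object.
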
template<bool kGrayImmuneObject>
inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(mirror::Object* ref) {
  if (kUseBakerReadBarrier) {
    // The GC-running thread doesn't (need to) gray immune objects except when updating thread
    // roots in the thread flip on behalf of suspended threads (when gc_grays_immune_objects_ is
    // true). Also, a mutator doesn't (need to) gray an immune object after GC has updated all
    // immune space objects (when updated_all_immune_objects_ is true).
    if (kIsDebugBuild) {
      if (Thread::Current() == thread_running_gc_) {
        DCHECK(!kGrayImmuneObject ||
               updated_all_immune_objects_.LoadRelaxed() ||
               gc_grays_immune_objects_);
      } else {
        DCHECK(kGrayImmuneObject);
      }
    }
    if (!kGrayImmuneObject || updated_all_immune_objects_.LoadRelaxed()) {
      return ref;
    }
    // This may or may not succeed, which is ok because the object may already be gray.
    bool success = ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::WhiteState(),
                                                  /* rb_state */ ReadBarrier::GrayState());
    if (success) {
      MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
      immune_gray_stack_.push_back(ref);
    }
  }
  return ref;
}

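// Editorial note (hedged): references grayed here are recorded on immune_gray_stack_ so that the
// GC can presumably visit them later, mark through their referents, and whiten them; once the GC
// has finished updating the immune spaces (updated_all_immune_objects_ becomes true), the early
// return above turns this path into a no-op for mutators.
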
template<bool kGrayImmuneObject, bool kFromGCThread>
inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref,
                                               mirror::Object* holder,
                                               MemberOffset offset) {
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  if (kFromGCThread) {
    DCHECK(is_active_);
    DCHECK_EQ(Thread::Current(), thread_running_gc_);
  } else if (UNLIKELY(kUseBakerReadBarrier && !is_active_)) {
    // In the lock word forwarding address state, the read barrier bits
    // in the lock word are part of the stored forwarding address and
    // invalid. This is usually OK, as the from-space copies of objects
    // aren't accessed by mutators due to the to-space
    // invariant. However, during the dex2oat image writing relocation
    // and the zygote compaction, objects can be in the forwarding
    // address state (to store the forward/relocation addresses), can
    // still be accessed, and the invalid read barrier bits get
    // consulted. If they look gray but aren't really, the read
    // barrier slow path can trigger when it shouldn't. To guard
    // against this, return here if the CC collector isn't running.
    return from_ref;
  }
  DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
  if (region_space_->HasAddress(from_ref)) {
    space::RegionSpace::RegionType rtype = region_space_->GetRegionTypeUnsafe(from_ref);
    switch (rtype) {
      case space::RegionSpace::RegionType::kRegionTypeToSpace:
        // It's already marked.
        return from_ref;
      case space::RegionSpace::RegionType::kRegionTypeFromSpace: {
        mirror::Object* to_ref = GetFwdPtr(from_ref);
        if (to_ref == nullptr) {
          // It isn't marked yet. Mark it by copying it to the to-space.
          to_ref = Copy(from_ref, holder, offset);
        }
        // The copy should either be in a to-space region, or in the
        // non-moving space, if it could not fit in a to-space region.
        DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
            << "from_ref=" << from_ref << " to_ref=" << to_ref;
        return to_ref;
      }
      case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace:
        return MarkUnevacFromSpaceRegion(from_ref, region_space_bitmap_);
      default:
        // The reference is in an unused region.
        LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(holder, offset, from_ref);
        region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
        heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true);
        UNREACHABLE();
    }
  } else {
    if (immune_spaces_.ContainsObject(from_ref)) {
      return MarkImmuneSpace<kGrayImmuneObject>(from_ref);
    } else {
      return MarkNonMoving(from_ref, holder, offset);
    }
  }
}

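// Editorial usage sketch (an assumption, not code from this file): a GC-internal field visitor
// would typically remap a heap reference through Mark(), passing the holder object and field
// offset so that any heap corruption detected along the way can be reported with context:
//
//   mirror::Object* old_ref = holder->GetFieldObject<mirror::Object>(offset);
//   mirror::Object* new_ref = Mark(old_ref, holder, offset);
//   if (new_ref != old_ref) {
//     // Write new_ref back into the field so it points at the to-space copy.
//   }
//
// The to-space invariant mentioned in the comments above is exactly that, after such a remap,
// mutators only ever observe to-space (or non-moving / immune) references.
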
inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* from_ref) {
  mirror::Object* ret;
  // We can get here before marking starts since we gray immune objects before the marking phase.
  if (from_ref == nullptr || !Thread::Current()->GetIsGcMarking()) {
    return from_ref;
  }
  // TODO: Consider removing this check when we are done investigating slow paths. b/30162165
  if (UNLIKELY(mark_from_read_barrier_measurements_)) {
    ret = MarkFromReadBarrierWithMeasurements(from_ref);
  } else {
    ret = Mark(from_ref);
  }
  // Only set the mark bit for the Baker barrier.
  if (kUseBakerReadBarrier && LIKELY(!rb_mark_bit_stack_full_ && ret->AtomicSetMarkBit(0, 1))) {
    // If the mark stack is full, the bit may temporarily go to marked and back to unmarked.
    // Seeing either value is OK since the only race is doing an unnecessary Mark.
    if (!rb_mark_bit_stack_->AtomicPushBack(ret)) {
      // Mark stack is full, set the bit back to zero.
      CHECK(ret->AtomicSetMarkBit(1, 0));
      // Set rb_mark_bit_stack_full_; this is racy but OK since AtomicPushBack is thread safe.
      rb_mark_bit_stack_full_ = true;
    }
  }
  return ret;
}

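// Editorial note (hedged): the mark bit set above is a fast-path optimization so that repeated
// read barrier hits on the same reference can bail out early instead of re-entering Mark(). The
// references pushed onto rb_mark_bit_stack_ are presumably walked later to clear those bits
// again; when the stack overflows, the bit is rolled back and rb_mark_bit_stack_full_ simply
// disables the optimization for the rest of the cycle, which is safe because Mark() is
// idempotent.
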
inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    DCHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

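// Editorial sketch (an assumption, not this file's code): the forwarding address read above is
// installed by the copying path, roughly by CASing a forwarding lock word into the from-space
// object so that threads racing to copy the same object agree on a single to-space copy:
//
//   LockWord old_lw = from_ref->GetLockWord(false);
//   LockWord fwd_lw = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
//   bool won = CasLockWord(from_ref, old_lw, fwd_lw);  // Hypothetical CAS helper.
//   // The loser discards its speculative copy and adopts the winner's to_ref via GetFwdPtr().
//
// LockWord::FromForwardingAddress() encodes kForwardingAddress as the lock word state, which is
// what GetState() checks for above.
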
inline bool ConcurrentCopying::IsMarkedInUnevacFromSpace(mirror::Object* from_ref) {
  // Use load-acquire on the read barrier state to ensure that we never see a white read barrier
  // state with an unmarked bitmap bit due to reordering.
  DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
  if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
    return true;
  }
  return region_space_bitmap_->Test(from_ref);
}

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_