/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_

#include "concurrent_copying.h"

#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/region_space.h"
#include "lock_word.h"
#include "mirror/object-readbarrier-inl.h"

namespace art {
namespace gc {
namespace collector {

// Marks an object that lives in an unevac from-space region. Such objects are
// marked in place (by graying the read barrier state and/or setting the bitmap
// bit) rather than copied; the object is pushed onto the mark stack when it is
// newly marked. Always returns |ref| itself.
inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion(
    mirror::Object* ref, accounting::ContinuousSpaceBitmap* bitmap) {
  // For the Baker-style RB, in a rare case, we could incorrectly change the object from white
  // to gray even though the object has already been marked through. This happens if a mutator
  // thread gets preempted before the AtomicSetReadBarrierState below, GC marks through the
  // object (changes it from white to gray and back to white), and the thread runs and
  // incorrectly changes it from white to gray. If this happens, the object will get added to the
  // mark stack again and get changed back to white after it is processed.
  if (kUseBakerReadBarrier) {
    // Test the bitmap first to avoid graying an object that has already been marked through most
    // of the time.
    if (bitmap->Test(ref)) {
      return ref;
    }
  }
  // This may or may not succeed, which is ok because the object may already be gray.
  bool success = false;
  if (kUseBakerReadBarrier) {
    // GC will mark the bitmap when popping from mark stack. If only the GC is touching the bitmap
    // we can avoid an expensive CAS.
    // For the baker case, an object is marked if either the mark bit is set or the bitmap bit is
    // set.
    success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(), ReadBarrier::GrayState());
  } else {
    // Non-Baker: the bitmap alone records the mark; test-and-set tells us if we won the race.
    success = !bitmap->AtomicTestAndSet(ref);
  }
  if (success) {
    // Newly marked. Queue the object so its references get traced.
    if (kUseBakerReadBarrier) {
      DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
    }
    PushOntoMarkStack(ref);
  }
  return ref;
}

// Marks an object in an immune space. With the Baker read barrier, grays the
// object (white -> gray CAS) and records it on immune_gray_stack_ so the GC can
// later scan and un-gray it; no-op once updated_all_immune_objects_ is set, or
// when kGrayImmuneObject is false. Always returns |ref| itself.
template<bool kGrayImmuneObject>
inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(mirror::Object* ref) {
  if (kUseBakerReadBarrier) {
    // The GC-running thread doesn't (need to) gray immune objects except when updating thread roots
    // in the thread flip on behalf of suspended threads (when gc_grays_immune_objects_ is
    // true). Also, a mutator doesn't (need to) gray an immune object after GC has updated all
    // immune space objects (when updated_all_immune_objects_ is true).
    if (kIsDebugBuild) {
      if (Thread::Current() == thread_running_gc_) {
        DCHECK(!kGrayImmuneObject ||
               updated_all_immune_objects_.LoadRelaxed() ||
               gc_grays_immune_objects_);
      } else {
        DCHECK(kGrayImmuneObject);
      }
    }
    if (!kGrayImmuneObject || updated_all_immune_objects_.LoadRelaxed()) {
      return ref;
    }
    // This may or may not succeed, which is ok because the object may already be gray.
    bool success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(),
                                                  ReadBarrier::GrayState());
    if (success) {
      // We won the CAS; remember the object so the GC can process it later.
      MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
      immune_gray_stack_.push_back(ref);
    }
  }
  return ref;
}

// Central marking entry point: returns the marked (to-space) version of
// |from_ref|, dispatching on which kind of region the object lives in.
// |holder| and |offset| identify the field the reference was loaded from
// (passed through to Copy/MarkNonMoving). Returns nullptr for nullptr input.
template<bool kGrayImmuneObject, bool kFromGCThread>
inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref,
                                               mirror::Object* holder,
                                               MemberOffset offset) {
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  if (kFromGCThread) {
    DCHECK(is_active_);
    DCHECK_EQ(Thread::Current(), thread_running_gc_);
  } else if (UNLIKELY(kUseBakerReadBarrier && !is_active_)) {
    // In the lock word forward address state, the read barrier bits
    // in the lock word are part of the stored forwarding address and
    // invalid. This is usually OK as the from-space copy of objects
    // aren't accessed by mutators due to the to-space
    // invariant. However, during the dex2oat image writing relocation
    // and the zygote compaction, objects can be in the forward
    // address state (to store the forward/relocation addresses) and
    // they can still be accessed and the invalid read barrier bits
    // are consulted. If they look like gray but aren't really, the
    // read barriers slow path can trigger when it shouldn't. To guard
    // against this, return here if the CC collector isn't running.
    return from_ref;
  }
  DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  switch (rtype) {
    case space::RegionSpace::RegionType::kRegionTypeToSpace:
      // It's already marked.
      return from_ref;
    case space::RegionSpace::RegionType::kRegionTypeFromSpace: {
      mirror::Object* to_ref = GetFwdPtr(from_ref);
      if (to_ref == nullptr) {
        // It isn't marked yet. Mark it by copying it to the to-space.
        to_ref = Copy(from_ref, holder, offset);
      }
      DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
          << "from_ref=" << from_ref << " to_ref=" << to_ref;
      return to_ref;
    }
    case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace: {
      // Unevac regions are marked in place, not copied.
      return MarkUnevacFromSpaceRegion(from_ref, region_space_bitmap_);
    }
    case space::RegionSpace::RegionType::kRegionTypeNone:
      // Not in the region space: either an immune space or the non-moving space.
      if (immune_spaces_.ContainsObject(from_ref)) {
        return MarkImmuneSpace<kGrayImmuneObject>(from_ref);
      } else {
        return MarkNonMoving(from_ref, holder, offset);
      }
    default:
      UNREACHABLE();
  }
}

// Read-barrier slow path entry: marks |from_ref| (optionally with timing
// measurements) and, for the Baker barrier, additionally sets the mark bit on
// the result and records it on rb_mark_bit_stack_ so the bit can be cleared
// after the GC. Returns |from_ref| unchanged when GC marking is not running.
inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* from_ref) {
  mirror::Object* ret;
  // We can get here before marking starts since we gray immune objects before the marking phase.
  if (from_ref == nullptr || !Thread::Current()->GetIsGcMarking()) {
    return from_ref;
  }
  // TODO: Consider removing this check when we are done investigating slow paths. b/30162165
  if (UNLIKELY(mark_from_read_barrier_measurements_)) {
    ret = MarkFromReadBarrierWithMeasurements(from_ref);
  } else {
    ret = Mark(from_ref);
  }
  // Only set the mark bit for baker barrier.
  if (kUseBakerReadBarrier && LIKELY(!rb_mark_bit_stack_full_ && ret->AtomicSetMarkBit(0, 1))) {
    // If the mark stack is full, we may temporarily go to mark and back to unmarked. Seeing both
    // values are OK since the only race is doing an unnecessary Mark.
    if (!rb_mark_bit_stack_->AtomicPushBack(ret)) {
      // Mark stack is full, set the bit back to zero.
      CHECK(ret->AtomicSetMarkBit(1, 0));
      // Set rb_mark_bit_stack_full_, this is racy but OK since AtomicPushBack is thread safe.
      rb_mark_bit_stack_full_ = true;
    }
  }
  return ret;
}

// Returns the to-space forwarding address stored in |from_ref|'s lock word,
// or nullptr if the object has not been forwarded yet. |from_ref| must be in
// the from-space.
inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    DCHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}

// Returns whether an object in an unevac from-space region has already been
// marked, checking the read barrier state (Baker) before falling back to the
// region space bitmap.
inline bool ConcurrentCopying::IsMarkedInUnevacFromSpace(mirror::Object* from_ref) {
  // Use load acquire on the read barrier pointer to ensure that we never see a white read barrier
  // state with an unmarked bit due to reordering.
  DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
  if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
    return true;
  }
  return region_space_bitmap_->Test(from_ref);
}

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_INL_H_