/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_queue.h"

#include "accounting/card_table-inl.h"
#include "collector/concurrent_copying.h"
#include "heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"

namespace art {
namespace gc {

ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
}

void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref) {
  DCHECK(ref != nullptr);
  MutexLock mu(self, *lock_);
  if (ref->IsUnprocessed()) {
    EnqueueReference(ref);
  }
}

void ReferenceQueue::EnqueueReference(ObjPtr<mirror::Reference> ref) {
  DCHECK(ref != nullptr);
  CHECK(ref->IsUnprocessed());
  if (IsEmpty()) {
    // One-element cyclic queue, i.e.: Reference ref = ..; ref.pendingNext = ref;
    list_ = ref.Ptr();
  } else {
    // The list is owned by the GC, so everything that has been inserted must already be at
    // least gray.
    ObjPtr<mirror::Reference> head = list_->GetPendingNext<kWithoutReadBarrier>();
    DCHECK(head != nullptr);
    ref->SetPendingNext(head);
  }
  // Add the reference in the middle to preserve the cycle.
  list_->SetPendingNext(ref);
}

ObjPtr<mirror::Reference> ReferenceQueue::DequeuePendingReference() {
  DCHECK(!IsEmpty());
  ObjPtr<mirror::Reference> ref = list_->GetPendingNext<kWithoutReadBarrier>();
  DCHECK(ref != nullptr);
  // Note: the following code is thread-safe because it is only called from ProcessReferences,
  // which is single threaded.
  if (list_ == ref) {
    list_ = nullptr;
  } else {
    ObjPtr<mirror::Reference> next = ref->GetPendingNext<kWithoutReadBarrier>();
    list_->SetPendingNext(next);
  }
  ref->SetPendingNext(nullptr);
  return ref;
}
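
// Illustration of the cyclic layout maintained by the two functions above: enqueueing A, B,
// and C (in that order) into an empty queue yields
//   list_ == A,  A.pendingNext == C,  C.pendingNext == B,  B.pendingNext == A.
// Since EnqueueReference() inserts at list_->pendingNext and DequeuePendingReference() also
// removes from list_->pendingNext, the most recently enqueued reference is dequeued first.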

// This must be called whenever DequeuePendingReference is called.
void ReferenceQueue::DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref) {
  Heap* heap = Runtime::Current()->GetHeap();
  if (kUseBakerOrBrooksReadBarrier && heap->CurrentCollectorType() == kCollectorTypeCC &&
      heap->ConcurrentCopyingCollector()->IsActive()) {
    // Change the gray ptr we left in ConcurrentCopying::ProcessMarkStackRef() to white.
    // We check IsActive() above because we don't want to do this when the zygote compaction
    // collector (SemiSpace) is running.
    CHECK(ref != nullptr);
    collector::ConcurrentCopying* concurrent_copying = heap->ConcurrentCopyingCollector();
    uint32_t rb_state = ref->GetReadBarrierState();
    if (rb_state == ReadBarrier::GrayState()) {
      ref->AtomicSetReadBarrierState(ReadBarrier::GrayState(), ReadBarrier::WhiteState());
      CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState());
    } else {
      // In ConcurrentCopying::ProcessMarkStackRef() we may leave a white reference in the queue
      // and find it here, which is OK.
      CHECK_EQ(rb_state, ReadBarrier::WhiteState())
          << "ref=" << ref << " rb_state=" << rb_state;
      ObjPtr<mirror::Object> referent = ref->GetReferent<kWithoutReadBarrier>();
      // The referent could be null if it's cleared by a mutator (Reference.clear()).
      if (referent != nullptr) {
        CHECK(concurrent_copying->IsInToSpace(referent.Ptr()))
            << "ref=" << ref << " rb_state=" << ref->GetReadBarrierState()
            << " referent=" << referent;
      }
    }
  }
}

void ReferenceQueue::Dump(std::ostream& os) const {
  ObjPtr<mirror::Reference> cur = list_;
  os << "Reference starting at list_=" << list_ << "\n";
  if (cur == nullptr) {
    return;
  }
  do {
    ObjPtr<mirror::Reference> pending_next = cur->GetPendingNext();
    os << "Reference=" << cur << " PendingNext=" << pending_next;
    if (cur->IsFinalizerReferenceInstance()) {
      os << " Zombie=" << cur->AsFinalizerReference()->GetZombie();
    }
    os << "\n";
    cur = pending_next;
  } while (cur != list_);
}

size_t ReferenceQueue::GetLength() const {
  size_t count = 0;
  ObjPtr<mirror::Reference> cur = list_;
  if (cur != nullptr) {
    do {
      ++count;
      cur = cur->GetPendingNext();
    } while (cur != list_);
  }
  return count;
}

void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
                                          collector::GarbageCollector* collector) {
  while (!IsEmpty()) {
    ObjPtr<mirror::Reference> ref = DequeuePendingReference();
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    // do_atomic_update is false because this happens during the reference processing phase where
    // Reference.clear() would block.
    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/ false)) {
      // Referent is white, clear it.
      if (Runtime::Current()->IsActiveTransaction()) {
        ref->ClearReferent<true>();
      } else {
        ref->ClearReferent<false>();
      }
      cleared_references->EnqueueReference(ref);
    }
    // Delay disabling the read barrier until here so that the ClearReferent call above in
    // transaction mode will trigger the read barrier.
    DisableReadBarrierForReference(ref);
  }
}
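
// Enqueues finalizer references whose referents are white (not yet marked). Each white
// referent is marked so that it remains alive until its finalizer has run, moved to the
// reference's zombie field, and cleared from the referent field; the reference itself is
// passed to cleared_references so it can later be enqueued for finalization.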
void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
                                                collector::GarbageCollector* collector) {
  while (!IsEmpty()) {
    ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    // do_atomic_update is false because this happens during the reference processing phase where
    // Reference.clear() would block.
    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/ false)) {
      ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
      // Move the updated referent to the zombie field.
      if (Runtime::Current()->IsActiveTransaction()) {
        ref->SetZombie<true>(forward_address);
        ref->ClearReferent<true>();
      } else {
        ref->SetZombie<false>(forward_address);
        ref->ClearReferent<false>();
      }
      cleared_references->EnqueueReference(ref);
    }
    // Delay disabling the read barrier until here so that the ClearReferent call above in
    // transaction mode will trigger the read barrier.
    DisableReadBarrierForReference(ref->AsReference());
  }
}

void ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) {
  if (UNLIKELY(IsEmpty())) {
    return;
  }
  ObjPtr<mirror::Reference> const head = list_;
  ObjPtr<mirror::Reference> ref = head;
  do {
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    if (referent_addr->AsMirrorPtr() != nullptr) {
      // do_atomic_update is false because mutators can't access the referent due to the weak ref
      // access blocking.
      visitor->MarkHeapReference(referent_addr, /*do_atomic_update*/ false);
    }
    ref = ref->GetPendingNext();
  } while (LIKELY(ref != head));
}

void ReferenceQueue::UpdateRoots(IsMarkedVisitor* visitor) {
  if (list_ != nullptr) {
    list_ = down_cast<mirror::Reference*>(visitor->IsMarked(list_));
  }
}

}  // namespace gc
}  // namespace art