// gc/reference_queue.cc  (code-viewer navigation chrome removed from exported copy)
      1 /*
      2  * Copyright (C) 2013 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "reference_queue.h"
     18 
     19 #include "accounting/card_table-inl.h"
     20 #include "base/mutex.h"
     21 #include "collector/concurrent_copying.h"
     22 #include "heap.h"
     23 #include "mirror/class-inl.h"
     24 #include "mirror/object-inl.h"
     25 #include "mirror/reference-inl.h"
     26 #include "object_callbacks.h"
     27 
     28 namespace art {
     29 namespace gc {
     30 
     31 ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
     32 }
     33 
     34 void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref) {
     35   DCHECK(ref != nullptr);
     36   MutexLock mu(self, *lock_);
     37   if (ref->IsUnprocessed()) {
     38     EnqueueReference(ref);
     39   }
     40 }
     41 
// Links |ref| into this queue's circular singly-linked list, which is threaded
// through the Reference.pendingNext field. list_ acts as the insertion point:
// new elements are spliced in after list_, and list_ is advanced to the new
// element. Caller must guarantee exclusive access (e.g. hold lock_, as
// AtomicEnqueueIfNotEnqueued does, or be the single-threaded GC owner).
void ReferenceQueue::EnqueueReference(ObjPtr<mirror::Reference> ref) {
  DCHECK(ref != nullptr);
  // Must not already be on a queue; presumably IsUnprocessed() <=>
  // pendingNext == null — TODO(review): confirm against mirror::Reference.
  CHECK(ref->IsUnprocessed());
  if (IsEmpty()) {
    // 1 element cyclic queue, ie: Reference ref = ..; ref.pendingNext = ref;
    list_ = ref.Ptr();
  } else {
    // The list is owned by the GC, everything that has been inserted must already be at least
    // gray.
    ObjPtr<mirror::Reference> head = list_->GetPendingNext<kWithoutReadBarrier>();
    DCHECK(head != nullptr);
    ref->SetPendingNext(head);
  }
  // Add the reference in the middle to preserve the cycle.
  list_->SetPendingNext(ref);
}
     58 
// Removes and returns one reference from the queue: the element after list_
// (list_'s pendingNext). Precondition: queue is non-empty. The returned ref
// has its pendingNext cleared, i.e. it reads as unprocessed again.
// Callers pairing this with the concurrent copying collector must follow up
// with DisableReadBarrierForReference (see comment there).
ObjPtr<mirror::Reference> ReferenceQueue::DequeuePendingReference() {
  DCHECK(!IsEmpty());
  // Read without a read barrier: the GC owns the list (same convention as
  // EnqueueReference).
  ObjPtr<mirror::Reference> ref = list_->GetPendingNext<kWithoutReadBarrier>();
  DCHECK(ref != nullptr);
  // Note: the following code is thread-safe because it is only called from ProcessReferences which
  // is single threaded.
  if (list_ == ref) {
    // Single-element cycle: queue becomes empty.
    list_ = nullptr;
  } else {
    // Unlink |ref|: list_ now points at |ref|'s successor.
    ObjPtr<mirror::Reference> next = ref->GetPendingNext<kWithoutReadBarrier>();
    list_->SetPendingNext(next);
  }
  ref->SetPendingNext(nullptr);
  return ref;
}
     74 
// This must be called whenever DequeuePendingReference is called.
// When the concurrent copying (CC) collector is active with a Baker/Brooks
// read barrier, references sitting in the queue were left gray by
// ConcurrentCopying::ProcessMarkStackRef(); once dequeued, flip them back to
// non-gray so mutators do not take the read-barrier slow path on them.
// No-op for all other collector configurations.
void ReferenceQueue::DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref) {
  Heap* heap = Runtime::Current()->GetHeap();
  if (kUseBakerOrBrooksReadBarrier && heap->CurrentCollectorType() == kCollectorTypeCC &&
      heap->ConcurrentCopyingCollector()->IsActive()) {
    // Change the gray ptr we left in ConcurrentCopying::ProcessMarkStackRef() to non-gray.
    // We check IsActive() above because we don't want to do this when the zygote compaction
    // collector (SemiSpace) is running.
    CHECK(ref != nullptr);
    collector::ConcurrentCopying* concurrent_copying = heap->ConcurrentCopyingCollector();
    uint32_t rb_state = ref->GetReadBarrierState();
    if (rb_state == ReadBarrier::GrayState()) {
      // Atomically flip gray -> non-gray, then verify the transition took.
      ref->AtomicSetReadBarrierState(ReadBarrier::GrayState(), ReadBarrier::NonGrayState());
      CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState());
    } else {
      // In ConcurrentCopying::ProcessMarkStackRef() we may leave a non-gray reference in the queue
      // and find it here, which is OK.
      CHECK_EQ(rb_state, ReadBarrier::NonGrayState()) << "ref=" << ref << " rb_state=" << rb_state;
      ObjPtr<mirror::Object> referent = ref->GetReferent<kWithoutReadBarrier>();
      // The referent could be null if it's cleared by a mutator (Reference.clear()).
      if (referent != nullptr) {
        // A non-gray reference must already have its referent in to-space.
        CHECK(concurrent_copying->IsInToSpace(referent.Ptr()))
            << "ref=" << ref << " rb_state=" << ref->GetReadBarrierState()
            << " referent=" << referent;
      }
    }
  }
}
    103 
    104 void ReferenceQueue::Dump(std::ostream& os) const {
    105   ObjPtr<mirror::Reference> cur = list_;
    106   os << "Reference starting at list_=" << list_ << "\n";
    107   if (cur == nullptr) {
    108     return;
    109   }
    110   do {
    111     ObjPtr<mirror::Reference> pending_next = cur->GetPendingNext();
    112     os << "Reference= " << cur << " PendingNext=" << pending_next;
    113     if (cur->IsFinalizerReferenceInstance()) {
    114       os << " Zombie=" << cur->AsFinalizerReference()->GetZombie();
    115     }
    116     os << "\n";
    117     cur = pending_next;
    118   } while (cur != list_);
    119 }
    120 
    121 size_t ReferenceQueue::GetLength() const {
    122   size_t count = 0;
    123   ObjPtr<mirror::Reference> cur = list_;
    124   if (cur != nullptr) {
    125     do {
    126       ++count;
    127       cur = cur->GetPendingNext();
    128     } while (cur != list_);
    129   }
    130   return count;
    131 }
    132 
// Drains this queue. For every reference whose referent is unmarked ("white",
// i.e. unreachable and about to be reclaimed), clears the referent and moves
// the reference to |cleared_references| for later delivery to
// java.lang.ref.ReferenceQueue. References with live (or null) referents are
// simply dropped from the queue.
void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
                                          collector::GarbageCollector* collector) {
  while (!IsEmpty()) {
    ObjPtr<mirror::Reference> ref = DequeuePendingReference();
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    // do_atomic_update is false because this happens during the reference processing phase where
    // Reference.clear() would block.
    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
      // Referent is white, clear it.
      if (Runtime::Current()->IsActiveTransaction()) {
        // Transaction mode: template arg records the write for rollback.
        ref->ClearReferent<true>();
      } else {
        ref->ClearReferent<false>();
      }
      cleared_references->EnqueueReference(ref);
    }
    // Delay disabling the read barrier until here so that the ClearReferent call above in
    // transaction mode will trigger the read barrier.
    DisableReadBarrierForReference(ref);
  }
}
    154 
// Drains this queue of FinalizerReferences. For every reference whose referent
// is unmarked, the referent is resurrected for finalization: it is marked (so
// it survives this GC), stashed in the zombie field, and the referent field is
// cleared; the reference then goes on |cleared_references| so the finalizer
// can run later. Live referents are simply dropped from the queue.
void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
                                                collector::GarbageCollector* collector) {
  while (!IsEmpty()) {
    // Presumably everything on this queue is a FinalizerReference — TODO(review):
    // confirm callers only enqueue finalizer refs here.
    ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    // do_atomic_update is false because this happens during the reference processing phase where
    // Reference.clear() would block.
    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) {
      // Mark keeps the referent alive and yields its (possibly moved) address.
      ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
      // Move the updated referent to the zombie field.
      if (Runtime::Current()->IsActiveTransaction()) {
        // Transaction mode: template arg records the writes for rollback.
        ref->SetZombie<true>(forward_address);
        ref->ClearReferent<true>();
      } else {
        ref->SetZombie<false>(forward_address);
        ref->ClearReferent<false>();
      }
      cleared_references->EnqueueReference(ref);
    }
    // Delay disabling the read barrier until here so that the ClearReferent call above in
    // transaction mode will trigger the read barrier.
    DisableReadBarrierForReference(ref->AsReference());
  }
}
    179 
    180 void ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) {
    181   if (UNLIKELY(IsEmpty())) {
    182     return;
    183   }
    184   const ObjPtr<mirror::Reference> head = list_;
    185   ObjPtr<mirror::Reference> ref = head;
    186   do {
    187     mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    188     if (referent_addr->AsMirrorPtr() != nullptr) {
    189       // do_atomic_update is false because mutators can't access the referent due to the weak ref
    190       // access blocking.
    191       visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ false);
    192     }
    193     ref = ref->GetPendingNext();
    194   } while (LIKELY(ref != head));
    195 }
    196 
    197 void ReferenceQueue::UpdateRoots(IsMarkedVisitor* visitor) {
    198   if (list_ != nullptr) {
    199     list_ = down_cast<mirror::Reference*>(visitor->IsMarked(list_));
    200   }
    201 }
    202 
    203 }  // namespace gc
    204 }  // namespace art
    205