/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "base/time_utils.h"
#include "collector/garbage_collector.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "reference_processor-inl.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "task_processor.h"
#include "utils.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

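// If true, enqueue cleared references asynchronously via the heap's TaskProcessor instead of
// calling ReferenceQueue.add synchronously. Disabled by default; see the TODO in
// EnqueueClearedReferences() for how the asynchronous path can race with RunFinalization.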
static constexpr bool kAsyncReferenceQueueAdd = false;

ReferenceProcessor::ReferenceProcessor()
    : collector_(nullptr),
      preserving_references_(false),
      condition_("reference processor condition", *Locks::reference_processor_lock_),
      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}

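// Force Reference.get() onto the slow path (GetReferent below) by setting the slow path flag on
// the java.lang.ref.Reference class.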
void ReferenceProcessor::EnableSlowPath() {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(true);
}

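// Re-enable the fast path and wake up any mutators blocked in GetReferent().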
void ReferenceProcessor::DisableSlowPath(Thread* self) {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(false);
  condition_.Broadcast(self);
}

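// Wake up mutators blocked in GetReferent() without touching the slow path flag. Only expected
// with read barriers enabled (i.e. under the concurrent copying collector), hence the CHECK.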
void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
  CHECK(kUseReadBarrier);
  MutexLock mu(self, *Locks::reference_processor_lock_);
  condition_.Broadcast(self);
}

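// Slow-path implementation of Reference.get(). The mutator lands here (roughly, via the native
// Reference.getReferent entry point, though the exact wiring lives outside this file) whenever
// the slow path flag is set or weak ref access is disabled, and it may block until reference
// processing reaches a state where the referent can be safely returned.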
mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
  if (!kUseReadBarrier || self->GetWeakRefAccessEnabled()) {
    // Under read barrier / concurrent copying collector, it's not safe to call GetReferent() when
    // weak ref access is disabled as the call includes a read barrier which may push a ref onto
    // the mark stack and interfere with termination of marking.
    mirror::Object* const referent = reference->GetReferent();
    // If the referent is null then it is already cleared, we can just return null since there is
    // no scenario where it becomes non-null during the reference processing phase.
    if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
      return referent;
    }
  }
  MutexLock mu(self, *Locks::reference_processor_lock_);
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    mirror::HeapReference<mirror::Object>* const referent_addr =
        reference->GetReferentReferenceAddr();
    // If the referent became cleared, return it. No barrier is needed since thread roots can't
    // get updated until after we leave the function, due to holding the mutator lock.
    if (referent_addr->AsMirrorPtr() == nullptr) {
      return nullptr;
    }
    // Try to see if the referent is already marked by asking the collector. We can return it to
    // the mutator as long as the GC is not preserving references.
    if (LIKELY(collector_ != nullptr)) {
      // If it's null it means not marked, but it could become marked if the referent is reachable
      // by finalizer referents. So we cannot return in this case and must block. Otherwise, we
      // can return it to the mutator as long as the GC is not preserving references, in which
      // case only black nodes can be safely returned. If the GC is preserving references, the
      // mutator could take a white field from a grey or white node and move it somewhere else
      // in the heap, causing corruption since this field would get swept.
      if (collector_->IsMarkedHeapReference(referent_addr)) {
        if (!preserving_references_ ||
            (LIKELY(!reference->IsFinalizerReferenceInstance()) && reference->IsUnprocessed())) {
          return referent_addr->AsMirrorPtr();
        }
      }
    }
    condition_.WaitHoldingLocks(self);
  }
  return reference->GetReferent();
}

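// While references are being preserved (between StartPreservingReferences and
// StopPreservingReferences), GetReferent() refuses to hand out referents that the GC might still
// clear or move, so blocked mutators wait on condition_ until preservation finishes.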
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = true;
}

void ReferenceProcessor::StopPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = false;
  // We are done preserving references; waiters blocked in GetReferent may now see a marked
  // referent.
  condition_.Broadcast(self);
}

// Process reference class instances and schedule finalizations.
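// Roughly, the phases below are: (1) unless soft references must be cleared, forward (preserve)
// softly reachable referents; (2) clear soft and weak references with white (unmarked) referents;
// (3) preserve white objects with finalizers and schedule them for finalization; (4) clear soft
// and weak references that only became reachable through finalizers; (5) clear phantom references.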
void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
                                           bool clear_soft_references,
                                           collector::GarbageCollector* collector) {
  TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    collector_ = collector;
    if (!kUseReadBarrier) {
      CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
    } else {
      // Weak ref access is enabled at Zygote compaction by SemiSpace (concurrent == false).
      CHECK_EQ(!self->GetWeakRefAccessEnabled(), concurrent);
    }
  }
  // Unless we are required to clear soft references, preserve some white referents that are
  // reachable through soft references.
  if (!clear_soft_references) {
    TimingLogger::ScopedTiming split(concurrent ? "ForwardSoftReferences" :
        "(Paused)ForwardSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // TODO: Add smarter logic for preserving soft references. The behavior should be a conditional
    // mark if the SoftReference is supposed to be preserved.
    soft_reference_queue_.ForwardSoftReferences(collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  {
    TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
        "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all soft and weak references with white referents that are only reachable through
  // finalizer referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // Need to always do this since the next GC may be concurrent. Doing this only for the
    // concurrent case could result in a stale collector_ being used before reference processing
    // starts, since there is a small window of time where the slow path is enabled but
    // collector_ isn't yet set.
    collector_ = nullptr;
    if (!kUseReadBarrier && concurrent) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                                                collector::GarbageCollector* collector) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass != nullptr);
  DCHECK(klass->IsTypeOfReferenceClass());
  mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
  if (referent->AsMirrorPtr() != nullptr && !collector->IsMarkedHeapReference(referent)) {
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsWeakReferenceClass()) {
      weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsFinalizerReferenceClass()) {
      finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsPhantomReferenceClass()) {
      phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else {
      LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                 << klass->GetAccessFlags();
    }
  }
}

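// Visit the cleared references list as GC roots so that, e.g., a moving collector can update the
// pointers.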
void ReferenceProcessor::UpdateRoots(IsMarkedVisitor* visitor) {
  cleared_references_.UpdateRoots(visitor);
}

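// Heap task that hands a list of cleared references to java.lang.ref.ReferenceQueue.add(), the
// Java-side entry point for enqueuing cleared references. Takes ownership of the global ref and
// deletes it after the call.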
class ClearedReferenceTask : public HeapTask {
 public:
  explicit ClearedReferenceTask(jobject cleared_references)
      : HeapTask(NanoTime()), cleared_references_(cleared_references) {
  }
  virtual void Run(Thread* thread) {
    ScopedObjectAccess soa(thread);
    jvalue args[1];
    args[0].l = cleared_references_;
    InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
    soa.Env()->DeleteGlobalRef(cleared_references_);
  }

 private:
  const jobject cleared_references_;
};

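// Hand any cleared references accumulated during reference processing over to the Java side,
// either synchronously or via the heap's TaskProcessor depending on kAsyncReferenceQueueAdd.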
void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
  Locks::mutator_lock_->AssertNotHeld(self);
  // When the runtime isn't started there are no reference queues to care about, so ignore.
  if (!cleared_references_.IsEmpty()) {
    if (LIKELY(Runtime::Current()->IsStarted())) {
      jobject cleared_references;
      {
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        cleared_references = self->GetJniEnv()->vm->AddGlobalRef(
            self, cleared_references_.GetList());
      }
      if (kAsyncReferenceQueueAdd) {
        // TODO: This can cause RunFinalization to terminate before newly freed objects are
        // finalized since they may not be enqueued by the time RunFinalization starts.
        Runtime::Current()->GetHeap()->GetTaskProcessor()->AddTask(
            self, new ClearedReferenceTask(cleared_references));
      } else {
        ClearedReferenceTask task(cleared_references);
        task.Run(self);
      }
    }
    cleared_references_.Clear();
  }
}

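// If the given finalizer reference has not been processed yet, make its pending-next field point
// to itself (a one-element circular list) so it is treated as already enqueued; returns false if
// it was processed. A hedged note: this appears to back the Java-side
// FinalizerReference.makeCircularListIfUnenqueued, but that caller lives outside this file.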
bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Wait until we are done processing references.
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    condition_.WaitHoldingLocks(self);
  }
  // At this point, since the sentinel of the reference is live, it is guaranteed not to be
  // enqueued if we just finished processing references. Otherwise, we may be in the middle of
  // the main GC phase. Since we are holding the reference processor lock, it is guaranteed that
  // reference processing can't begin. The GC could have just enqueued the reference on one of
  // the internal GC queues, but since we also hold the finalizer_reference_queue_ lock, that
  // race is prevented as well.
  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
  if (reference->IsUnprocessed()) {
    CHECK(reference->IsFinalizerReferenceInstance());
    reference->SetPendingNext(reference);
    return true;
  }
  return false;
}

}  // namespace gc
}  // namespace art