/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "base/time_utils.h"
#include "collector/garbage_collector.h"
#include "java_vm_ext.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "reference_processor-inl.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
#include "task_processor.h"
#include "utils.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

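// If true, hand cleared references to the heap's TaskProcessor and run ReferenceQueue.add
// asynchronously; otherwise it is invoked directly on the calling thread (see
// EnqueueClearedReferences below).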
static constexpr bool kAsyncReferenceQueueAdd = false;

ReferenceProcessor::ReferenceProcessor()
    : collector_(nullptr),
      preserving_references_(false),
      condition_("reference processor condition", *Locks::reference_processor_lock_),
      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}

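// Enabling the slow path marks the java.lang.ref.Reference class so that Reference.get() stops
// reading the referent field directly and instead calls into GetReferent() below, where it may
// block until reference processing completes.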
void ReferenceProcessor::EnableSlowPath() {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(true);
}

void ReferenceProcessor::DisableSlowPath(Thread* self) {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(false);
  condition_.Broadcast(self);
}

void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  condition_.Broadcast(self);
}

ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
                                                       ObjPtr<mirror::Reference> reference) {
  if (!kUseReadBarrier || self->GetWeakRefAccessEnabled()) {
    // Under the read barrier / concurrent copying collector, it's not safe to call GetReferent()
    // when weak ref access is disabled, as the call includes a read barrier which may push a ref
    // onto the mark stack and interfere with termination of marking.
    ObjPtr<mirror::Object> const referent = reference->GetReferent();
    // If the referent is null then it is already cleared, so we can just return null since there
    // is no scenario where it becomes non-null during the reference processing phase.
    if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
      return referent;
    }
  }
  MutexLock mu(self, *Locks::reference_processor_lock_);
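  // Block while reference processing is in progress: for non-read-barrier collectors this is
  // signalled by the slow path being enabled, for the read barrier (concurrent copying)
  // collector by weak ref access being disabled.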
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    ObjPtr<mirror::Object> referent = reference->GetReferent<kWithoutReadBarrier>();
    // If the referent became cleared, return null. No barrier is needed since thread roots can't
    // get updated until after we leave the function due to holding the mutator lock.
    if (referent == nullptr) {
      return nullptr;
    }
    // Try to see if the referent is already marked by asking the collector. We can return it to
    // the mutator as long as the GC is not preserving references.
    if (LIKELY(collector_ != nullptr)) {
      // If it's null it means not marked, but it could become marked if the referent is reachable
      // by finalizer referents. So we cannot return in this case and must block. Otherwise, we
      // can return it to the mutator as long as the GC is not preserving references, in which
      // case only black nodes can be safely returned. If the GC is preserving references, the
      // mutator could take a white field from a grey or white node and move it somewhere else
      // in the heap causing corruption since this field would get swept.
      // Use the cached referent instead of calling GetReferent since other threads could call
      // Reference.clear() after we did the null check resulting in a null pointer being
      // incorrectly passed to IsMarked. b/33569625
      ObjPtr<mirror::Object> forwarded_ref = collector_->IsMarked(referent.Ptr());
      if (forwarded_ref != nullptr) {
        // Non-null means that it is marked.
        if (!preserving_references_ ||
            (LIKELY(!reference->IsFinalizerReferenceInstance()) && reference->IsUnprocessed())) {
          return forwarded_ref;
        }
      }
    }
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
    condition_.WaitHoldingLocks(self);
  }
  return reference->GetReferent();
}

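// While preserving_references_ is set, GetReferent() above returns a marked referent only for a
// non-finalizer reference that has not yet been enqueued; all other callers block until
// StopPreservingReferences() broadcasts.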
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = true;
}

void ReferenceProcessor::StopPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = false;
  // We are done preserving references; threads blocked in GetReferent may now see a marked
  // referent.
  condition_.Broadcast(self);
}

// Process reference class instances and schedule finalizations.
void ReferenceProcessor::ProcessReferences(bool concurrent,
                                           TimingLogger* timings,
                                           bool clear_soft_references,
                                           collector::GarbageCollector* collector) {
  TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    collector_ = collector;
    if (!kUseReadBarrier) {
      CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
    } else {
      // Weak ref access is enabled at Zygote compaction by SemiSpace (concurrent == false).
      CHECK_EQ(!self->GetWeakRefAccessEnabled(), concurrent);
    }
  }
  if (kIsDebugBuild && collector->IsTransactionActive()) {
    // In transaction mode, we shouldn't enqueue any Reference to the queues.
    // See DelayReferenceReferent().
    DCHECK(soft_reference_queue_.IsEmpty());
    DCHECK(weak_reference_queue_.IsEmpty());
    DCHECK(finalizer_reference_queue_.IsEmpty());
    DCHECK(phantom_reference_queue_.IsEmpty());
  }
  // Unless required to clear soft references with white referents, preserve some white referents.
  if (!clear_soft_references) {
    TimingLogger::ScopedTiming split(concurrent ? "ForwardSoftReferences" :
        "(Paused)ForwardSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // TODO: Add smarter logic for preserving soft references. The behavior should be a
    // conditional mark if the SoftReference is supposed to be preserved.
    soft_reference_queue_.ForwardSoftReferences(collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  {
    TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
        "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all finalizer-referent-reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // Need to always do this since the next GC may be concurrent. Doing this only for concurrent
    // GCs could result in a stale collector_ being used before the next round of reference
    // processing starts, since there is a small window of time where the slow path is enabled
    // but collector_ is not yet set.
    collector_ = nullptr;
    if (!kUseReadBarrier && concurrent) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                                ObjPtr<mirror::Reference> ref,
                                                collector::GarbageCollector* collector) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass != nullptr);
  DCHECK(klass->IsTypeOfReferenceClass());
  mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
  // do_atomic_update needs to be true because this happens outside of the reference processing
  // phase.
  if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) {
    if (UNLIKELY(collector->IsTransactionActive())) {
      // In transaction mode, keep the referent alive and skip reference processing entirely, so
      // that there is no reference processing to roll back. do_atomic_update needs to be true
      // for the same reason as above.
      if (!referent->IsNull()) {
        collector->MarkHeapReference(referent, /*do_atomic_update*/ true);
      }
      return;
    }
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsWeakReferenceClass()) {
      weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsFinalizerReferenceClass()) {
      finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsPhantomReferenceClass()) {
      phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else {
      LOG(FATAL) << "Invalid reference type " << klass->PrettyClass() << " " << std::hex
                 << klass->GetAccessFlags();
    }
  }
}

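// Update the root pointing at the head of the cleared references list, e.g. after a moving GC
// has relocated it.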
void ReferenceProcessor::UpdateRoots(IsMarkedVisitor* visitor) {
  cleared_references_.UpdateRoots(visitor);
}

class ClearedReferenceTask : public HeapTask {
 public:
  explicit ClearedReferenceTask(jobject cleared_references)
      : HeapTask(NanoTime()), cleared_references_(cleared_references) {
  }
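  // Hands the cleared references over to the Java side by calling
  // java.lang.ref.ReferenceQueue.add() with the list, then drops the global ref that kept the
  // list alive across the heap task.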
  virtual void Run(Thread* thread) {
    ScopedObjectAccess soa(thread);
    jvalue args[1];
    args[0].l = cleared_references_;
    InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
    soa.Env()->DeleteGlobalRef(cleared_references_);
  }

 private:
  const jobject cleared_references_;
};

void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
  Locks::mutator_lock_->AssertNotHeld(self);
  // When a runtime isn't started there are no reference queues to care about so ignore.
  if (!cleared_references_.IsEmpty()) {
    if (LIKELY(Runtime::Current()->IsStarted())) {
      jobject cleared_references;
      {
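        // The mutator lock must be held to access the list head and to create a global ref
        // to it.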
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        cleared_references = self->GetJniEnv()->vm->AddGlobalRef(
            self, cleared_references_.GetList());
      }
      if (kAsyncReferenceQueueAdd) {
        // TODO: This can cause RunFinalization to terminate before newly freed objects are
        // finalized since they may not be enqueued by the time RunFinalization starts.
        Runtime::Current()->GetHeap()->GetTaskProcessor()->AddTask(
            self, new ClearedReferenceTask(cleared_references));
      } else {
        ClearedReferenceTask task(cleared_references);
        task.Run(self);
      }
    }
    cleared_references_.Clear();
  }
}


void ReferenceProcessor::ClearReferent(ObjPtr<mirror::Reference> ref) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Need to wait until reference processing is done since IsMarkedHeapReference does not have a
  // CAS. If we do not wait, it can result in the GC un-clearing references due to race
  // conditions. This also handles the race where the referent gets cleared after a null check
  // but before IsMarkedHeapReference is called.
  WaitUntilDoneProcessingReferences(self);
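  // The template argument selects transaction-aware field writes so that the clear can be
  // rolled back if the active transaction aborts.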
  if (Runtime::Current()->IsActiveTransaction()) {
    ref->ClearReferent<true>();
  } else {
    ref->ClearReferent<false>();
  }
}

void ReferenceProcessor::WaitUntilDoneProcessingReferences(Thread* self) {
  // Wait until we are done processing references.
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
    condition_.WaitHoldingLocks(self);
  }
}

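// Used by the finalizer sentinel logic: if the reference was not enqueued by the GC, link it to
// itself so that it reads as already enqueued. Returns true if the reference was still
// unprocessed.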
bool ReferenceProcessor::MakeCircularListIfUnenqueued(
    ObjPtr<mirror::FinalizerReference> reference) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  WaitUntilDoneProcessingReferences(self);
  // At this point, since the sentinel of the reference is live, it is guaranteed to not be
  // enqueued if we just finished processing references. Otherwise, we may be doing the main GC
  // phase. Since we are holding the reference processor lock, it guarantees that reference
  // processing can't begin. The GC could have just enqueued the reference on one of the internal
  // GC queues, but since we hold the finalizer_reference_queue_ lock it also prevents this
  // race.
  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
  if (reference->IsUnprocessed()) {
    CHECK(reference->IsFinalizerReferenceInstance());
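    // Point pendingNext at the reference itself: a non-null pendingNext means IsUnprocessed()
    // is false, so the GC will not enqueue this reference again.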
    reference->SetPendingNext(reference);
    return true;
  }
  return false;
}

}  // namespace gc
}  // namespace art