/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "base/time_utils.h"
#include "base/utils.h"
#include "collector/garbage_collector.h"
#include "java_vm_ext.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "object_callbacks.h"
#include "reference_processor-inl.h"
#include "reflection.h"
#include "scoped_thread_state_change-inl.h"
#include "task_processor.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

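// If true, cleared references are handed to the heap's task processor and enqueued
// asynchronously; if false, ReferenceQueue.add() is invoked synchronously on the calling
// thread (see EnqueueClearedReferences below).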
static constexpr bool kAsyncReferenceQueueAdd = false;

ReferenceProcessor::ReferenceProcessor()
    : collector_(nullptr),
      preserving_references_(false),
      condition_("reference processor condition", *Locks::reference_processor_lock_),
      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}

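// Set the slow-path flag on java.lang.ref.Reference so that mutator calls to Reference.get()
// take the slow path and funnel into GetReferent() below while references are being processed.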
void ReferenceProcessor::EnableSlowPath() {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(true);
}

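// Clear the slow-path flag and wake up threads blocked in GetReferent() or
// WaitUntilDoneProcessingReferences(). Expects reference_processor_lock_ to be held.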
void ReferenceProcessor::DisableSlowPath(Thread* self) {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(false);
  condition_.Broadcast(self);
}

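// Wake up any threads blocked on condition_ without toggling the slow-path flag, used when the
// wait condition may have changed for another reason, e.g. weak ref access being re-enabled.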
void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  condition_.Broadcast(self);
}

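// Slow path for Reference.get(): return the referent if it is already cleared or known to be
// marked, otherwise block until reference processing is done (or until the GC stops preserving
// references and the referent's mark status becomes known).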
ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
                                                       ObjPtr<mirror::Reference> reference) {
  if (!kUseReadBarrier || self->GetWeakRefAccessEnabled()) {
    // Under read barrier / concurrent copying collector, it's not safe to call GetReferent() when
    // weak ref access is disabled as the call includes a read barrier which may push a ref onto
    // the mark stack and interfere with termination of marking.
    ObjPtr<mirror::Object> const referent = reference->GetReferent();
    // If the referent is null then it is already cleared, so we can just return null since there
    // is no scenario where it becomes non-null during the reference processing phase.
    if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
      return referent;
    }
  }
  MutexLock mu(self, *Locks::reference_processor_lock_);
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    ObjPtr<mirror::Object> referent = reference->GetReferent<kWithoutReadBarrier>();
    // If the referent became cleared, return it. No barrier is needed since thread roots can't
    // get updated until after we leave the function, because we hold the mutator lock.
    if (referent == nullptr) {
      return nullptr;
    }
    // Try to see if the referent is already marked by using the is_marked_callback. We can return
    // it to the mutator as long as the GC is not preserving references.
    if (LIKELY(collector_ != nullptr)) {
      // If it's null it means not marked, but it could become marked if the referent is reachable
      // from finalizer referents. So we cannot return in this case and must block. Otherwise, we
      // can return it to the mutator as long as the GC is not preserving references, in which
      // case only black nodes can be safely returned. If the GC is preserving references, the
      // mutator could take a white field from a grey or white node and move it somewhere else
      // in the heap, causing corruption since this field would get swept.
      // Use the cached referent instead of calling GetReferent since other threads could call
      // Reference.clear() after we did the null check, resulting in a null pointer being
      // incorrectly passed to IsMarked. b/33569625
      ObjPtr<mirror::Object> forwarded_ref = collector_->IsMarked(referent.Ptr());
      if (forwarded_ref != nullptr) {
        // Non-null means that it is marked.
        if (!preserving_references_ ||
            (LIKELY(!reference->IsFinalizerReferenceInstance()) && reference->IsUnprocessed())) {
          return forwarded_ref;
        }
      }
    }
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
    condition_.WaitHoldingLocks(self);
  }
  return reference->GetReferent();
}

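// While preserving_references_ is set, GetReferent() refuses to hand out even marked referents
// (except for unprocessed non-finalizer references), since the GC may still be marking through
// soft or finalizer references and white fields could otherwise escape to the mutator.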
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = true;
}

void ReferenceProcessor::StopPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = false;
  // We are done preserving references; threads that are blocked may now see a marked referent.
  condition_.Broadcast(self);
}

// Process reference class instances and schedule finalizations.
void ReferenceProcessor::ProcessReferences(bool concurrent,
                                           TimingLogger* timings,
                                           bool clear_soft_references,
                                           collector::GarbageCollector* collector) {
  TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    collector_ = collector;
    if (!kUseReadBarrier) {
      CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
    } else {
      // Weak ref access is enabled at Zygote compaction by SemiSpace (concurrent == false).
      CHECK_EQ(!self->GetWeakRefAccessEnabled(), concurrent);
    }
  }
  if (kIsDebugBuild && collector->IsTransactionActive()) {
    // In transaction mode, we shouldn't enqueue any Reference to the queues.
    // See DelayReferenceReferent().
    DCHECK(soft_reference_queue_.IsEmpty());
    DCHECK(weak_reference_queue_.IsEmpty());
    DCHECK(finalizer_reference_queue_.IsEmpty());
    DCHECK(phantom_reference_queue_.IsEmpty());
  }
  // Unless we are required to clear soft references, preserve some white soft referents by
  // marking through them.
  if (!clear_soft_references) {
    TimingLogger::ScopedTiming split(concurrent ? "ForwardSoftReferences" :
        "(Paused)ForwardSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // TODO: Add smarter logic for preserving soft references. The behavior should be a conditional
    // mark if the SoftReference is supposed to be preserved.
    soft_reference_queue_.ForwardSoftReferences(collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  {
    TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
        "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all soft and weak references with white referents that are reachable only from
  // finalizer referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // Need to always do this since the next GC may be concurrent. Doing this only for concurrent
    // GCs could result in a stale is_marked_callback_ being called before the reference
    // processing starts, since there is a small window of time where slow_path_enabled_ is
    // enabled but the callback isn't yet set.
    collector_ = nullptr;
    if (!kUseReadBarrier && concurrent) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                                ObjPtr<mirror::Reference> ref,
                                                collector::GarbageCollector* collector) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass != nullptr);
  DCHECK(klass->IsTypeOfReferenceClass());
  mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
  // do_atomic_update needs to be true because this happens outside of the reference processing
  // phase.
  if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) {
    if (UNLIKELY(collector->IsTransactionActive())) {
      // In transaction mode, keep the referent alive and skip reference processing entirely, so
      // that there is never any reference processing to roll back.
      if (!referent->IsNull()) {
        collector->MarkHeapReference(referent, /*do_atomic_update*/ true);
      }
      return;
    }
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsWeakReferenceClass()) {
      weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsFinalizerReferenceClass()) {
      finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsPhantomReferenceClass()) {
      phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else {
      LOG(FATAL) << "Invalid reference type " << klass->PrettyClass() << " " << std::hex
                 << klass->GetAccessFlags();
    }
  }
}

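// Visit the roots of the cleared references list so that a moving collector can update them.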
void ReferenceProcessor::UpdateRoots(IsMarkedVisitor* visitor) {
  cleared_references_.UpdateRoots(visitor);
}

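// HeapTask that hands a list of cleared references (held via a global ref) to managed code by
// calling java.lang.ref.ReferenceQueue.add(), then releases the global ref.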
class ClearedReferenceTask : public HeapTask {
 public:
  explicit ClearedReferenceTask(jobject cleared_references)
      : HeapTask(NanoTime()), cleared_references_(cleared_references) {
  }
  virtual void Run(Thread* thread) {
    ScopedObjectAccess soa(thread);
    jvalue args[1];
    args[0].l = cleared_references_;
    InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
    soa.Env()->DeleteGlobalRef(cleared_references_);
  }

 private:
  const jobject cleared_references_;
};

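// Hand the references cleared during the last round of processing over to managed code, which
// is responsible for distributing them to their java.lang.ref.ReferenceQueue instances.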
void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
  Locks::mutator_lock_->AssertNotHeld(self);
  // When the runtime isn't started there are no reference queues to care about, so ignore this.
  if (!cleared_references_.IsEmpty()) {
    if (LIKELY(Runtime::Current()->IsStarted())) {
      jobject cleared_references;
      {
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        cleared_references = self->GetJniEnv()->GetVm()->AddGlobalRef(
            self, cleared_references_.GetList());
      }
      if (kAsyncReferenceQueueAdd) {
        // TODO: This can cause RunFinalization to terminate before newly freed objects are
        // finalized since they may not be enqueued by the time RunFinalization starts.
        Runtime::Current()->GetHeap()->GetTaskProcessor()->AddTask(
            self, new ClearedReferenceTask(cleared_references));
      } else {
        ClearedReferenceTask task(cleared_references);
        task.Run(self);
      }
    }
    cleared_references_.Clear();
  }
}

void ReferenceProcessor::ClearReferent(ObjPtr<mirror::Reference> ref) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Need to wait until reference processing is done since IsMarkedHeapReference does not have a
  // CAS. If we do not wait, it can result in the GC un-clearing references due to race conditions.
  // This also handles the race where the referent gets cleared after a null check but before
  // IsMarkedHeapReference is called.
  WaitUntilDoneProcessingReferences(self);
  if (Runtime::Current()->IsActiveTransaction()) {
    ref->ClearReferent<true>();
  } else {
    ref->ClearReferent<false>();
  }
}

void ReferenceProcessor::WaitUntilDoneProcessingReferences(Thread* self) {
  // Wait until we are done processing references.
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
    condition_.WaitHoldingLocks(self);
  }
}

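// If the reference has not yet been enqueued by the GC, point its pendingNext field at itself to
// form a one-element circular list, which marks it as enqueued. Returns true if the circular
// list was made here, false if the reference was already enqueued.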
bool ReferenceProcessor::MakeCircularListIfUnenqueued(
    ObjPtr<mirror::FinalizerReference> reference) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  WaitUntilDoneProcessingReferences(self);
  // At this point, since the sentinel of the reference is live, it is guaranteed not to be
  // enqueued if we just finished processing references. Otherwise, we may be in the main GC
  // phase. Since we are holding the reference processor lock, it is guaranteed that reference
  // processing can't begin. The GC could have just enqueued the reference on one of the internal
  // GC queues, but since we also hold the finalizer_reference_queue_ lock, that race is
  // prevented as well.
  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
  if (reference->IsUnprocessed()) {
    CHECK(reference->IsFinalizerReferenceInstance());
    reference->SetPendingNext(reference);
    return true;
  }
  return false;
}

}  // namespace gc
}  // namespace art