/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "art_field-inl.h"
#include "base/mutex.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "class_root.h"
#include "collector/garbage_collector.h"
#include "jni/java_vm_ext.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "object_callbacks.h"
#include "reflection.h"
#include "scoped_thread_state_change-inl.h"
#include "task_processor.h"
#include "thread_pool.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

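// If true, hand cleared references to the heap's TaskProcessor to be enqueued asynchronously
// instead of returning a task for the caller to run (see CollectClearedReferences below).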
static constexpr bool kAsyncReferenceQueueAdd = false;

ReferenceProcessor::ReferenceProcessor()
    : collector_(nullptr),
      preserving_references_(false),
      condition_("reference processor condition", *Locks::reference_processor_lock_),
      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}

static inline MemberOffset GetSlowPathFlagOffset(ObjPtr<mirror::Class> reference_class)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(reference_class == GetClassRoot<mirror::Reference>());
  // Second static field
  ArtField* field = reference_class->GetStaticField(1);
  DCHECK_STREQ(field->GetName(), "slowPathEnabled");
  return field->GetOffset();
}

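// Set the static java.lang.ref.Reference.slowPathEnabled flag. While it is set, Reference.get()
// cannot use its fast path and instead falls back to the runtime's GetReferent() below, which
// lets the GC intercept referent reads during reference processing.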
static inline void SetSlowPathFlag(bool enabled) REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Class> reference_class = GetClassRoot<mirror::Reference>();
  MemberOffset slow_path_offset = GetSlowPathFlagOffset(reference_class);
  reference_class->SetFieldBoolean</* kTransactionActive= */ false, /* kCheckTransaction= */ false>(
      slow_path_offset, enabled ? 1 : 0);
}

void ReferenceProcessor::EnableSlowPath() {
  SetSlowPathFlag(/* enabled= */ true);
}

void ReferenceProcessor::DisableSlowPath(Thread* self) {
  SetSlowPathFlag(/* enabled= */ false);
  condition_.Broadcast(self);
}

bool ReferenceProcessor::SlowPathEnabled() {
  ObjPtr<mirror::Class> reference_class = GetClassRoot<mirror::Reference>();
  MemberOffset slow_path_offset = GetSlowPathFlagOffset(reference_class);
  return reference_class->GetFieldBoolean(slow_path_offset);
}

void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  condition_.Broadcast(self);
}

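// Return the referent of the given reference on behalf of a mutator calling Reference.get().
// Fast path: when reference processing is inactive, or the referent is already cleared, return
// the referent directly. Slow path: block until the GC has either marked the referent or finished
// processing, so the mutator never observes a referent that is about to be swept.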
ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
                                                       ObjPtr<mirror::Reference> reference) {
  if (!kUseReadBarrier || self->GetWeakRefAccessEnabled()) {
    // Under read barrier / concurrent copying collector, it's not safe to call GetReferent() when
    // weak ref access is disabled as the call includes a read barrier which may push a ref onto the
    // mark stack and interfere with termination of marking.
    const ObjPtr<mirror::Object> referent = reference->GetReferent();
    // If the referent is null then it is already cleared, so we can just return null: there is no
    // scenario where it becomes non-null during the reference processing phase.
    if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
      return referent;
    }
  }
  MutexLock mu(self, *Locks::reference_processor_lock_);
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    ObjPtr<mirror::Object> referent = reference->GetReferent<kWithoutReadBarrier>();
    // If the referent became cleared, return it. Don't need barrier since thread roots can't get
    // updated until after we leave the function due to holding the mutator lock.
    if (referent == nullptr) {
      return nullptr;
    }
    // Check whether the referent is already marked by asking the collector. We can return it to
    // the mutator as long as the GC is not preserving references.
    if (LIKELY(collector_ != nullptr)) {
      // If it's null it means not marked, but it could become marked if the referent is reachable
      // by finalizer referents. So we cannot return in this case and must block. Otherwise, we
      // can return it to the mutator as long as the GC is not preserving references, in which
      // case only black nodes can be safely returned. If the GC is preserving references, the
      // mutator could take a white field from a grey or white node and move it somewhere else
      // in the heap causing corruption since this field would get swept.
      // Use the cached referent instead of calling GetReferent since other threads could call
      // Reference.clear() after we did the null check resulting in a null pointer being
      // incorrectly passed to IsMarked. b/33569625
      ObjPtr<mirror::Object> forwarded_ref = collector_->IsMarked(referent.Ptr());
      if (forwarded_ref != nullptr) {
        // Non null means that it is marked.
        if (!preserving_references_ ||
            (LIKELY(!reference->IsFinalizerReferenceInstance()) && reference->IsUnprocessed())) {
          return forwarded_ref;
        }
      }
    }
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
    condition_.WaitHoldingLocks(self);
  }
  return reference->GetReferent();
}

void ReferenceProcessor::StartPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = true;
}

void ReferenceProcessor::StopPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = false;
  // We are done preserving references; threads that were blocked may now see a marked referent.
  condition_.Broadcast(self);
}

// Process reference class instances and schedule finalizations.
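// The queues are handled in order: soft references are preserved first (unless
// clear_soft_references is set), then soft and weak references with unmarked referents are
// cleared, finalizable objects are enqueued for finalization, the soft and weak queues are
// cleared again for anything newly revived, and finally phantom references are cleared.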
void ReferenceProcessor::ProcessReferences(bool concurrent,
                                           TimingLogger* timings,
                                           bool clear_soft_references,
                                           collector::GarbageCollector* collector) {
  TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    collector_ = collector;
    if (!kUseReadBarrier) {
      CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
    } else {
      // Weak ref access is enabled at Zygote compaction by SemiSpace (concurrent == false).
      CHECK_EQ(!self->GetWeakRefAccessEnabled(), concurrent);
    }
  }
  if (kIsDebugBuild && collector->IsTransactionActive()) {
    // In transaction mode, we shouldn't enqueue any Reference to the queues.
    // See DelayReferenceReferent().
    DCHECK(soft_reference_queue_.IsEmpty());
    DCHECK(weak_reference_queue_.IsEmpty());
    DCHECK(finalizer_reference_queue_.IsEmpty());
    DCHECK(phantom_reference_queue_.IsEmpty());
  }
  // Unless we are required to clear soft references with white referents, preserve some of them.
  if (!clear_soft_references) {
    TimingLogger::ScopedTiming split(concurrent ? "ForwardSoftReferences" :
        "(Paused)ForwardSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // TODO: Add smarter logic for preserving soft references. The behavior should be a conditional
    // mark if the SoftReference is supposed to be preserved.
    soft_reference_queue_.ForwardSoftReferences(collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  {
    TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
        "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, collector);
    collector->ProcessMarkStack();
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear the soft and weak references whose referents are still white now that finalizer
  // reachable objects have been marked.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // Need to always do this since the next GC may be concurrent. Doing this only in the
    // concurrent case could result in a stale collector_ being used before the next round of
    // reference processing starts, since there is a small window of time where the slow path is
    // enabled but collector_ is not yet set.
    collector_ = nullptr;
    if (!kUseReadBarrier && concurrent) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                                ObjPtr<mirror::Reference> ref,
                                                collector::GarbageCollector* collector) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass != nullptr);
  DCHECK(klass->IsTypeOfReferenceClass());
  mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
  // do_atomic_update needs to be true because this happens outside of the reference processing
  // phase.
  if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update=*/true)) {
    if (UNLIKELY(collector->IsTransactionActive())) {
      // In transaction mode, keep the referent alive and skip reference processing entirely so
      // that it never needs to be rolled back. do_atomic_update needs to be true because this
      // happens outside of the reference processing phase.
      if (!referent->IsNull()) {
        collector->MarkHeapReference(referent, /*do_atomic_update=*/ true);
      }
      return;
    }
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsWeakReferenceClass()) {
      weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsFinalizerReferenceClass()) {
      finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsPhantomReferenceClass()) {
      phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else {
      LOG(FATAL) << "Invalid reference type " << klass->PrettyClass() << " " << std::hex
                 << klass->GetAccessFlags();
    }
  }
}

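// Visit the cleared references list so that its entries stay valid if the GC moves the
// referenced objects.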
void ReferenceProcessor::UpdateRoots(IsMarkedVisitor* visitor) {
  cleared_references_.UpdateRoots(visitor);
}

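// Heap task that passes a list of cleared references to java.lang.ref.ReferenceQueue.add() so
// the managed side can deliver them to their queues, then drops the global reference.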
class ClearedReferenceTask : public HeapTask {
 public:
  explicit ClearedReferenceTask(jobject cleared_references)
      : HeapTask(NanoTime()), cleared_references_(cleared_references) {
  }
  void Run(Thread* thread) override {
    ScopedObjectAccess soa(thread);
    jvalue args[1];
    args[0].l = cleared_references_;
    InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
    soa.Env()->DeleteGlobalRef(cleared_references_);
  }

 private:
  const jobject cleared_references_;
};

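// Detach the current cleared references list and return a task that enqueues it on the managed
// ReferenceQueue. The caller runs the task once it is safe to execute managed code; if there is
// nothing to enqueue, a no-op task is returned instead.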
SelfDeletingTask* ReferenceProcessor::CollectClearedReferences(Thread* self) {
  Locks::mutator_lock_->AssertNotHeld(self);
  // By default we don't actually need to do anything. Just return this no-op task to avoid having
  // to put in ifs.
  std::unique_ptr<SelfDeletingTask> result(new FunctionTask([](Thread*) {}));
  // When a runtime isn't started there are no reference queues to care about so ignore.
  if (!cleared_references_.IsEmpty()) {
    if (LIKELY(Runtime::Current()->IsStarted())) {
      jobject cleared_references;
      {
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        cleared_references = self->GetJniEnv()->GetVm()->AddGlobalRef(
            self, cleared_references_.GetList());
      }
      if (kAsyncReferenceQueueAdd) {
        // TODO: This can cause RunFinalization to terminate before newly freed objects are
        // finalized since they may not be enqueued by the time RunFinalization starts.
        Runtime::Current()->GetHeap()->GetTaskProcessor()->AddTask(
            self, new ClearedReferenceTask(cleared_references));
      } else {
        result.reset(new ClearedReferenceTask(cleared_references));
      }
    }
    cleared_references_.Clear();
  }
  return result.release();
}

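// Clear the referent on behalf of Reference.clear(), synchronizing with any in-progress
// reference processing so the clear cannot race with the GC's referent checks.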
void ReferenceProcessor::ClearReferent(ObjPtr<mirror::Reference> ref) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Need to wait until reference processing is done since IsMarkedHeapReference does not have a
  // CAS. If we do not wait, it can result in the GC un-clearing references due to race conditions.
  // This also handles the race where the referent gets cleared after a null check but before
  // IsMarkedHeapReference is called.
  WaitUntilDoneProcessingReferences(self);
  if (Runtime::Current()->IsActiveTransaction()) {
    ref->ClearReferent<true>();
  } else {
    ref->ClearReferent<false>();
  }
}

void ReferenceProcessor::WaitUntilDoneProcessingReferences(Thread* self) {
  // Wait until we are done processing references.
  while ((!kUseReadBarrier && SlowPathEnabled()) ||
         (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
    condition_.WaitHoldingLocks(self);
  }
}

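// If the given finalizer reference has not yet been enqueued by the GC, mark it as enqueued by
// making its pending-next field point to itself and return true. Returns false if the GC got to
// it first.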
bool ReferenceProcessor::MakeCircularListIfUnenqueued(
    ObjPtr<mirror::FinalizerReference> reference) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  WaitUntilDoneProcessingReferences(self);
  // At this point, since the sentinel of the reference is live, it is guaranteed not to be
  // enqueued if we just finished processing references. Otherwise, we may be in the middle of the
  // main GC phase. Since we are holding the reference processor lock, reference processing cannot
  // begin. The GC could have just enqueued the reference on one of the internal GC queues, but
  // holding the finalizer_reference_queue_ lock below also prevents that race.
  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
  if (reference->IsUnprocessed()) {
    CHECK(reference->IsFinalizerReferenceInstance());
    reference->SetPendingNext(reference);
    return true;
  }
  return false;
}

}  // namespace gc
}  // namespace art