/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "base/time_utils.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "reference_processor-inl.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "task_processor.h"
#include "utils.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

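// If true, hand cleared references to the Java ReferenceQueue asynchronously via the heap's
// TaskProcessor instead of invoking ReferenceQueue.add synchronously on the calling thread.
// See EnqueueClearedReferences below.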
static constexpr bool kAsyncReferenceQueueAdd = false;

ReferenceProcessor::ReferenceProcessor()
    : process_references_args_(nullptr, nullptr, nullptr),
      preserving_references_(false),
      condition_("reference processor condition", *Locks::reference_processor_lock_),
      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}

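// While the slow path flag is set on the java.lang.ref.Reference class, calls to
// Reference.getReferent() are routed through GetReferent() below so that referents are not
// handed to the mutator while the GC is processing references.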
void ReferenceProcessor::EnableSlowPath() {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(true);
}

void ReferenceProcessor::DisableSlowPath(Thread* self) {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(false);
  condition_.Broadcast(self);
}

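// Returns the referent of |reference| if it is safe to hand to the mutator, blocking on
// condition_ while the slow path is enabled and the answer cannot yet be determined.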
mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
  mirror::Object* const referent = reference->GetReferent();
  // If the referent is null then it is already cleared, so we can just return null: there is no
  // scenario where it becomes non-null during the reference processing phase.
  if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
    return referent;
  }
  MutexLock mu(self, *Locks::reference_processor_lock_);
  while (SlowPathEnabled()) {
    mirror::HeapReference<mirror::Object>* const referent_addr =
        reference->GetReferentReferenceAddr();
    // If the referent became cleared, return it. No barrier is needed since thread roots can't
    // get updated until after we leave the function, because we hold the mutator lock.
    if (referent_addr->AsMirrorPtr() == nullptr) {
      return nullptr;
    }
    // Try to see if the referent is already marked by using the is_marked_callback. We can return
    // it to the mutator as long as the GC is not preserving references.
    IsHeapReferenceMarkedCallback* const is_marked_callback =
        process_references_args_.is_marked_callback_;
    if (LIKELY(is_marked_callback != nullptr)) {
      // If it's null it means not marked, but it could become marked if the referent is reachable
      // by finalizer referents. So we cannot return in this case and must block. Otherwise, we
      // can return it to the mutator as long as the GC is not preserving references, in which
      // case only black nodes can be safely returned. If the GC is preserving references, the
      // mutator could take a white field from a grey or white node and move it somewhere else
      // in the heap, causing corruption since this field would get swept.
      if (is_marked_callback(referent_addr, process_references_args_.arg_)) {
        if (!preserving_references_ ||
           (LIKELY(!reference->IsFinalizerReferenceInstance()) && !reference->IsEnqueued())) {
          return referent_addr->AsMirrorPtr();
        }
      }
    }
    condition_.WaitHoldingLocks(self);
  }
  return reference->GetReferent();
}

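// Marks the referent of a soft reference so that it survives the collection. Invoked for each
// soft reference by ForwardSoftReferences when soft references are not being cleared.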
bool ReferenceProcessor::PreserveSoftReferenceCallback(mirror::HeapReference<mirror::Object>* obj,
                                                       void* arg) {
  auto* const args = reinterpret_cast<ProcessReferencesArgs*>(arg);
  // TODO: Add smarter logic for preserving soft references.
  mirror::Object* new_obj = args->mark_callback_(obj->AsMirrorPtr(), args->arg_);
  DCHECK(new_obj != nullptr);
  obj->Assign(new_obj);
  return true;
}

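// Bracket the GC phases during which white referents may still get marked. While
// preserving_references_ is set, GetReferent cannot unconditionally hand a marked referent to
// the mutator.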
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = true;
}

void ReferenceProcessor::StopPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = false;
  // We are done preserving references; threads blocked in GetReferent may now see a marked
  // referent.
  condition_.Broadcast(self);
}

// Process reference class instances and schedule finalizations.
void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
                                           bool clear_soft_references,
                                           IsHeapReferenceMarkedCallback* is_marked_callback,
                                           MarkObjectCallback* mark_object_callback,
                                           ProcessMarkStackCallback* process_mark_stack_callback,
                                           void* arg) {
  TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    process_references_args_.is_marked_callback_ = is_marked_callback;
    process_references_args_.mark_callback_ = mark_object_callback;
    process_references_args_.arg_ = arg;
    CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
  }
  // Unless we are required to clear soft references, preserve some white referents by forwarding
  // the soft references.
  if (!clear_soft_references) {
    TimingLogger::ScopedTiming split(concurrent ? "ForwardSoftReferences" :
        "(Paused)ForwardSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    soft_reference_queue_.ForwardSoftReferences(&PreserveSoftReferenceCallback,
                                                &process_references_args_);
    process_mark_stack_callback(arg);
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  {
    TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
        "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, is_marked_callback,
                                                          mark_object_callback, arg);
    process_mark_stack_callback(arg);
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all soft and weak references with white referents that became reachable only through
  // finalizer referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // We always need to clear the callback here since the next GC may be concurrent. Clearing it
    // only in the concurrent case could result in a stale is_marked_callback_ being called before
    // reference processing starts, since there is a small window where the slow path is enabled
    // but the callback is not yet set.
    process_references_args_.is_marked_callback_ = nullptr;
    if (concurrent) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                                                IsHeapReferenceMarkedCallback* is_marked_callback,
                                                void* arg) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass != nullptr);
  DCHECK(klass->IsTypeOfReferenceClass());
  mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
  if (referent->AsMirrorPtr() != nullptr && !is_marked_callback(referent, arg)) {
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsWeakReferenceClass()) {
      weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsFinalizerReferenceClass()) {
      finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsPhantomReferenceClass()) {
      phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else {
      LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                 << klass->GetAccessFlags();
    }
  }
}

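// Update the root pointers held by the cleared references queue, e.g. after the GC has moved
// objects.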
void ReferenceProcessor::UpdateRoots(IsMarkedCallback* callback, void* arg) {
  cleared_references_.UpdateRoots(callback, arg);
}

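// HeapTask that hands a list of cleared references to java.lang.ref.ReferenceQueue.add() and
// then deletes the global reference that kept the list alive.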
class ClearedReferenceTask : public HeapTask {
 public:
  explicit ClearedReferenceTask(jobject cleared_references)
      : HeapTask(NanoTime()), cleared_references_(cleared_references) {
  }
  virtual void Run(Thread* thread) {
    ScopedObjectAccess soa(thread);
    jvalue args[1];
    args[0].l = cleared_references_;
    InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
    soa.Env()->DeleteGlobalRef(cleared_references_);
  }

 private:
  const jobject cleared_references_;
};

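// Hand any references cleared by the last GC over to the Java ReferenceQueue machinery, either
// synchronously on this thread or, if kAsyncReferenceQueueAdd is set, via the heap's
// TaskProcessor.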
void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
  Locks::mutator_lock_->AssertNotHeld(self);
  // When the runtime isn't started there are no reference queues to care about, so ignore this
  // case.
  if (!cleared_references_.IsEmpty()) {
    if (LIKELY(Runtime::Current()->IsStarted())) {
      jobject cleared_references;
      {
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        cleared_references = self->GetJniEnv()->vm->AddGlobalRef(
            self, cleared_references_.GetList());
      }
      if (kAsyncReferenceQueueAdd) {
        // TODO: This can cause RunFinalization to terminate before newly freed objects are
        // finalized since they may not be enqueued by the time RunFinalization starts.
        Runtime::Current()->GetHeap()->GetTaskProcessor()->AddTask(
            self, new ClearedReferenceTask(cleared_references));
      } else {
        ClearedReferenceTask task(cleared_references);
        task.Run(self);
      }
    }
    cleared_references_.Clear();
  }
}

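// If |reference| has not been enqueued by the GC, point its pendingNext at itself (a circular
// list of one) and return true; return false if the GC already enqueued it. Waits for any
// in-progress reference processing to finish first.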
bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Wait until we are done processing references.
  while (SlowPathEnabled()) {
    condition_.WaitHoldingLocks(self);
  }
  // At this point, since the sentinel of the reference is live, it is guaranteed not to be
  // enqueued if we just finished processing references. Otherwise, we may be in the middle of
  // the main GC phase; since we hold the reference processor lock, reference processing can't
  // begin. The GC could have just enqueued the reference on one of its internal queues, but
  // since we also hold the finalizer_reference_queue_ lock, that race is prevented as well.
  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
  if (!reference->IsEnqueued()) {
    CHECK(reference->IsFinalizerReferenceInstance());
    if (Runtime::Current()->IsActiveTransaction()) {
      reference->SetPendingNext<true>(reference);
    } else {
      reference->SetPendingNext<false>(reference);
    }
    return true;
  }
  return false;
}

}  // namespace gc
}  // namespace art