Home | History | Annotate | Download | only in gc
      1 /*
      2  * Copyright (C) 2013 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #ifndef ART_RUNTIME_GC_REFERENCE_QUEUE_H_
     18 #define ART_RUNTIME_GC_REFERENCE_QUEUE_H_
     19 
     20 #include <iosfwd>
     21 #include <string>
     22 #include <vector>
     23 
     24 #include "atomic.h"
     25 #include "base/mutex.h"
     26 #include "base/timing_logger.h"
     27 #include "globals.h"
     28 #include "jni.h"
     29 #include "obj_ptr.h"
     30 #include "object_callbacks.h"
     31 #include "offsets.h"
     32 #include "thread_pool.h"
     33 
     34 namespace art {
     35 namespace mirror {
     36 class Reference;
     37 }  // namespace mirror
     38 
     39 namespace gc {
     40 
     41 namespace collector {
     42 class GarbageCollector;
     43 }  // namespace collector
     44 
     45 class Heap;
     46 
// Used to temporarily store java.lang.ref.Reference(s) during GC and prior to queueing on the
// appropriate java.lang.ref.ReferenceQueue. The linked list is maintained as an unordered,
// circular, and singly-linked list using the pendingNext fields of the java.lang.ref.Reference
// objects.
class ReferenceQueue {
 public:
  // Constructs an empty queue. The queue does not own |lock|; it is only used by
  // AtomicEnqueueIfNotEnqueued, and the caller must keep it alive for the queue's lifetime.
  explicit ReferenceQueue(Mutex* lock);

  // Enqueue a reference if it is unprocessed. Thread safe to call from multiple
  // threads since it uses a lock to avoid a race between checking for the references presence and
  // adding it.
  void AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*lock_);

  // Enqueue a reference. The reference must be unprocessed.
  // Not thread safe, used when mutators are paused to minimize lock overhead.
  void EnqueueReference(ObjPtr<mirror::Reference> ref) REQUIRES_SHARED(Locks::mutator_lock_);

  // Dequeue a reference from the queue and return that dequeued reference.
  // Call DisableReadBarrierForReference for the reference that's returned from this function.
  ObjPtr<mirror::Reference> DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);

  // If applicable, disable the read barrier for the reference after its referent is handled (see
  // ConcurrentCopying::ProcessMarkStackRef.) This must be called for a reference that's dequeued
  // from pending queue (DequeuePendingReference).
  void DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Enqueues finalizer references with white referents.  White referents are blackened, moved to
  // the zombie field, and the referent field is cleared.
  void EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
                                  collector::GarbageCollector* collector)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Walks the reference list marking any references subject to the reference clearing policy.
  // References with a black referent are removed from the list.  References with white referents
  // biased toward saving are blackened and also removed from the list.
  void ForwardSoftReferences(MarkObjectVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Unlink the reference list clearing references objects with white referents. Cleared references
  // registered to a reference queue are scheduled for appending by the heap worker thread.
  void ClearWhiteReferences(ReferenceQueue* cleared_references,
                            collector::GarbageCollector* collector)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Prints a description of the queue's contents to |os| (for debugging/logging).
  void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
  // Returns the number of references currently in the queue. Presumably walks the
  // circular list, so likely O(n) — implementation not visible in this header.
  size_t GetLength() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns true if no references are currently enqueued.
  bool IsEmpty() const {
    return list_ == nullptr;
  }
  // Drops the queue's view of the list by nulling the head pointer. Note this does not
  // modify the references themselves (their pendingNext fields are left untouched).
  void Clear() {
    list_ = nullptr;
  }
  // Returns the head of the singly-linked circular list, or null if the queue is empty.
  mirror::Reference* GetList() REQUIRES_SHARED(Locks::mutator_lock_) {
    return list_;
  }

  // Visits list_, currently only used for the mark compact GC.
  void UpdateRoots(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  // Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
  // calling AtomicEnqueueIfNotEnqueued.
  Mutex* const lock_;
  // The actual reference list. Only a root for the mark compact GC since it will be null for other
  // GC types. Not an ObjPtr since it is accessed from multiple threads.
  mirror::Reference* list_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ReferenceQueue);
};
    120 
    121 }  // namespace gc
    122 }  // namespace art
    123 
    124 #endif  // ART_RUNTIME_GC_REFERENCE_QUEUE_H_
    125