/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_REFERENCE_QUEUE_H_
#define ART_RUNTIME_GC_REFERENCE_QUEUE_H_

#include <iosfwd>
#include <string>
#include <vector>

#include "atomic.h"
#include "base/timing_logger.h"
#include "globals.h"
#include "jni.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "thread_pool.h"

namespace art {
namespace mirror {
class Reference;
}  // namespace mirror

namespace gc {

class Heap;

// Used to temporarily store java.lang.ref.Reference(s) during GC and prior to queueing on the
// appropriate java.lang.ref.ReferenceQueue. The linked list is maintained in the
// java.lang.ref.Reference objects.
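//
// An illustrative sketch (not actual runtime code) of how a collector might drain a queue once
// processing is complete, using only the methods declared below; |queue| here is a hypothetical
// ReferenceQueue pointer:
//
//   while (!queue->IsEmpty()) {
//     mirror::Reference* ref = queue->DequeuePendingReference();
//     // Hand |ref| off, e.g. append it to a cleared-references queue.
//   }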
class ReferenceQueue {
 public:
  explicit ReferenceQueue(Mutex* lock);

  // Enqueue a reference if it is not already enqueued. Thread safe to call from multiple threads
  // since it uses a lock to avoid a race between checking for the reference's presence and adding
  // it.
  void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);

  // Enqueue a reference. Unlike EnqueuePendingReference, EnqueueReference checks that the
  // reference IsEnqueueable. Not thread safe; used when mutators are paused to minimize lock
  // overhead.
  void EnqueueReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Enqueue a reference without checking that it is enqueueable.
  void EnqueuePendingReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Dequeue the first reference (returns list_).
  mirror::Reference* DequeuePendingReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Enqueues finalizer references with white referents into cleared_references. White referents
  // are blackened, moved to the zombie field, and the referent field is cleared.
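  // A rough sketch of the per-reference handling this implies (illustrative pseudocode, not the
  // actual implementation):
  //
  //   while (!IsEmpty()) {
  //     ref = DequeuePendingReference();
  //     if (ref's referent is non-null and !is_marked_callback(referent, arg)) {
  //       blacken the referent with mark_object_callback, store it in ref's zombie field,
  //       clear ref's referent field, and EnqueuePendingReference(ref) on cleared_references.
  //     }
  //   }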
  void EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
                                  IsHeapReferenceMarkedCallback* is_marked_callback,
                                  MarkObjectCallback* mark_object_callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Walks the reference list, marking any references subject to the reference clearing policy.
  // References with a black referent are removed from the list. References with white referents
  // biased toward saving are blackened and also removed from the list.
  void ForwardSoftReferences(IsHeapReferenceMarkedCallback* preserve_callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Unlink the reference list, clearing reference objects with white referents. Cleared references
  // registered to a reference queue are scheduled for appending by the heap worker thread.
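  // A rough sketch of the intended behavior (illustrative pseudocode, not the actual
  // implementation):
  //
  //   while (!IsEmpty()) {
  //     ref = DequeuePendingReference();
  //     if (ref's referent is non-null and !is_marked_callback(referent, arg)) {
  //       clear ref's referent field; if ref is enqueueable, move it to cleared_references.
  //     }
  //   }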
  void ClearWhiteReferences(ReferenceQueue* cleared_references,
                            IsHeapReferenceMarkedCallback* is_marked_callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Prints the queue for debugging purposes.
  void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Returns the number of references in the queue, computed by walking the list.
  size_t GetLength() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool IsEmpty() const {
    return list_ == nullptr;
  }
  void Clear() {
    list_ = nullptr;
  }
  mirror::Reference* GetList() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return list_;
  }

  // Visits list_, currently only used for the mark compact GC.
  void UpdateRoots(IsMarkedCallback* callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  // Lock, used for parallel GC reference enqueuing. It allows multiple threads to call
  // AtomicEnqueueIfNotEnqueued simultaneously.
  Mutex* const lock_;
  // The actual reference list. Only a root for the mark compact GC since it will be null for other
  // GC types.
  mirror::Reference* list_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ReferenceQueue);
};

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_REFERENCE_QUEUE_H_