/* Copyright (C) 2017 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h.  The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_H_
#define ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_H_

#include <unordered_map>

#include "base/macros.h"
#include "base/mutex.h"
#include "gc/system_weak.h"
#include "gc_root-inl.h"
#include "globals.h"
#include "jvmti.h"
#include "mirror/object.h"
#include "thread-inl.h"

namespace openjdkjvmti {

class EventHandler;

// A system-weak container mapping objects to elements of the template type. This corresponds
// to a weak hash map. For historical reasons the stored value is called "tag."
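//
// Illustrative usage sketch (a comment-only example; `obj` is assumed to be a live
// art::mirror::Object* and the caller is assumed to hold the mutator lock, shared):
//
//   JvmtiWeakTable<jlong> table;
//   table.Set(obj, 1234);            // Returns false: no previous mapping existed.
//   jlong tag = 0;
//   if (table.GetTag(obj, &tag)) {   // Returns true, tag == 1234.
//     ...
//   }
//   table.Remove(obj, &tag);         // Returns true and writes the old value to tag.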
template <typename T>
class JvmtiWeakTable : public art::gc::SystemWeakHolder {
 public:
  JvmtiWeakTable()
      : art::gc::SystemWeakHolder(art::kTaggingLockLevel),
        update_since_last_sweep_(false) {
  }

  // Remove the mapping for the given object, returning whether such a mapping existed (and the old
  // value).
  bool Remove(art::mirror::Object* obj, /* out */ T* tag)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!allow_disallow_lock_);
  bool RemoveLocked(art::mirror::Object* obj, /* out */ T* tag)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_);

  // Set the mapping for the given object. Returns true if this overwrites an already existing
  // mapping.
  virtual bool Set(art::mirror::Object* obj, T tag)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!allow_disallow_lock_);
  virtual bool SetLocked(art::mirror::Object* obj, T tag)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_);

  // Return the value associated with the given object. Returns true if the mapping exists, false
  // otherwise.
  bool GetTag(art::mirror::Object* obj, /* out */ T* result)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!allow_disallow_lock_) {
    art::Thread* self = art::Thread::Current();
    art::MutexLock mu(self, allow_disallow_lock_);
    Wait(self);

    return GetTagLocked(self, obj, result);
  }
  bool GetTagLocked(art::mirror::Object* obj, /* out */ T* result)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_) {
    art::Thread* self = art::Thread::Current();
    allow_disallow_lock_.AssertHeld(self);
    Wait(self);

    return GetTagLocked(self, obj, result);
  }

  // Sweep the container. DO NOT CALL MANUALLY.
  void Sweep(art::IsMarkedVisitor* visitor)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!allow_disallow_lock_);

  // Return all objects that have a value mapping in tags.
  jvmtiError GetTaggedObjects(jvmtiEnv* jvmti_env,
                              jint tag_count,
                              const T* tags,
                              /* out */ jint* count_ptr,
                              /* out */ jobject** object_result_ptr,
                              /* out */ T** tag_result_ptr)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!allow_disallow_lock_);

  // Locking functions, to allow coarse-grained locking and amortization.
  void Lock() ACQUIRE(allow_disallow_lock_);
  void Unlock() RELEASE(allow_disallow_lock_);
  void AssertLocked() ASSERT_CAPABILITY(allow_disallow_lock_);
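
  // Example of amortizing the lock over a batch of updates (illustrative sketch; `objs`,
  // `tags`, and `count` are assumed agent-side arrays/sizes, not part of this API):
  //
  //   table.Lock();
  //   for (size_t i = 0; i != count; ++i) {
  //     table.SetLocked(objs[i], tags[i]);
  //   }
  //   table.Unlock();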

  art::mirror::Object* Find(T tag)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!allow_disallow_lock_);

 protected:
  // Should HandleNullSweep be called when Sweep detects the release of an object?
  virtual bool DoesHandleNullOnSweep() {
    return false;
  }
  // If DoesHandleNullOnSweep returns true, this function is called by Sweep with the tag of each
  // mapping whose object has been reclaimed.
  virtual void HandleNullSweep(T tag ATTRIBUTE_UNUSED) {}
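  //
  // Subclassing sketch (illustrative; `MyTagTable` is an assumed name, not part of this file).
  // A subclass that wants to be notified about swept (freed) objects overrides both hooks:
  //
  //   class MyTagTable : public JvmtiWeakTable<jlong> {
  //    protected:
  //     bool DoesHandleNullOnSweep() override { return true; }
  //     void HandleNullSweep(jlong tag) override { /* e.g. report the freed tag. */ }
  //   };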

 private:
  bool SetLocked(art::Thread* self, art::mirror::Object* obj, T tag)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_);

  bool RemoveLocked(art::Thread* self, art::mirror::Object* obj, /* out */ T* tag)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_);

  bool GetTagLocked(art::Thread* self, art::mirror::Object* obj, /* out */ T* result)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_) {
    auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
    if (it != tagged_objects_.end()) {
      *result = it->second;
      return true;
    }

    // Performance optimization: To avoid multiple table updates, ensure that during GC we
    // only update once. See the comment on the implementation of GetTagSlowPath.
    if (art::kUseReadBarrier &&
        self != nullptr &&
        self->GetIsGcMarking() &&
        !update_since_last_sweep_) {
      return GetTagSlowPath(self, obj, result);
    }

    return false;
  }

  // Slow-path for GetTag. We didn't find the object, but we might be storing from-pointers and
  // are asked to retrieve with a to-pointer.
  bool GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, /* out */ T* result)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_);

  // Update the table by doing read barriers on each element, ensuring that to-space pointers
  // are stored.
  void UpdateTableWithReadBarrier()
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_);

  template <bool kHandleNull>
  void SweepImpl(art::IsMarkedVisitor* visitor)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!allow_disallow_lock_);

  enum TableUpdateNullTarget {
    kIgnoreNull,
    kRemoveNull,
    kCallHandleNull
  };

  template <typename Updater, TableUpdateNullTarget kTargetNull>
  void UpdateTableWith(Updater& updater)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(allow_disallow_lock_);

  template <typename Storage, class Allocator = std::allocator<T>>
  struct ReleasableContainer;

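  // Hash and equality functors keyed on the raw object address. Both read the GcRoot without a
  // read barrier, so the table may transiently contain from-space pointers; lookups with
  // to-space pointers are handled by GetTagSlowPath / UpdateTableWithReadBarrier above.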
  struct HashGcRoot {
    size_t operator()(const art::GcRoot<art::mirror::Object>& r) const
        REQUIRES_SHARED(art::Locks::mutator_lock_) {
      return reinterpret_cast<uintptr_t>(r.Read<art::kWithoutReadBarrier>());
    }
  };

  struct EqGcRoot {
    bool operator()(const art::GcRoot<art::mirror::Object>& r1,
                    const art::GcRoot<art::mirror::Object>& r2) const
        REQUIRES_SHARED(art::Locks::mutator_lock_) {
      return r1.Read<art::kWithoutReadBarrier>() == r2.Read<art::kWithoutReadBarrier>();
    }
  };

  std::unordered_map<art::GcRoot<art::mirror::Object>,
                     T,
                     HashGcRoot,
                     EqGcRoot> tagged_objects_
      GUARDED_BY(allow_disallow_lock_)
      GUARDED_BY(art::Locks::mutator_lock_);
  // To avoid repeatedly scanning the whole table (with read barriers), remember whether we
  // already did so since the last sweep.
  bool update_since_last_sweep_;
};

}  // namespace openjdkjvmti

#endif  // ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_H_