/* Copyright (C) 2017 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h.  The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef ART_OPENJDKJVMTI_JVMTI_WEAK_TABLE_INL_H_
#define ART_OPENJDKJVMTI_JVMTI_WEAK_TABLE_INL_H_

#include "jvmti_weak_table.h"

#include <limits>

#include <android-base/logging.h>

#include "art_jvmti.h"
#include "gc/allocation_listener.h"
#include "instrumentation.h"
#include "jni_env_ext-inl.h"
#include "jvmti_allocator.h"
#include "mirror/class.h"
#include "mirror/object.h"
#include "nativehelper/scoped_local_ref.h"
#include "runtime.h"

namespace openjdkjvmti {

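// JvmtiWeakTable<T> (declared in jvmti_weak_table.h) maps mirror::Object
// instances to tags of type T without keeping the objects alive: entries are
// swept together with the other system weaks, and a subclass can opt in (via
// DoesHandleNullOnSweep) to be told the tag of each object that died.
//
// A minimal usage sketch, in comment form. The subclass below is hypothetical
// and only illustrates the intended pattern:
//
//   class MyTagTable : public JvmtiWeakTable<jlong> {
//    protected:
//     bool DoesHandleNullOnSweep() override { return true; }
//     void HandleNullSweep(jlong tag) override { /* tagged object died */ }
//   };
//
//   MyTagTable table;
//   table.Set(obj, 42);                    // Tag obj (overwrites any old tag).
//   jlong tag;
//   if (table.GetTag(obj, &tag)) { ... }   // Read the tag back.
//   table.Remove(obj, /* out */ nullptr);  // Untag obj, discard the old tag.
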
template <typename T>
void JvmtiWeakTable<T>::Lock() {
  allow_disallow_lock_.ExclusiveLock(art::Thread::Current());
}
template <typename T>
void JvmtiWeakTable<T>::Unlock() {
  allow_disallow_lock_.ExclusiveUnlock(art::Thread::Current());
}
template <typename T>
void JvmtiWeakTable<T>::AssertLocked() {
  allow_disallow_lock_.AssertHeld(art::Thread::Current());
}

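// Re-reads every key in the table through a read barrier, so that all entries
// point to to-space again. Also records (in update_since_last_sweep_) that
// this has been done for the current GC cycle, so the update is not repeated.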
template <typename T>
void JvmtiWeakTable<T>::UpdateTableWithReadBarrier() {
  update_since_last_sweep_ = true;

  auto WithReadBarrierUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root,
                                    art::mirror::Object* original_obj ATTRIBUTE_UNUSED)
     REQUIRES_SHARED(art::Locks::mutator_lock_) {
    return original_root.Read<art::kWithReadBarrier>();
  };

  UpdateTableWith<decltype(WithReadBarrierUpdater), kIgnoreNull>(WithReadBarrierUpdater);
}

template <typename T>
bool JvmtiWeakTable<T>::GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, T* result) {
  // Under concurrent GC, there is a window between moving objects and sweeping of system
  // weaks in which mutators are active. We may receive a to-space object pointer in obj,
  // but still have from-space pointers in the table. Explicitly update the table once.
  // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
  UpdateTableWithReadBarrier();
  return GetTagLocked(self, obj, result);
}

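// Removes the mapping for obj, if one exists. Returns true and stores the
// removed tag through the out-parameter (when tag is non-null) on success;
// returns false if obj was not in the table.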
template <typename T>
bool JvmtiWeakTable<T>::Remove(art::mirror::Object* obj, /* out */ T* tag) {
  art::Thread* self = art::Thread::Current();
  art::MutexLock mu(self, allow_disallow_lock_);
  Wait(self);

  return RemoveLocked(self, obj, tag);
}
template <typename T>
bool JvmtiWeakTable<T>::RemoveLocked(art::mirror::Object* obj, T* tag) {
  art::Thread* self = art::Thread::Current();
  allow_disallow_lock_.AssertHeld(self);
  Wait(self);

  return RemoveLocked(self, obj, tag);
}

template <typename T>
bool JvmtiWeakTable<T>::RemoveLocked(art::Thread* self, art::mirror::Object* obj, T* tag) {
  auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
  if (it != tagged_objects_.end()) {
    if (tag != nullptr) {
      *tag = it->second;
    }
    tagged_objects_.erase(it);
    return true;
  }

  if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
    // Under concurrent GC, there is a window between moving objects and sweeping of system
    // weaks in which mutators are active. We may receive a to-space object pointer in obj,
    // but still have from-space pointers in the table. Explicitly update the table once.
    // Note: this will keep *all* objects in the table live, but should be a rare occurrence.

    // Update the table.
    UpdateTableWithReadBarrier();

    // And try again.
    return RemoveLocked(self, obj, tag);
  }

  // Not in here.
  return false;
}

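// Tags obj with new_tag, overwriting any existing tag. Returns true if an
// existing entry was updated, false if a new entry was inserted.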
template <typename T>
bool JvmtiWeakTable<T>::Set(art::mirror::Object* obj, T new_tag) {
  art::Thread* self = art::Thread::Current();
  art::MutexLock mu(self, allow_disallow_lock_);
  Wait(self);

  return SetLocked(self, obj, new_tag);
}
template <typename T>
bool JvmtiWeakTable<T>::SetLocked(art::mirror::Object* obj, T new_tag) {
  art::Thread* self = art::Thread::Current();
  allow_disallow_lock_.AssertHeld(self);
  Wait(self);

  return SetLocked(self, obj, new_tag);
}

template <typename T>
bool JvmtiWeakTable<T>::SetLocked(art::Thread* self, art::mirror::Object* obj, T new_tag) {
  auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
  if (it != tagged_objects_.end()) {
    it->second = new_tag;
    return true;
  }

  if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
    // Under concurrent GC, there is a window between moving objects and sweeping of system
    // weaks in which mutators are active. We may receive a to-space object pointer in obj,
    // but still have from-space pointers in the table. Explicitly update the table once.
    // Note: this will keep *all* objects in the table live, but should be a rare occurrence.

    // Update the table.
    UpdateTableWithReadBarrier();

    // And try again.
    return SetLocked(self, obj, new_tag);
  }

  // New element.
  auto insert_it = tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(obj), new_tag);
  DCHECK(insert_it.second);
  return false;
}

template <typename T>
void JvmtiWeakTable<T>::Sweep(art::IsMarkedVisitor* visitor) {
  if (DoesHandleNullOnSweep()) {
    SweepImpl<true>(visitor);
  } else {
    SweepImpl<false>(visitor);
  }

  // Under concurrent GC, there is a window between moving objects and sweeping of system
  // weaks in which mutators are active. Mutators may hand us to-space object pointers
  // while the table still holds from-space pointers, so we explicitly update the table
  // in that window to ensure we compare against to-space pointers. We only want to do
  // that once per cycle: once sweeping is done, all entries are to-space pointers until
  // the next GC cycle, so re-arm the explicit update for the next marking here.
  update_since_last_sweep_ = false;
}

template <typename T>
template <bool kHandleNull>
void JvmtiWeakTable<T>::SweepImpl(art::IsMarkedVisitor* visitor) {
  art::Thread* self = art::Thread::Current();
  art::MutexLock mu(self, allow_disallow_lock_);

  auto IsMarkedUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root ATTRIBUTE_UNUSED,
                             art::mirror::Object* original_obj) {
    return visitor->IsMarked(original_obj);
  };

  UpdateTableWith<decltype(IsMarkedUpdater),
                  kHandleNull ? kCallHandleNull : kRemoveNull>(IsMarkedUpdater);
}

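// Iteration safety note: with rehashing postponed (the max load factor is
// raised to FLT_MAX below), erase() only invalidates the erased iterator and
// emplace() cannot trigger a rehash, so the bucket count stays stable (the
// DCHECK below verifies this). A re-inserted entry may be visited again, but
// both updaters used with this function should be idempotent for entries that
// already point to to-space, so the loop still terminates.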
template <typename T>
template <typename Updater, typename JvmtiWeakTable<T>::TableUpdateNullTarget kTargetNull>
ALWAYS_INLINE inline void JvmtiWeakTable<T>::UpdateTableWith(Updater& updater) {
  // We optimistically hope that elements will still be well-distributed when re-inserting them.
  // So play with the map mechanics, and postpone rehashing. This avoids the need for a side
  // vector and two passes.
  float original_max_load_factor = tagged_objects_.max_load_factor();
  tagged_objects_.max_load_factor(std::numeric_limits<float>::max());
  // For checking that the raised max load factor actually does what we expect.
  size_t original_bucket_count = tagged_objects_.bucket_count();

  for (auto it = tagged_objects_.begin(); it != tagged_objects_.end();) {
    DCHECK(!it->first.IsNull());
    art::mirror::Object* original_obj = it->first.template Read<art::kWithoutReadBarrier>();
    art::mirror::Object* target_obj = updater(it->first, original_obj);
    if (original_obj != target_obj) {
      if (kTargetNull == kIgnoreNull && target_obj == nullptr) {
        // Ignore null target, don't do anything.
      } else {
        T tag = it->second;
        it = tagged_objects_.erase(it);
        if (target_obj != nullptr) {
          tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(target_obj), tag);
          DCHECK_EQ(original_bucket_count, tagged_objects_.bucket_count());
        } else if (kTargetNull == kCallHandleNull) {
          HandleNullSweep(tag);
        }
        continue;  // Iterator was implicitly updated by erase.
      }
    }
    ++it;
  }

  tagged_objects_.max_load_factor(original_max_load_factor);
  // TODO: consider rehash here.
}

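// Minimal growable array whose backing storage comes from a JVMTI allocator,
// so that Release() can hand ownership of the raw buffer straight to the
// caller, as GetTaggedObjects' out-arrays require. Storage must be trivially
// copyable, since Resize() moves elements with memcpy.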
template <typename T>
template <typename Storage, class Allocator>
struct JvmtiWeakTable<T>::ReleasableContainer {
  using allocator_type = Allocator;

  explicit ReleasableContainer(const allocator_type& alloc, size_t reserve = 10)
      : allocator(alloc),
        data(reserve > 0 ? allocator.allocate(reserve) : nullptr),
        size(0),
        capacity(reserve) {
  }

  ~ReleasableContainer() {
    if (data != nullptr) {
      allocator.deallocate(data, capacity);
      capacity = 0;
      size = 0;
    }
  }

  Storage* Release() {
    Storage* tmp = data;

    data = nullptr;
    size = 0;
    capacity = 0;

    return tmp;
  }

  void Resize(size_t new_capacity) {
    CHECK_GT(new_capacity, capacity);

    Storage* tmp = allocator.allocate(new_capacity);
    DCHECK(tmp != nullptr);
    if (data != nullptr) {
      memcpy(tmp, data, sizeof(Storage) * size);
    }
    Storage* old = data;
    data = tmp;
    allocator.deallocate(old, capacity);
    capacity = new_capacity;
  }

  void Pushback(const Storage& elem) {
    if (size == capacity) {
      size_t new_capacity = 2 * capacity + 1;
      Resize(new_capacity);
    }
    data[size++] = elem;
  }

  Allocator allocator;
  Storage* data;
  size_t size;
  size_t capacity;
};

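// Returns all tagged objects whose tag matches one of the tag_count entries
// in tags; a tag_count of zero selects every tagged object. Either of
// object_result_ptr and tag_result_ptr may be null if the caller does not
// need that output. The result arrays are allocated through the jvmti_env
// allocator, and a local reference is created for each returned object.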
template <typename T>
jvmtiError JvmtiWeakTable<T>::GetTaggedObjects(jvmtiEnv* jvmti_env,
                                               jint tag_count,
                                               const T* tags,
                                               jint* count_ptr,
                                               jobject** object_result_ptr,
                                               T** tag_result_ptr) {
  if (tag_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  // Check the pointer arguments before tags is dereferenced below.
  if (tags == nullptr) {
    return ERR(NULL_POINTER);
  }
  if (count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }
  if (tag_count > 0) {
    for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
      if (tags[i] == 0) {
        return ERR(ILLEGAL_ARGUMENT);
      }
    }
  }

  art::Thread* self = art::Thread::Current();
  art::MutexLock mu(self, allow_disallow_lock_);
  Wait(self);

  art::JNIEnvExt* jni_env = self->GetJniEnv();

  constexpr size_t kDefaultSize = 10;
  size_t initial_object_size;
  size_t initial_tag_size;
  if (tag_count == 0) {
    initial_object_size = (object_result_ptr != nullptr) ? tagged_objects_.size() : 0;
    initial_tag_size = (tag_result_ptr != nullptr) ? tagged_objects_.size() : 0;
  } else {
    initial_object_size = initial_tag_size = kDefaultSize;
  }
  JvmtiAllocator<void> allocator(jvmti_env);
  ReleasableContainer<jobject, JvmtiAllocator<jobject>> selected_objects(allocator,
                                                                         initial_object_size);
  ReleasableContainer<T, JvmtiAllocator<T>> selected_tags(allocator, initial_tag_size);

  size_t count = 0;
  for (auto& pair : tagged_objects_) {
    bool select;
    if (tag_count > 0) {
      select = false;
      for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
        if (tags[i] == pair.second) {
          select = true;
          break;
        }
      }
    } else {
      select = true;
    }

    if (select) {
      art::mirror::Object* obj = pair.first.template Read<art::kWithReadBarrier>();
      if (obj != nullptr) {
        count++;
        if (object_result_ptr != nullptr) {
          selected_objects.Pushback(jni_env->AddLocalReference<jobject>(obj));
        }
        if (tag_result_ptr != nullptr) {
          selected_tags.Pushback(pair.second);
        }
      }
    }
  }

  if (object_result_ptr != nullptr) {
    *object_result_ptr = selected_objects.Release();
  }
  if (tag_result_ptr != nullptr) {
    *tag_result_ptr = selected_tags.Release();
  }
  *count_ptr = static_cast<jint>(count);
  return ERR(NONE);
}

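// Returns an arbitrary live object carrying the given tag, or null if there
// is none. This is a linear scan over the whole table.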
template <typename T>
art::mirror::Object* JvmtiWeakTable<T>::Find(T tag) {
  art::Thread* self = art::Thread::Current();
  art::MutexLock mu(self, allow_disallow_lock_);
  Wait(self);

  for (auto& pair : tagged_objects_) {
    if (tag == pair.second) {
      art::mirror::Object* obj = pair.first.template Read<art::kWithReadBarrier>();
      if (obj != nullptr) {
        return obj;
      }
    }
  }
  return nullptr;
}

}  // namespace openjdkjvmti

#endif  // ART_OPENJDKJVMTI_JVMTI_WEAK_TABLE_INL_H_