/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "intern_table.h"

#include <memory>

#include "gc/space/image_space.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "mirror/string-inl.h"
#include "thread.h"
#include "utf.h"

namespace art {

InternTable::InternTable()
    : log_new_roots_(false), allow_new_interns_(true),
      new_intern_condition_("New intern condition", *Locks::intern_table_lock_) {
}

size_t InternTable::Size() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.size() + weak_interns_.size();
}

size_t InternTable::StrongSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.size();
}

size_t InternTable::WeakSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return weak_interns_.size();
}

void InternTable::DumpForSigQuit(std::ostream& os) const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  os << "Intern table: " << strong_interns_.size() << " strong; "
     << weak_interns_.size() << " weak\n";
}

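// Visits the strong interns as GC roots. With kVisitRootFlagAllRoots every strong intern is
// visited; with kVisitRootFlagNewRoots only the roots logged in new_strong_intern_roots_ since
// logging was enabled are visited, and any root the GC moved is re-inserted so the set stays
// consistent. Weak interns and immutable image strings are deliberately not visited here.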
void InternTable::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if ((flags & kVisitRootFlagAllRoots) != 0) {
    for (auto& strong_intern : strong_interns_) {
      const_cast<GcRoot<mirror::String>&>(strong_intern).
          VisitRoot(callback, arg, 0, kRootInternedString);
      DCHECK(!strong_intern.IsNull());
    }
  } else if ((flags & kVisitRootFlagNewRoots) != 0) {
    for (auto& root : new_strong_intern_roots_) {
      mirror::String* old_ref = root.Read<kWithoutReadBarrier>();
      root.VisitRoot(callback, arg, 0, kRootInternedString);
      mirror::String* new_ref = root.Read<kWithoutReadBarrier>();
      if (UNLIKELY(new_ref != old_ref)) {
        // The GC moved a root in the log. Need to search the strong interns and update the
        // corresponding object. This is slow, but luckily for us, this may only happen with a
        // concurrent moving GC.
        auto it = strong_interns_.find(GcRoot<mirror::String>(old_ref));
        DCHECK(it != strong_interns_.end());
        strong_interns_.erase(it);
        strong_interns_.insert(GcRoot<mirror::String>(new_ref));
      }
    }
  }

  if ((flags & kVisitRootFlagClearRootLog) != 0) {
    new_strong_intern_roots_.clear();
  }
  if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
    log_new_roots_ = true;
  } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
    log_new_roots_ = false;
  }
  // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}

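// Lookup helpers. All three expect intern_table_lock_ to be held; the shared Lookup() performs a
// read-barrier read of the matching GcRoot, or returns nullptr when there is no match.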
mirror::String* InternTable::LookupStrong(mirror::String* s) {
  return Lookup(&strong_interns_, s);
}

mirror::String* InternTable::LookupWeak(mirror::String* s) {
  // Weak interns need a read barrier because they are weak roots.
  return Lookup(&weak_interns_, s);
}

mirror::String* InternTable::Lookup(Table* table, mirror::String* s) {
  Locks::intern_table_lock_->AssertHeld(Thread::Current());
  auto it = table->find(GcRoot<mirror::String>(s));
  if (LIKELY(it != table->end())) {
    return const_cast<GcRoot<mirror::String>&>(*it).Read<kWithReadBarrier>();
  }
  return nullptr;
}

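// Inserts into the strong table. Under an active transaction the insertion is recorded so it can
// be rolled back, and while root logging is enabled the new root is also appended to
// new_strong_intern_roots_ for kVisitRootFlagNewRoots visits.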
mirror::String* InternTable::InsertStrong(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordStrongStringInsertion(s);
  }
  if (log_new_roots_) {
    new_strong_intern_roots_.push_back(GcRoot<mirror::String>(s));
  }
  strong_interns_.insert(GcRoot<mirror::String>(s));
  return s;
}

mirror::String* InternTable::InsertWeak(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringInsertion(s);
  }
  weak_interns_.insert(GcRoot<mirror::String>(s));
  return s;
}

void InternTable::RemoveStrong(mirror::String* s) {
  Remove(&strong_interns_, s);
}

void InternTable::RemoveWeak(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringRemoval(s);
  }
  Remove(&weak_interns_, s);
}

void InternTable::Remove(Table* table, mirror::String* s) {
  auto it = table->find(GcRoot<mirror::String>(s));
  DCHECK(it != table->end());
  table->erase(it);
}

// Insert/remove methods used to undo changes made during an aborted transaction.
mirror::String* InternTable::InsertStrongFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertStrong(s);
}
mirror::String* InternTable::InsertWeakFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertWeak(s);
}
void InternTable::RemoveStrongFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveStrong(s);
}
void InternTable::RemoveWeakFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveWeak(s);
}

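// Searches the boot image for an already-interned copy of |s| by walking the image dex caches:
// for each dex file, binary-search its string ids for the UTF-8 data and return the resolved
// String if the dex cache already holds one. Returns nullptr when there is no image or no match.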
static mirror::String* LookupStringFromImage(mirror::String* s)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace();
  if (image == nullptr) {
    return nullptr;  // No image present.
  }
  mirror::Object* root = image->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
  mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
  const std::string utf8 = s->ToModifiedUtf8();
  for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
    mirror::DexCache* dex_cache = dex_caches->Get(i);
    const DexFile* dex_file = dex_cache->GetDexFile();
    // Binary search the dex file for the string index.
    const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str());
    if (string_id != nullptr) {
      uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
      mirror::String* found = dex_cache->GetResolvedString(string_idx);
      if (found != nullptr) {
        return found;
      }
    }
  }
  return nullptr;
}

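// Interning can be temporarily disallowed (e.g. around GC weak-reference processing); Insert()
// blocks on new_intern_condition_ until AllowNewInterns() broadcasts that interning may resume.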
void InternTable::AllowNewInterns() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  allow_new_interns_ = true;
  new_intern_condition_.Broadcast(self);
}

void InternTable::DisallowNewInterns() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  allow_new_interns_ = false;
}

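// Common interning path. Lookup order for both strong and weak interning is: strong table, then
// boot image, then weak table. A weak intern that is re-interned strongly is promoted by moving
// it from the weak table to the strong table, so at most one copy of a given string is returned.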
mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);

  DCHECK(s != nullptr);

  while (UNLIKELY(!allow_new_interns_)) {
    new_intern_condition_.WaitHoldingLocks(self);
  }

  if (is_strong) {
    // Check the strong table for a match.
    mirror::String* strong = LookupStrong(s);
    if (strong != nullptr) {
      return strong;
    }

    // Check the image for a match.
    mirror::String* image = LookupStringFromImage(s);
    if (image != nullptr) {
      return InsertStrong(image);
    }

    // There is no match in the strong table, check the weak table.
    mirror::String* weak = LookupWeak(s);
    if (weak != nullptr) {
      // A match was found in the weak table. Promote to the strong table.
      RemoveWeak(weak);
      return InsertStrong(weak);
    }

    // No match in the strong table or the weak table. Insert into the strong
    // table.
    return InsertStrong(s);
  }

  // Check the strong table for a match.
  mirror::String* strong = LookupStrong(s);
  if (strong != nullptr) {
    return strong;
  }
  // Check the image for a match.
  mirror::String* image = LookupStringFromImage(s);
  if (image != nullptr) {
    return InsertWeak(image);
  }
  // Check the weak table for a match.
  mirror::String* weak = LookupWeak(s);
  if (weak != nullptr) {
    return weak;
  }
  // Insert into the weak table.
  return InsertWeak(s);
}

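// Convenience entry points that allocate a managed String from Modified UTF-8 data and then
// strong-intern it.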
mirror::String* InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  return InternStrong(mirror::String::AllocFromModifiedUtf8(
      Thread::Current(), utf16_length, utf8_data));
}

mirror::String* InternTable::InternStrong(const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
}

mirror::String* InternTable::InternStrong(mirror::String* s) {
  if (s == nullptr) {
    return nullptr;
  }
  return Insert(s, true);
}

mirror::String* InternTable::InternWeak(mirror::String* s) {
  if (s == nullptr) {
    return nullptr;
  }
  return Insert(s, false);
}

bool InternTable::ContainsWeak(mirror::String* s) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  const mirror::String* found = LookupWeak(s);
  return found == s;
}

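// Called by the GC to sweep the weak interns: strings the callback reports as unmarked are
// removed from the table, while moved strings have their roots updated in place.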
void InternTable::SweepInternTableWeaks(IsMarkedCallback* callback, void* arg) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  for (auto it = weak_interns_.begin(), end = weak_interns_.end(); it != end;) {
    // This does not need a read barrier because this is called by GC.
    GcRoot<mirror::String>& root = const_cast<GcRoot<mirror::String>&>(*it);
    mirror::Object* object = root.Read<kWithoutReadBarrier>();
    mirror::Object* new_object = callback(object, arg);
    if (new_object == nullptr) {
      it = weak_interns_.erase(it);
    } else {
      root.Assign(down_cast<mirror::String*>(new_object));
      ++it;
    }
  }
}

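// Hash and equality functors used by the intern tables' underlying set of GcRoot<mirror::String>.
// Both read the strings without a read barrier; in debug builds they assert that the shared
// mutator lock is held.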
std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  return static_cast<size_t>(
      const_cast<GcRoot<mirror::String>&>(root).Read<kWithoutReadBarrier>()->GetHashCode());
}

bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
                                               const GcRoot<mirror::String>& b) {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  return const_cast<GcRoot<mirror::String>&>(a).Read<kWithoutReadBarrier>()->Equals(
      const_cast<GcRoot<mirror::String>&>(b).Read<kWithoutReadBarrier>());
}

}  // namespace art