/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "intern_table.h"

#include <memory>

#include "gc/space/image_space.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "mirror/string-inl.h"
#include "thread.h"
#include "utf.h"

namespace art {

InternTable::InternTable()
    : image_added_to_intern_table_(false), log_new_roots_(false),
      allow_new_interns_(true),
      new_intern_condition_("New intern condition", *Locks::intern_table_lock_) {
}

size_t InternTable::Size() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.Size() + weak_interns_.Size();
}

size_t InternTable::StrongSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.Size();
}

size_t InternTable::WeakSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return weak_interns_.Size();
}

void InternTable::DumpForSigQuit(std::ostream& os) const {
  os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
}

void InternTable::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if ((flags & kVisitRootFlagAllRoots) != 0) {
    strong_interns_.VisitRoots(callback, arg);
  } else if ((flags & kVisitRootFlagNewRoots) != 0) {
    for (auto& root : new_strong_intern_roots_) {
      mirror::String* old_ref = root.Read<kWithoutReadBarrier>();
      root.VisitRoot(callback, arg, RootInfo(kRootInternedString));
      mirror::String* new_ref = root.Read<kWithoutReadBarrier>();
      if (new_ref != old_ref) {
        // The GC moved a root in the log. Need to search the strong interns and update the
        // corresponding object. This is slow, but luckily for us, this may only happen with a
        // concurrent moving GC.
        strong_interns_.Remove(old_ref);
        strong_interns_.Insert(new_ref);
      }
    }
  }
  if ((flags & kVisitRootFlagClearRootLog) != 0) {
    new_strong_intern_roots_.clear();
  }
  if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
    log_new_roots_ = true;
  } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
    log_new_roots_ = false;
  }
  // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}
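
// Illustrative, commented-out sketch (not part of the original source): one way a concurrent
// moving GC could drive the flags above. It visits all roots while turning on logging, then in
// a later pause revisits only strings interned in the meantime and clears the log. The
// visitor/arg pair stands for whatever root callback the collector already uses.
//
//   intern_table->VisitRoots(visitor, arg, static_cast<VisitRootFlags>(
//       kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
//   // ... concurrent phase: InsertStrong() records new roots in new_strong_intern_roots_ ...
//   intern_table->VisitRoots(visitor, arg, static_cast<VisitRootFlags>(
//       kVisitRootFlagNewRoots | kVisitRootFlagClearRootLog | kVisitRootFlagStopLoggingNewRoots));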

mirror::String* InternTable::LookupStrong(mirror::String* s) {
  return strong_interns_.Find(s);
}

mirror::String* InternTable::LookupWeak(mirror::String* s) {
  return weak_interns_.Find(s);
}

void InternTable::SwapPostZygoteWithPreZygote() {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.SwapPostZygoteWithPreZygote();
  strong_interns_.SwapPostZygoteWithPreZygote();
}

mirror::String* InternTable::InsertStrong(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordStrongStringInsertion(s);
  }
  if (log_new_roots_) {
    new_strong_intern_roots_.push_back(GcRoot<mirror::String>(s));
  }
  strong_interns_.Insert(s);
  return s;
}

mirror::String* InternTable::InsertWeak(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringInsertion(s);
  }
  weak_interns_.Insert(s);
  return s;
}

void InternTable::RemoveStrong(mirror::String* s) {
  strong_interns_.Remove(s);
}

void InternTable::RemoveWeak(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringRemoval(s);
  }
  weak_interns_.Remove(s);
}

// Insert/remove methods used to undo changes made during an aborted transaction.
mirror::String* InternTable::InsertStrongFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertStrong(s);
}
mirror::String* InternTable::InsertWeakFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertWeak(s);
}
void InternTable::RemoveStrongFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveStrong(s);
}
void InternTable::RemoveWeakFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveWeak(s);
}

void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space) {
  CHECK(image_space != nullptr);
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if (!image_added_to_intern_table_) {
    mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
    mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
    for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
      mirror::DexCache* dex_cache = dex_caches->Get(i);
      const DexFile* dex_file = dex_cache->GetDexFile();
      const size_t num_strings = dex_file->NumStringIds();
      for (size_t j = 0; j < num_strings; ++j) {
        mirror::String* image_string = dex_cache->GetResolvedString(j);
        if (image_string != nullptr) {
          mirror::String* found = LookupStrong(image_string);
          if (found == nullptr) {
            InsertStrong(image_string);
          } else {
            DCHECK_EQ(found, image_string);
          }
        }
      }
    }
    image_added_to_intern_table_ = true;
  }
}
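
// Illustrative, commented-out sketch (not part of the original source): a caller populating
// the table from the boot image. The exact call site is an assumption here; the point is that
// the image space comes from the heap and the operation is made idempotent by
// image_added_to_intern_table_.
//
//   gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
//   if (image_space != nullptr) {
//     Runtime::Current()->GetInternTable()->AddImageStringsToTable(image_space);
//   }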

mirror::String* InternTable::LookupStringFromImage(mirror::String* s)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (image_added_to_intern_table_) {
    return nullptr;
  }
  gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace();
  if (image == nullptr) {
    return nullptr;  // No image present.
  }
  mirror::Object* root = image->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
  mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
  const std::string utf8 = s->ToModifiedUtf8();
  for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
    mirror::DexCache* dex_cache = dex_caches->Get(i);
    const DexFile* dex_file = dex_cache->GetDexFile();
    // Binary search the dex file for the string index.
    const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str());
    if (string_id != nullptr) {
      uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
      mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
      if (image_string != nullptr) {
        return image_string;
      }
    }
  }
  return nullptr;
}

void InternTable::AllowNewInterns() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  allow_new_interns_ = true;
  new_intern_condition_.Broadcast(self);
}

void InternTable::DisallowNewInterns() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  allow_new_interns_ = false;
}

mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) {
  if (s == nullptr) {
    return nullptr;
  }
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  while (UNLIKELY(!allow_new_interns_)) {
    new_intern_condition_.WaitHoldingLocks(self);
  }
  // Check the strong table for a match.
  mirror::String* strong = LookupStrong(s);
  if (strong != nullptr) {
    return strong;
  }
  // Check the image for a match.
  mirror::String* image = LookupStringFromImage(s);
  if (image != nullptr) {
    return is_strong ? InsertStrong(image) : InsertWeak(image);
  }
  // There is no match in the strong table; check the weak table.
  mirror::String* weak = LookupWeak(s);
  if (weak != nullptr) {
    if (is_strong) {
      // A match was found in the weak table. Promote to the strong table.
      RemoveWeak(weak);
      return InsertStrong(weak);
    }
    return weak;
  }
  // No match in the strong table or the weak table. Insert into the strong / weak table.
  return is_strong ? InsertStrong(s) : InsertWeak(s);
}

mirror::String* InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  return InternStrong(mirror::String::AllocFromModifiedUtf8(
      Thread::Current(), utf16_length, utf8_data));
}

mirror::String* InternTable::InternStrong(const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
}

mirror::String* InternTable::InternStrong(mirror::String* s) {
  return Insert(s, true);
}

mirror::String* InternTable::InternWeak(mirror::String* s) {
  return Insert(s, false);
}
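
// Illustrative, commented-out sketch (not part of the original source): typical caller-side
// use of the public interning entry points. The literal and variable names are made up; the
// caller must hold the mutator lock, as for any code that touches mirror objects.
//
//   InternTable* intern_table = Runtime::Current()->GetInternTable();
//   mirror::String* class_name = intern_table->InternStrong("java.lang.Object");  // strong
//   mirror::String* runtime_str = intern_table->InternWeak(some_string);  // collectable weak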

bool InternTable::ContainsWeak(mirror::String* s) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return LookupWeak(s) == s;
}

void InternTable::SweepInternTableWeaks(IsMarkedCallback* callback, void* arg) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.SweepWeaks(callback, arg);
}
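
// Illustrative, commented-out sketch (not part of the original source): the contract assumed
// of the IsMarkedCallback passed in above. A hypothetical callback returns nullptr for a dead
// object, whose weak entry is then erased, and the possibly-moved live address otherwise,
// which Table::SweepWeaks below writes back into the entry.
//
//   mirror::Object* IsMarkedSketch(mirror::Object* obj, void* arg) {
//     auto* collector = reinterpret_cast<SomeCollector*>(arg);  // hypothetical collector type
//     return collector->IsLive(obj) ? collector->LiveAddress(obj) : nullptr;  // hypothetical
//   }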

std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  return static_cast<size_t>(root.Read()->GetHashCode());
}

bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
                                               const GcRoot<mirror::String>& b) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  return a.Read()->Equals(b.Read());
}
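
// Illustrative, commented-out sketch (not part of the original source): the functor above
// supplies both the hash and the equality operation for the string sets. Conceptually it plays
// the same role as the Hash and KeyEqual parameters of a standard container:
//
//   std::unordered_set<GcRoot<mirror::String>,
//                      InternTable::StringHashEquals,   // Hash
//                      InternTable::StringHashEquals>   // KeyEqual
//       conceptual_interns;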

void InternTable::Table::Remove(mirror::String* s) {
  auto it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
  if (it != post_zygote_table_.end()) {
    post_zygote_table_.Erase(it);
  } else {
    it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
    DCHECK(it != pre_zygote_table_.end());
    pre_zygote_table_.Erase(it);
  }
}

mirror::String* InternTable::Table::Find(mirror::String* s) {
  Locks::intern_table_lock_->AssertHeld(Thread::Current());
  auto it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
  if (it != pre_zygote_table_.end()) {
    return it->Read();
  }
  it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
  if (it != post_zygote_table_.end()) {
    return it->Read();
  }
  return nullptr;
}

void InternTable::Table::SwapPostZygoteWithPreZygote() {
  CHECK(pre_zygote_table_.Empty());
  std::swap(pre_zygote_table_, post_zygote_table_);
  VLOG(heap) << "Swapping " << pre_zygote_table_.Size() << " interns to the pre zygote table";
}

void InternTable::Table::Insert(mirror::String* s) {
  // Always insert into the post zygote table; it is swapped to become the pre zygote table
  // when the zygote is created.
  post_zygote_table_.Insert(GcRoot<mirror::String>(s));
}

void InternTable::Table::VisitRoots(RootCallback* callback, void* arg) {
  for (auto& intern : pre_zygote_table_) {
    intern.VisitRoot(callback, arg, RootInfo(kRootInternedString));
  }
  for (auto& intern : post_zygote_table_) {
    intern.VisitRoot(callback, arg, RootInfo(kRootInternedString));
  }
}

void InternTable::Table::SweepWeaks(IsMarkedCallback* callback, void* arg) {
  SweepWeaks(&pre_zygote_table_, callback, arg);
  SweepWeaks(&post_zygote_table_, callback, arg);
}

void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedCallback* callback, void* arg) {
  for (auto it = set->begin(), end = set->end(); it != end;) {
    // This does not need a read barrier because this is called by GC.
    mirror::Object* object = it->Read<kWithoutReadBarrier>();
    mirror::Object* new_object = callback(object, arg);
    if (new_object == nullptr) {
      it = set->Erase(it);
    } else {
      *it = GcRoot<mirror::String>(new_object->AsString());
      ++it;
    }
  }
}

size_t InternTable::Table::Size() const {
  return pre_zygote_table_.Size() + post_zygote_table_.Size();
}

}  // namespace art