/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "intern_table.h"

#include <memory>
#include <numeric>  // For std::accumulate, used by Table::Size() below.

#include "gc_root-inl.h"
#include "gc/collector/garbage_collector.h"
#include "gc/space/image_space.h"
#include "gc/weak_root_state.h"
#include "image-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "mirror/string-inl.h"
#include "thread.h"
#include "utf.h"

namespace art {

InternTable::InternTable()
    : images_added_to_intern_table_(false),
      log_new_roots_(false),
      weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
      weak_root_state_(gc::kWeakRootStateNormal) {
}

size_t InternTable::Size() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.Size() + weak_interns_.Size();
}

size_t InternTable::StrongSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.Size();
}

size_t InternTable::WeakSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return weak_interns_.Size();
}

void InternTable::DumpForSigQuit(std::ostream& os) const {
  os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
}

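// Visits the strong intern roots for the GC. With kVisitRootFlagAllRoots every strong
// entry is visited; with kVisitRootFlagNewRoots only the entries logged since logging was
// enabled are visited, and entries moved by a concurrent moving GC are re-inserted so the
// hash set stays consistent with the new object addresses.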
void InternTable::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if ((flags & kVisitRootFlagAllRoots) != 0) {
    strong_interns_.VisitRoots(visitor);
  } else if ((flags & kVisitRootFlagNewRoots) != 0) {
    for (auto& root : new_strong_intern_roots_) {
      mirror::String* old_ref = root.Read<kWithoutReadBarrier>();
      root.VisitRoot(visitor, RootInfo(kRootInternedString));
      mirror::String* new_ref = root.Read<kWithoutReadBarrier>();
      if (new_ref != old_ref) {
        // The GC moved a root in the log. We need to search the strong interns and update
        // the corresponding entry. This is slow, but fortunately it can only happen with a
        // concurrent moving GC.
        strong_interns_.Remove(old_ref);
        strong_interns_.Insert(new_ref);
      }
    }
  }
  if ((flags & kVisitRootFlagClearRootLog) != 0) {
    new_strong_intern_roots_.clear();
  }
  if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
    log_new_roots_ = true;
  } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
    log_new_roots_ = false;
  }
  // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}

mirror::String* InternTable::LookupWeak(Thread* self, mirror::String* s) {
  MutexLock mu(self, *Locks::intern_table_lock_);
  return LookupWeakLocked(s);
}

mirror::String* InternTable::LookupStrong(Thread* self, mirror::String* s) {
  MutexLock mu(self, *Locks::intern_table_lock_);
  return LookupStrongLocked(s);
}

mirror::String* InternTable::LookupStrong(Thread* self,
                                          uint32_t utf16_length,
                                          const char* utf8_data) {
  DCHECK_EQ(utf16_length, CountModifiedUtf8Chars(utf8_data));
  Utf8String string(utf16_length,
                    utf8_data,
                    ComputeUtf16HashFromModifiedUtf8(utf8_data, utf16_length));
  MutexLock mu(self, *Locks::intern_table_lock_);
  return strong_interns_.Find(string);
}

mirror::String* InternTable::LookupWeakLocked(mirror::String* s) {
  return weak_interns_.Find(s);
}

mirror::String* InternTable::LookupStrongLocked(mirror::String* s) {
  return strong_interns_.Find(s);
}

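// Starts a fresh backing table in both the strong and weak sets, so the existing tables
// are no longer written to. New interns always go into the last table (see
// Table::Insert), which keeps earlier, image-backed tables untouched and their memory
// pages clean.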
void InternTable::AddNewTable() {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.AddNewTable();
  strong_interns_.AddNewTable();
}

mirror::String* InternTable::InsertStrong(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordStrongStringInsertion(s);
  }
  if (log_new_roots_) {
    new_strong_intern_roots_.push_back(GcRoot<mirror::String>(s));
  }
  strong_interns_.Insert(s);
  return s;
}

mirror::String* InternTable::InsertWeak(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringInsertion(s);
  }
  weak_interns_.Insert(s);
  return s;
}

void InternTable::RemoveStrong(mirror::String* s) {
  strong_interns_.Remove(s);
}

void InternTable::RemoveWeak(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringRemoval(s);
  }
  weak_interns_.Remove(s);
}

// Insert/remove methods used to undo changes made during an aborted transaction.
mirror::String* InternTable::InsertStrongFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertStrong(s);
}
mirror::String* InternTable::InsertWeakFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertWeak(s);
}
void InternTable::RemoveStrongFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveStrong(s);
}
void InternTable::RemoveWeakFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveWeak(s);
}

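// Populates the intern table from the given image spaces. The fast path deserializes the
// interned-strings section that was written into the image; the fallback walks every dex
// cache in the image and inserts each resolved string individually.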
void InternTable::AddImagesStringsToTable(const std::vector<gc::space::ImageSpace*>& image_spaces) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  for (gc::space::ImageSpace* image_space : image_spaces) {
    const ImageHeader* const header = &image_space->GetImageHeader();
    // Check if we have the interned strings section.
    const ImageSection& section = header->GetImageSection(ImageHeader::kSectionInternedStrings);
    if (section.Size() > 0) {
      AddTableFromMemoryLocked(image_space->Begin() + section.Offset());
    } else {
      // TODO: Delete this logic?
      mirror::Object* root = header->GetImageRoot(ImageHeader::kDexCaches);
      mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
      for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
        mirror::DexCache* dex_cache = dex_caches->Get(i);
        const size_t num_strings = dex_cache->NumStrings();
        for (size_t j = 0; j < num_strings; ++j) {
          mirror::String* image_string = dex_cache->GetResolvedString(j);
          if (image_string != nullptr) {
            mirror::String* found = LookupStrongLocked(image_string);
            if (found == nullptr) {
              InsertStrong(image_string);
            } else {
              DCHECK_EQ(found, image_string);
            }
          }
        }
      }
    }
  }
  images_added_to_intern_table_ = true;
}

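// Searches the boot image dex caches for a string equal to s. Only used before the image
// intern tables have been added to this table; once they have been, a regular lookup in
// strong_interns_ / weak_interns_ finds image strings directly.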
mirror::String* InternTable::LookupStringFromImage(mirror::String* s) {
  DCHECK(!images_added_to_intern_table_);
  const std::vector<gc::space::ImageSpace*>& image_spaces =
      Runtime::Current()->GetHeap()->GetBootImageSpaces();
  if (image_spaces.empty()) {
    return nullptr;  // No image present.
  }
  const std::string utf8 = s->ToModifiedUtf8();
  for (gc::space::ImageSpace* image_space : image_spaces) {
    mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
    mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
    for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
      mirror::DexCache* dex_cache = dex_caches->Get(i);
      const DexFile* dex_file = dex_cache->GetDexFile();
      // Binary search the dex file for the string index.
      const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str());
      if (string_id != nullptr) {
        uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
        // GetResolvedString() contains a read barrier.
        mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
        if (image_string != nullptr) {
          return image_string;
        }
      }
    }
  }
  return nullptr;
}

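// Wakes up threads blocked in WaitUntilAccessible(). Only meaningful with read barriers
// (concurrent copying GC), where weak ref access is disabled during parts of the
// collection.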
void InternTable::BroadcastForNewInterns() {
  CHECK(kUseReadBarrier);
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  weak_intern_condition_.Broadcast(self);
}

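// Blocks until weak root access is re-enabled. The intern table lock is released while
// waiting (and the thread is marked as suspended) so the GC can make progress; the lock
// is re-acquired before returning to keep the caller's locking expectations intact.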
void InternTable::WaitUntilAccessible(Thread* self) {
  Locks::intern_table_lock_->ExclusiveUnlock(self);
  {
    ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
    MutexLock mu(self, *Locks::intern_table_lock_);
    while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) {
      weak_intern_condition_.Wait(self);
    }
  }
  Locks::intern_table_lock_->ExclusiveLock(self);
}

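// The core interning routine. Lookup order: strong table, then weak table (promoting the
// entry if a strong intern was requested), then the boot image (until image strings have
// been merged into this table), and finally a fresh insert. If weak roots are currently
// inaccessible because of a GC, the thread waits here, holding s in a handle since a
// moving GC may relocate it while we wait.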
mirror::String* InternTable::Insert(mirror::String* s, bool is_strong, bool holding_locks) {
  if (s == nullptr) {
    return nullptr;
  }
  Thread* const self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  if (kDebugLocking && !holding_locks) {
    Locks::mutator_lock_->AssertSharedHeld(self);
    CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
  }
  while (true) {
    if (holding_locks) {
      if (!kUseReadBarrier) {
        CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
      } else {
        CHECK(self->GetWeakRefAccessEnabled());
      }
    }
    // Check the strong table for a match.
    mirror::String* strong = LookupStrongLocked(s);
    if (strong != nullptr) {
      return strong;
    }
    if ((!kUseReadBarrier && weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) ||
        (kUseReadBarrier && self->GetWeakRefAccessEnabled())) {
      break;
    }
    // weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
    // cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
    // cleared.
    CHECK(!holding_locks);
    StackHandleScope<1> hs(self);
    auto h = hs.NewHandleWrapper(&s);
    WaitUntilAccessible(self);
  }
  if (!kUseReadBarrier) {
    CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
  } else {
    CHECK(self->GetWeakRefAccessEnabled());
  }
  // There is no match in the strong table, check the weak table.
  mirror::String* weak = LookupWeakLocked(s);
  if (weak != nullptr) {
    if (is_strong) {
      // A match was found in the weak table. Promote to the strong table.
      RemoveWeak(weak);
      return InsertStrong(weak);
    }
    return weak;
  }
  // Check the image for a match.
  if (!images_added_to_intern_table_) {
    mirror::String* const image_string = LookupStringFromImage(s);
    if (image_string != nullptr) {
      return is_strong ? InsertStrong(image_string) : InsertWeak(image_string);
    }
  }
  // No match in the strong table or the weak table. Insert into the strong / weak table.
  return is_strong ? InsertStrong(s) : InsertWeak(s);
}

mirror::String* InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  return InternStrong(mirror::String::AllocFromModifiedUtf8(
      Thread::Current(), utf16_length, utf8_data));
}

mirror::String* InternTable::InternStrong(const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
}

mirror::String* InternTable::InternStrongImageString(mirror::String* s) {
  // May be holding the heap bitmap lock.
  return Insert(s, true, true);
}

mirror::String* InternTable::InternStrong(mirror::String* s) {
  return Insert(s, true, false);
}

mirror::String* InternTable::InternWeak(mirror::String* s) {
  return Insert(s, false, false);
}

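// Usage sketch (illustrative only; actual call sites live throughout the runtime):
//   InternTable* table = Runtime::Current()->GetInternTable();
//   mirror::String* s = table->InternStrong("hello");  // canonical instance for "hello"
//   CHECK_EQ(s, table->InternStrong("hello"));         // repeated interning is idempotent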
bool InternTable::ContainsWeak(mirror::String* s) {
  return LookupWeak(Thread::Current(), s) == s;
}

void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.SweepWeaks(visitor);
}

size_t InternTable::AddTableFromMemory(const uint8_t* ptr) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return AddTableFromMemoryLocked(ptr);
}

size_t InternTable::AddTableFromMemoryLocked(const uint8_t* ptr) {
  return strong_interns_.AddTableFromMemory(ptr);
}

size_t InternTable::WriteToMemory(uint8_t* ptr) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.WriteToMemory(ptr);
}

std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  return static_cast<size_t>(root.Read()->GetHashCode());
}

bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
                                               const GcRoot<mirror::String>& b) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  return a.Read()->Equals(b.Read());
}

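// Heterogeneous comparator: compares an interned mirror::String against a raw
// modified-UTF-8 key without allocating a temporary string. The cheap length check runs
// first; the code-point comparison only happens when the UTF-16 lengths match.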
bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
                                               const Utf8String& b) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  mirror::String* a_string = a.Read();
  uint32_t a_length = static_cast<uint32_t>(a_string->GetLength());
  if (a_length != b.GetUtf16Length()) {
    return false;
  }
  const uint16_t* a_value = a_string->GetValue();
  return CompareModifiedUtf8ToUtf16AsCodePointValues(b.GetUtf8Data(), a_value, a_length) == 0;
}

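// Deserializes a table that was previously written with WriteToMemory(), e.g. the
// interned-strings section of an image. The set reads directly from ptr without copying
// the data, so the backing memory must outlive this table.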
size_t InternTable::Table::AddTableFromMemory(const uint8_t* ptr) {
  size_t read_count = 0;
  UnorderedSet set(ptr, /*make copy*/false, &read_count);
  if (set.Empty()) {
    // Avoid inserting empty sets.
    return read_count;
  }
  // TODO: Disable this for app images if app images have intern tables.
  static constexpr bool kCheckDuplicates = true;
  if (kCheckDuplicates) {
    for (GcRoot<mirror::String>& string : set) {
      CHECK(Find(string.Read()) == nullptr) << "Already found " << string.Read()->ToModifiedUtf8();
    }
  }
  // Insert at the front since new interns are added at the back (the last table).
  tables_.insert(tables_.begin(), std::move(set));
  return read_count;
}

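// Serializes the table. If there are multiple backing sets, they are first merged into a
// single combined set so that the written image contains one flat table.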
size_t InternTable::Table::WriteToMemory(uint8_t* ptr) {
  if (tables_.empty()) {
    return 0;
  }
  UnorderedSet* table_to_write;
  UnorderedSet combined;
  if (tables_.size() > 1) {
    table_to_write = &combined;
    for (UnorderedSet& table : tables_) {
      for (GcRoot<mirror::String>& string : table) {
        combined.Insert(string);
      }
    }
  } else {
    table_to_write = &tables_.back();
  }
  return table_to_write->WriteToMemory(ptr);
}

void InternTable::Table::Remove(mirror::String* s) {
  for (UnorderedSet& table : tables_) {
    auto it = table.Find(GcRoot<mirror::String>(s));
    if (it != table.end()) {
      table.Erase(it);
      return;
    }
  }
  LOG(FATAL) << "Attempting to remove non-interned string " << s->ToModifiedUtf8();
}

mirror::String* InternTable::Table::Find(mirror::String* s) {
  Locks::intern_table_lock_->AssertHeld(Thread::Current());
  for (UnorderedSet& table : tables_) {
    auto it = table.Find(GcRoot<mirror::String>(s));
    if (it != table.end()) {
      return it->Read();
    }
  }
  return nullptr;
}

mirror::String* InternTable::Table::Find(const Utf8String& string) {
  Locks::intern_table_lock_->AssertHeld(Thread::Current());
  for (UnorderedSet& table : tables_) {
    auto it = table.Find(string);
    if (it != table.end()) {
      return it->Read();
    }
  }
  return nullptr;
}

void InternTable::Table::AddNewTable() {
  tables_.push_back(UnorderedSet());
}

void InternTable::Table::Insert(mirror::String* s) {
  // Always insert into the last table; the image tables come before it, and we avoid
  // inserting into those to prevent dirty pages.
  DCHECK(!tables_.empty());
  tables_.back().Insert(GcRoot<mirror::String>(s));
}

void InternTable::Table::VisitRoots(RootVisitor* visitor) {
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
      visitor, RootInfo(kRootInternedString));
  for (UnorderedSet& table : tables_) {
    for (auto& intern : table) {
      buffered_visitor.VisitRoot(intern);
    }
  }
}

void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) {
  for (UnorderedSet& table : tables_) {
    SweepWeaks(&table, visitor);
  }
}

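// Sweeps one backing set: entries whose objects are no longer marked are erased, and
// entries whose objects were moved by the GC are updated to the new addresses.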
void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) {
  for (auto it = set->begin(), end = set->end(); it != end;) {
    // This does not need a read barrier because this is called by GC.
    mirror::Object* object = it->Read<kWithoutReadBarrier>();
    mirror::Object* new_object = visitor->IsMarked(object);
    if (new_object == nullptr) {
      it = set->Erase(it);
    } else {
      *it = GcRoot<mirror::String>(new_object->AsString());
      ++it;
    }
  }
}

size_t InternTable::Table::Size() const {
  return std::accumulate(tables_.begin(),
                         tables_.end(),
                         0U,
                         [](size_t sum, const UnorderedSet& set) {
                           return sum + set.Size();
                         });
}

void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  ChangeWeakRootStateLocked(new_state);
}

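// Only used without read barriers; the concurrent copying GC uses the per-thread weak ref
// access flag instead. Waiters are woken as soon as weak root reads become legal again.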
void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
  CHECK(!kUseReadBarrier);
  weak_root_state_ = new_state;
  if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
    weak_intern_condition_.Broadcast(Thread::Current());
  }
}

InternTable::Table::Table() {
  Runtime* const runtime = Runtime::Current();
  // Initial table.
  tables_.push_back(UnorderedSet());
  tables_.back().SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
                               runtime->GetHashTableMaxLoadFactor());
}

}  // namespace art