/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "intern_table.h"

#include <cstring>
#include <memory>
#include <numeric>

#include "dex/utf.h"
#include "gc/collector/garbage_collector.h"
#include "gc/space/image_space.h"
#include "gc/weak_root_state.h"
#include "gc_root-inl.h"
#include "image-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "object_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"

namespace art {

InternTable::InternTable()
    : log_new_roots_(false),
      weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
      weak_root_state_(gc::kWeakRootStateNormal) {
}

size_t InternTable::Size() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.Size() + weak_interns_.Size();
}

size_t InternTable::StrongSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.Size();
}

size_t InternTable::WeakSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return weak_interns_.Size();
}

void InternTable::DumpForSigQuit(std::ostream& os) const {
  os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
}

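// Roots are visited according to the flags below. A concurrent moving
// collector might drive the table roughly as follows (an illustrative sketch
// of the protocol, not actual GC code):
//
//   table->VisitRoots(v, kVisitRootFlagAllRoots |
//                        kVisitRootFlagStartLoggingNewRoots);  // mark phase
//   ... mutators keep interning; new strong roots are logged ...
//   table->VisitRoots(v, kVisitRootFlagNewRoots);              // re-mark
//   table->VisitRoots(v, kVisitRootFlagClearRootLog |
//                        kVisitRootFlagStopLoggingNewRoots);   // cleanup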
void InternTable::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if ((flags & kVisitRootFlagAllRoots) != 0) {
    strong_interns_.VisitRoots(visitor);
  } else if ((flags & kVisitRootFlagNewRoots) != 0) {
    for (auto& root : new_strong_intern_roots_) {
      ObjPtr<mirror::String> old_ref = root.Read<kWithoutReadBarrier>();
      root.VisitRoot(visitor, RootInfo(kRootInternedString));
      ObjPtr<mirror::String> new_ref = root.Read<kWithoutReadBarrier>();
      if (new_ref != old_ref) {
        // The GC moved a root in the log. Need to search the strong interns and update the
        // corresponding object. This is slow, but luckily for us, this may only happen with a
        // concurrent moving GC.
        strong_interns_.Remove(old_ref);
        strong_interns_.Insert(new_ref);
      }
    }
  }
  if ((flags & kVisitRootFlagClearRootLog) != 0) {
    new_strong_intern_roots_.clear();
  }
  if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
    log_new_roots_ = true;
  } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
    log_new_roots_ = false;
  }
  // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}

ObjPtr<mirror::String> InternTable::LookupWeak(Thread* self, ObjPtr<mirror::String> s) {
  MutexLock mu(self, *Locks::intern_table_lock_);
  return LookupWeakLocked(s);
}

ObjPtr<mirror::String> InternTable::LookupStrong(Thread* self, ObjPtr<mirror::String> s) {
  MutexLock mu(self, *Locks::intern_table_lock_);
  return LookupStrongLocked(s);
}

ObjPtr<mirror::String> InternTable::LookupStrong(Thread* self,
                                                 uint32_t utf16_length,
                                                 const char* utf8_data) {
  DCHECK_EQ(utf16_length, CountModifiedUtf8Chars(utf8_data));
  Utf8String string(utf16_length,
                    utf8_data,
                    ComputeUtf16HashFromModifiedUtf8(utf8_data, utf16_length));
  MutexLock mu(self, *Locks::intern_table_lock_);
  return strong_interns_.Find(string);
}

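// A hypothetical caller can use the overload above to probe the table without
// allocating a managed string first (a sketch; the fast/slow-path split is the
// caller's, not part of this API):
//
//   ObjPtr<mirror::String> s = table->LookupStrong(self, 3u, "abc");
//   if (s == nullptr) {
//     // Slow path: allocate a mirror::String, then intern it.
//   }
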
ObjPtr<mirror::String> InternTable::LookupWeakLocked(ObjPtr<mirror::String> s) {
  return weak_interns_.Find(s);
}

ObjPtr<mirror::String> InternTable::LookupStrongLocked(ObjPtr<mirror::String> s) {
  return strong_interns_.Find(s);
}

void InternTable::AddNewTable() {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.AddNewTable();
  strong_interns_.AddNewTable();
}

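// Strong insertions are recorded while a transaction is active (compile-time
// class initialization) so that an aborted transaction can undo them via the
// *FromTransaction methods further down.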
ObjPtr<mirror::String> InternTable::InsertStrong(ObjPtr<mirror::String> s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordStrongStringInsertion(s);
  }
  if (log_new_roots_) {
    new_strong_intern_roots_.push_back(GcRoot<mirror::String>(s));
  }
  strong_interns_.Insert(s);
  return s;
}

ObjPtr<mirror::String> InternTable::InsertWeak(ObjPtr<mirror::String> s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringInsertion(s);
  }
  weak_interns_.Insert(s);
  return s;
}

void InternTable::RemoveStrong(ObjPtr<mirror::String> s) {
  strong_interns_.Remove(s);
}

void InternTable::RemoveWeak(ObjPtr<mirror::String> s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringRemoval(s);
  }
  weak_interns_.Remove(s);
}

// Insert/remove methods used to undo changes made during an aborted transaction.
ObjPtr<mirror::String> InternTable::InsertStrongFromTransaction(ObjPtr<mirror::String> s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertStrong(s);
}

ObjPtr<mirror::String> InternTable::InsertWeakFromTransaction(ObjPtr<mirror::String> s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertWeak(s);
}

void InternTable::RemoveStrongFromTransaction(ObjPtr<mirror::String> s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveStrong(s);
}

void InternTable::RemoveWeakFromTransaction(ObjPtr<mirror::String> s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveWeak(s);
}

void InternTable::AddImagesStringsToTable(const std::vector<gc::space::ImageSpace*>& image_spaces) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  for (gc::space::ImageSpace* image_space : image_spaces) {
    const ImageHeader* const header = &image_space->GetImageHeader();
    // Check if we have the interned strings section.
    const ImageSection& section = header->GetInternedStringsSection();
    if (section.Size() > 0) {
      AddTableFromMemoryLocked(image_space->Begin() + section.Offset());
    }
  }
}

void InternTable::BroadcastForNewInterns() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  weak_intern_condition_.Broadcast(self);
}

void InternTable::WaitUntilAccessible(Thread* self) {
  Locks::intern_table_lock_->ExclusiveUnlock(self);
  {
    ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
    MutexLock mu(self, *Locks::intern_table_lock_);
    while ((!kUseReadBarrier && weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) ||
           (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
      weak_intern_condition_.Wait(self);
    }
  }
  Locks::intern_table_lock_->ExclusiveLock(self);
}

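// Interning may have to block: during a pause a concurrent collector disables
// weak root access, and Insert() below must not read the weak table until
// SweepSystemWeaks has finished. The loop re-checks the strong table after
// every wait because another thread may have interned an equal string in the
// meantime.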
ObjPtr<mirror::String> InternTable::Insert(ObjPtr<mirror::String> s,
                                           bool is_strong,
                                           bool holding_locks) {
  if (s == nullptr) {
    return nullptr;
  }
  Thread* const self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  if (kDebugLocking && !holding_locks) {
    Locks::mutator_lock_->AssertSharedHeld(self);
    CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
  }
  while (true) {
    if (holding_locks) {
      if (!kUseReadBarrier) {
        CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
      } else {
        CHECK(self->GetWeakRefAccessEnabled());
      }
    }
    // Check the strong table for a match.
    ObjPtr<mirror::String> strong = LookupStrongLocked(s);
    if (strong != nullptr) {
      return strong;
    }
    if ((!kUseReadBarrier && weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) ||
        (kUseReadBarrier && self->GetWeakRefAccessEnabled())) {
      break;
    }
    // weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
    // cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
    // cleared.
    CHECK(!holding_locks);
    StackHandleScope<1> hs(self);
    auto h = hs.NewHandleWrapper(&s);
    WaitUntilAccessible(self);
  }
  if (!kUseReadBarrier) {
    CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
  } else {
    CHECK(self->GetWeakRefAccessEnabled());
  }
  // There is no match in the strong table, check the weak table.
  ObjPtr<mirror::String> weak = LookupWeakLocked(s);
  if (weak != nullptr) {
    if (is_strong) {
      // A match was found in the weak table. Promote to the strong table.
      RemoveWeak(weak);
      return InsertStrong(weak);
    }
    return weak;
  }
  // No match in the strong table or the weak table. Insert into the strong / weak table.
  return is_strong ? InsertStrong(s) : InsertWeak(s);
}

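// The promotion path above gives interning its identity guarantee. A sketch
// (hypothetical strings s1 and s2 with equal contents):
//
//   ObjPtr<mirror::String> w = table->InternWeak(s1);    // s1 enters the weak table
//   ObjPtr<mirror::String> t = table->InternStrong(s2);  // weak match found, promoted
//   CHECK(t == w);  // both callers see the same instance
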
ObjPtr<mirror::String> InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  Thread* self = Thread::Current();
  // Try to avoid allocation.
  ObjPtr<mirror::String> s = LookupStrong(self, utf16_length, utf8_data);
  if (s != nullptr) {
    return s;
  }
  return InternStrong(mirror::String::AllocFromModifiedUtf8(
      self, utf16_length, utf8_data));
}

ObjPtr<mirror::String> InternTable::InternStrong(const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
}

ObjPtr<mirror::String> InternTable::InternStrongImageString(ObjPtr<mirror::String> s) {
  // May be holding the heap bitmap lock.
  return Insert(s, /*is_strong=*/ true, /*holding_locks=*/ true);
}

ObjPtr<mirror::String> InternTable::InternStrong(ObjPtr<mirror::String> s) {
  return Insert(s, /*is_strong=*/ true, /*holding_locks=*/ false);
}

ObjPtr<mirror::String> InternTable::InternWeak(ObjPtr<mirror::String> s) {
  return Insert(s, /*is_strong=*/ false, /*holding_locks=*/ false);
}

bool InternTable::ContainsWeak(ObjPtr<mirror::String> s) {
  return LookupWeak(Thread::Current(), s) == s;
}

void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.SweepWeaks(visitor);
}

size_t InternTable::AddTableFromMemory(const uint8_t* ptr) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return AddTableFromMemoryLocked(ptr);
}

size_t InternTable::AddTableFromMemoryLocked(const uint8_t* ptr) {
  return strong_interns_.AddTableFromMemory(ptr);
}

size_t InternTable::WriteToMemory(uint8_t* ptr) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.WriteToMemory(ptr);
}

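// Serialization sketch for an image-writer-style caller (assuming, as in the
// underlying HashSet, that WriteToMemory(nullptr) performs no writes and just
// returns the number of bytes required):
//
//   size_t needed = table->WriteToMemory(nullptr);   // size query
//   std::vector<uint8_t> buffer(needed);
//   size_t written = table->WriteToMemory(buffer.data());
//   DCHECK_EQ(written, needed);
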
std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  // An additional cast to prevent undesired sign extension.
  return static_cast<size_t>(
      static_cast<uint32_t>(root.Read<kWithoutReadBarrier>()->GetHashCode()));
}

bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
                                               const GcRoot<mirror::String>& b) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  return a.Read<kWithoutReadBarrier>()->Equals(b.Read<kWithoutReadBarrier>());
}

bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
                                               const Utf8String& b) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  ObjPtr<mirror::String> a_string = a.Read<kWithoutReadBarrier>();
  uint32_t a_length = static_cast<uint32_t>(a_string->GetLength());
  if (a_length != b.GetUtf16Length()) {
    return false;
  }
  if (a_string->IsCompressed()) {
    size_t b_byte_count = strlen(b.GetUtf8Data());
    size_t b_utf8_length = CountModifiedUtf8Chars(b.GetUtf8Data(), b_byte_count);
    // The modified UTF-8 single-byte character range is 0x01 .. 0x7f. String
    // compression applies to regular ASCII, which covers exactly the same
    // range, but not to extended ASCII, which goes up to 0xff.
    const bool is_b_regular_ascii = (b_byte_count == b_utf8_length);
    if (is_b_regular_ascii) {
      return memcmp(b.GetUtf8Data(),
                    a_string->GetValueCompressed(), a_length * sizeof(uint8_t)) == 0;
    } else {
      return false;
    }
  } else {
    const uint16_t* a_value = a_string->GetValue();
    return CompareModifiedUtf8ToUtf16AsCodePointValues(b.GetUtf8Data(), a_value, a_length) == 0;
  }
}

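// Worked example for the compressed branch above: comparing against "abc"
// (3 bytes, 3 chars, so regular ASCII) takes the memcmp path, while "café"
// encodes 'é' as two modified-UTF-8 bytes (5 bytes, 4 chars). Since no
// compressed string can contain a character above 0x7f, the comparison
// returns false without examining the string's contents.
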
size_t InternTable::Table::AddTableFromMemory(const uint8_t* ptr) {
  size_t read_count = 0;
  UnorderedSet set(ptr, /*make copy*/false, &read_count);
  if (set.Empty()) {
    // Avoid inserting empty sets.
    return read_count;
  }
  // TODO: Disable this for app images if app images have intern tables.
  static constexpr bool kCheckDuplicates = kIsDebugBuild;
  if (kCheckDuplicates) {
    for (GcRoot<mirror::String>& string : set) {
      CHECK(Find(string.Read()) == nullptr) << "Already found " << string.Read()->ToModifiedUtf8();
    }
  }
  // Insert at the front since we add new interns at the back.
  tables_.insert(tables_.begin(), std::move(set));
  return read_count;
}

size_t InternTable::Table::WriteToMemory(uint8_t* ptr) {
  if (tables_.empty()) {
    return 0;
  }
  UnorderedSet* table_to_write;
  UnorderedSet combined;
  if (tables_.size() > 1) {
    table_to_write = &combined;
    for (UnorderedSet& table : tables_) {
      for (GcRoot<mirror::String>& string : table) {
        combined.Insert(string);
      }
    }
  } else {
    table_to_write = &tables_.back();
  }
  return table_to_write->WriteToMemory(ptr);
}

void InternTable::Table::Remove(ObjPtr<mirror::String> s) {
  for (UnorderedSet& table : tables_) {
    auto it = table.Find(GcRoot<mirror::String>(s));
    if (it != table.end()) {
      table.Erase(it);
      return;
    }
  }
  LOG(FATAL) << "Attempting to remove non-interned string " << s->ToModifiedUtf8();
}

ObjPtr<mirror::String> InternTable::Table::Find(ObjPtr<mirror::String> s) {
  Locks::intern_table_lock_->AssertHeld(Thread::Current());
  for (UnorderedSet& table : tables_) {
    auto it = table.Find(GcRoot<mirror::String>(s));
    if (it != table.end()) {
      return it->Read();
    }
  }
  return nullptr;
}

ObjPtr<mirror::String> InternTable::Table::Find(const Utf8String& string) {
  Locks::intern_table_lock_->AssertHeld(Thread::Current());
  for (UnorderedSet& table : tables_) {
    auto it = table.Find(string);
    if (it != table.end()) {
      return it->Read();
    }
  }
  return nullptr;
}

void InternTable::Table::AddNewTable() {
  tables_.push_back(UnorderedSet());
}

void InternTable::Table::Insert(ObjPtr<mirror::String> s) {
  // Always insert into the last table: the image tables come before it, and we
  // avoid inserting into those to prevent dirty pages.
  DCHECK(!tables_.empty());
  tables_.back().Insert(GcRoot<mirror::String>(s));
}

void InternTable::Table::VisitRoots(RootVisitor* visitor) {
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
      visitor, RootInfo(kRootInternedString));
  for (UnorderedSet& table : tables_) {
    for (auto& intern : table) {
      buffered_visitor.VisitRoot(intern);
    }
  }
}

void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) {
  for (UnorderedSet& table : tables_) {
    SweepWeaks(&table, visitor);
  }
}

void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) {
  for (auto it = set->begin(), end = set->end(); it != end;) {
    // This does not need a read barrier because this is called by GC.
    mirror::Object* object = it->Read<kWithoutReadBarrier>();
    mirror::Object* new_object = visitor->IsMarked(object);
    if (new_object == nullptr) {
      it = set->Erase(it);
    } else {
      *it = GcRoot<mirror::String>(new_object->AsString());
      ++it;
    }
  }
}

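// The IsMarked contract relied on above: the visitor returns null for a dead
// object and otherwise the (possibly moved) address of the live object. A
// minimal visitor that keeps every entry in place might look like this
// (a sketch only; real collectors consult their mark state):
//
//   class KeepAllVisitor : public IsMarkedVisitor {
//    public:
//     mirror::Object* IsMarked(mirror::Object* obj) override { return obj; }
//   };
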
size_t InternTable::Table::Size() const {
  return std::accumulate(tables_.begin(),
                         tables_.end(),
                         0U,
                         [](size_t sum, const UnorderedSet& set) {
                           return sum + set.Size();
                         });
}

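// Weak-root state changes only apply to non-CC collectors (the CC collector
// uses per-thread weak-ref access flags instead; see the kUseReadBarrier
// branches above). Leaving kWeakRootStateNoReadsOrWrites wakes any threads
// parked in WaitUntilAccessible().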
void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  ChangeWeakRootStateLocked(new_state);
}

void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
  CHECK(!kUseReadBarrier);
  weak_root_state_ = new_state;
  if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
    weak_intern_condition_.Broadcast(Thread::Current());
  }
}

InternTable::Table::Table() {
  Runtime* const runtime = Runtime::Current();
  // Initial table.
  tables_.push_back(UnorderedSet());
  tables_.back().SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
                               runtime->GetHashTableMaxLoadFactor());
}

}  // namespace art