/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "intern_table.h"

#include <cstring>  // memcmp, strlen
#include <memory>
#include <numeric>  // std::accumulate

#include "dex/utf.h"
#include "gc/collector/garbage_collector.h"
#include "gc/space/image_space.h"
#include "gc/weak_root_state.h"
#include "gc_root-inl.h"
#include "handle_scope-inl.h"
#include "image-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "object_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"

namespace art {

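// The "New intern condition" variable is waited on in WaitUntilAccessible() while weak root
// access is disabled during GC, and signaled by BroadcastForNewInterns() and
// ChangeWeakRootStateLocked() below.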
InternTable::InternTable()
    : log_new_roots_(false),
      weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
      weak_root_state_(gc::kWeakRootStateNormal) {
}

size_t InternTable::Size() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.Size() + weak_interns_.Size();
}

size_t InternTable::StrongSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.Size();
}

size_t InternTable::WeakSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return weak_interns_.Size();
}

void InternTable::DumpForSigQuit(std::ostream& os) const {
  os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
}

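// Visits strong roots only. With kVisitRootFlagAllRoots the entire strong table is visited; with
// kVisitRootFlagNewRoots only roots logged since logging was enabled are visited, and any root
// moved by the GC is re-inserted so the set stays consistent with the new address.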
void InternTable::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if ((flags & kVisitRootFlagAllRoots) != 0) {
    strong_interns_.VisitRoots(visitor);
  } else if ((flags & kVisitRootFlagNewRoots) != 0) {
    for (auto& root : new_strong_intern_roots_) {
      ObjPtr<mirror::String> old_ref = root.Read<kWithoutReadBarrier>();
      root.VisitRoot(visitor, RootInfo(kRootInternedString));
      ObjPtr<mirror::String> new_ref = root.Read<kWithoutReadBarrier>();
      if (new_ref != old_ref) {
        // The GC moved a root in the log. Need to search the strong interns and update the
        // corresponding object. This is slow, but luckily for us, this may only happen with a
        // concurrent moving GC.
        strong_interns_.Remove(old_ref);
        strong_interns_.Insert(new_ref);
      }
    }
  }
  if ((flags & kVisitRootFlagClearRootLog) != 0) {
    new_strong_intern_roots_.clear();
  }
  if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
    log_new_roots_ = true;
  } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
    log_new_roots_ = false;
  }
  // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}

ObjPtr<mirror::String> InternTable::LookupWeak(Thread* self, ObjPtr<mirror::String> s) {
  MutexLock mu(self, *Locks::intern_table_lock_);
  return LookupWeakLocked(s);
}

ObjPtr<mirror::String> InternTable::LookupStrong(Thread* self, ObjPtr<mirror::String> s) {
  MutexLock mu(self, *Locks::intern_table_lock_);
  return LookupStrongLocked(s);
}

ObjPtr<mirror::String> InternTable::LookupStrong(Thread* self,
                                                 uint32_t utf16_length,
                                                 const char* utf8_data) {
  DCHECK_EQ(utf16_length, CountModifiedUtf8Chars(utf8_data));
  Utf8String string(utf16_length,
                    utf8_data,
                    ComputeUtf16HashFromModifiedUtf8(utf8_data, utf16_length));
  MutexLock mu(self, *Locks::intern_table_lock_);
  return strong_interns_.Find(string);
}

ObjPtr<mirror::String> InternTable::LookupWeakLocked(ObjPtr<mirror::String> s) {
  return weak_interns_.Find(s);
}

ObjPtr<mirror::String> InternTable::LookupStrongLocked(ObjPtr<mirror::String> s) {
  return strong_interns_.Find(s);
}

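// Starts a new backing table in both the strong and the weak table. Entries added afterwards go
// into the new table, leaving the earlier (typically image) tables untouched; see
// Table::Insert() below.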
void InternTable::AddNewTable() {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.AddNewTable();
  strong_interns_.AddNewTable();
}

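// Inserts into the strong table. The insertion is recorded so an active transaction can roll it
// back, and logged in new_strong_intern_roots_ when root logging is enabled (see VisitRoots()
// above).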
ObjPtr<mirror::String> InternTable::InsertStrong(ObjPtr<mirror::String> s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordStrongStringInsertion(s);
  }
  if (log_new_roots_) {
    new_strong_intern_roots_.push_back(GcRoot<mirror::String>(s));
  }
  strong_interns_.Insert(s);
  return s;
}

ObjPtr<mirror::String> InternTable::InsertWeak(ObjPtr<mirror::String> s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringInsertion(s);
  }
  weak_interns_.Insert(s);
  return s;
}

void InternTable::RemoveStrong(ObjPtr<mirror::String> s) {
  strong_interns_.Remove(s);
}

void InternTable::RemoveWeak(ObjPtr<mirror::String> s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringRemoval(s);
  }
  weak_interns_.Remove(s);
}

// Insert/remove methods used to undo changes made during an aborted transaction.
ObjPtr<mirror::String> InternTable::InsertStrongFromTransaction(ObjPtr<mirror::String> s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertStrong(s);
}

ObjPtr<mirror::String> InternTable::InsertWeakFromTransaction(ObjPtr<mirror::String> s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertWeak(s);
}

void InternTable::RemoveStrongFromTransaction(ObjPtr<mirror::String> s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveStrong(s);
}

void InternTable::RemoveWeakFromTransaction(ObjPtr<mirror::String> s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveWeak(s);
}

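// Wakes up threads blocked in WaitUntilAccessible(), likely called by the GC when weak reference
// access is re-enabled or new interns may have become visible to waiters.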
void InternTable::BroadcastForNewInterns() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  weak_intern_condition_.Broadcast(self);
}

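// Blocks until weak root access is re-enabled. The intern table lock (held by the caller) is
// released while waiting and re-acquired before returning; the thread is suspended to
// kWaitingWeakGcRootRead for the duration.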
void InternTable::WaitUntilAccessible(Thread* self) {
  Locks::intern_table_lock_->ExclusiveUnlock(self);
  {
    ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
    MutexLock mu(self, *Locks::intern_table_lock_);
    while ((!kUseReadBarrier && weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) ||
           (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
      weak_intern_condition_.Wait(self);
    }
  }
  Locks::intern_table_lock_->ExclusiveLock(self);
}

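// Common insertion path: check the strong table first, wait (when not holding extra locks) until
// weak roots are readable, then check the weak table. A weak match is promoted to strong when
// is_strong is set; otherwise the string is inserted into the requested table.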
ObjPtr<mirror::String> InternTable::Insert(ObjPtr<mirror::String> s,
                                           bool is_strong,
                                           bool holding_locks) {
  if (s == nullptr) {
    return nullptr;
  }
  Thread* const self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  if (kDebugLocking && !holding_locks) {
    Locks::mutator_lock_->AssertSharedHeld(self);
    CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
  }
  while (true) {
    if (holding_locks) {
      if (!kUseReadBarrier) {
        CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
      } else {
        CHECK(self->GetWeakRefAccessEnabled());
      }
    }
    // Check the strong table for a match.
    ObjPtr<mirror::String> strong = LookupStrongLocked(s);
    if (strong != nullptr) {
      return strong;
    }
    if ((!kUseReadBarrier && weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) ||
        (kUseReadBarrier && self->GetWeakRefAccessEnabled())) {
      break;
    }
    // weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
    // cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
    // cleared.
    CHECK(!holding_locks);
    StackHandleScope<1> hs(self);
    auto h = hs.NewHandleWrapper(&s);
    WaitUntilAccessible(self);
  }
  if (!kUseReadBarrier) {
    CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
  } else {
    CHECK(self->GetWeakRefAccessEnabled());
  }
  // There is no match in the strong table, check the weak table.
  ObjPtr<mirror::String> weak = LookupWeakLocked(s);
  if (weak != nullptr) {
    if (is_strong) {
      // A match was found in the weak table. Promote to the strong table.
      RemoveWeak(weak);
      return InsertStrong(weak);
    }
    return weak;
  }
  // No match in the strong table or the weak table. Insert into the strong / weak table.
  return is_strong ? InsertStrong(s) : InsertWeak(s);
}

ObjPtr<mirror::String> InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  Thread* self = Thread::Current();
  // Try to avoid allocation.
  ObjPtr<mirror::String> s = LookupStrong(self, utf16_length, utf8_data);
  if (s != nullptr) {
    return s;
  }
  return InternStrong(mirror::String::AllocFromModifiedUtf8(
      self, utf16_length, utf8_data));
}

ObjPtr<mirror::String> InternTable::InternStrong(const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
}

ObjPtr<mirror::String> InternTable::InternStrongImageString(ObjPtr<mirror::String> s) {
  // May be holding the heap bitmap lock.
  return Insert(s, true, true);
}

ObjPtr<mirror::String> InternTable::InternStrong(ObjPtr<mirror::String> s) {
  return Insert(s, true, false);
}

ObjPtr<mirror::String> InternTable::InternWeak(ObjPtr<mirror::String> s) {
  return Insert(s, false, false);
}

bool InternTable::ContainsWeak(ObjPtr<mirror::String> s) {
  return LookupWeak(Thread::Current(), s) == s;
}

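// Called by the GC to sweep the weak interns: dead strings are removed and moved strings are
// updated in place (see Table::SweepWeaks() below).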
void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.SweepWeaks(visitor);
}

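// Serializes the strong interns to ptr and returns the number of bytes written, presumably for
// embedding the table in an image file.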
size_t InternTable::WriteToMemory(uint8_t* ptr) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.WriteToMemory(ptr);
}

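// Hash and equality functors for the backing sets. The hash must agree with
// ComputeUtf16HashFromModifiedUtf8() used in LookupStrong() above, so that Utf8String probes land
// in the same buckets as GcRoot<mirror::String> entries.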
std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  // An additional cast to prevent undesired sign extension.
  return static_cast<size_t>(
      static_cast<uint32_t>(root.Read<kWithoutReadBarrier>()->GetHashCode()));
}

bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
                                               const GcRoot<mirror::String>& b) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  return a.Read<kWithoutReadBarrier>()->Equals(b.Read<kWithoutReadBarrier>());
}

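// Compares a managed string against a modified-UTF-8 key without allocating: lengths first, then
// a memcmp fast path when the managed string is compressed and the key is plain ASCII, falling
// back to a code-point-by-code-point comparison.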
bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
                                               const Utf8String& b) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  ObjPtr<mirror::String> a_string = a.Read<kWithoutReadBarrier>();
  uint32_t a_length = static_cast<uint32_t>(a_string->GetLength());
  if (a_length != b.GetUtf16Length()) {
    return false;
  }
  if (a_string->IsCompressed()) {
    size_t b_byte_count = strlen(b.GetUtf8Data());
    size_t b_utf8_length = CountModifiedUtf8Chars(b.GetUtf8Data(), b_byte_count);
    // The modified UTF-8 single-byte character range is 0x01 .. 0x7f. String compression applies
    // to regular ASCII in that same range, not to extended ASCII, which goes up to 0xff.
    const bool is_b_regular_ascii = (b_byte_count == b_utf8_length);
    if (is_b_regular_ascii) {
      return memcmp(b.GetUtf8Data(),
                    a_string->GetValueCompressed(), a_length * sizeof(uint8_t)) == 0;
    } else {
      return false;
    }
  } else {
    const uint16_t* a_value = a_string->GetValue();
    return CompareModifiedUtf8ToUtf16AsCodePointValues(b.GetUtf8Data(), a_value, a_length) == 0;
  }
}

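// Flattens all backing tables into a single set before writing, since the serialized form holds
// one table.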
size_t InternTable::Table::WriteToMemory(uint8_t* ptr) {
  if (tables_.empty()) {
    return 0;
  }
  UnorderedSet* table_to_write;
  UnorderedSet combined;
  if (tables_.size() > 1) {
    table_to_write = &combined;
    for (InternalTable& table : tables_) {
      for (GcRoot<mirror::String>& string : table.set_) {
        combined.insert(string);
      }
    }
  } else {
    table_to_write = &tables_.back().set_;
  }
  return table_to_write->WriteToMemory(ptr);
}

void InternTable::Table::Remove(ObjPtr<mirror::String> s) {
  for (InternalTable& table : tables_) {
    auto it = table.set_.find(GcRoot<mirror::String>(s));
    if (it != table.set_.end()) {
      table.set_.erase(it);
      return;
    }
  }
  LOG(FATAL) << "Attempting to remove non-interned string " << s->ToModifiedUtf8();
}

ObjPtr<mirror::String> InternTable::Table::Find(ObjPtr<mirror::String> s) {
  Locks::intern_table_lock_->AssertHeld(Thread::Current());
  for (InternalTable& table : tables_) {
    auto it = table.set_.find(GcRoot<mirror::String>(s));
    if (it != table.set_.end()) {
      return it->Read();
    }
  }
  return nullptr;
}

ObjPtr<mirror::String> InternTable::Table::Find(const Utf8String& string) {
  Locks::intern_table_lock_->AssertHeld(Thread::Current());
  for (InternalTable& table : tables_) {
    auto it = table.set_.find(string);
    if (it != table.set_.end()) {
      return it->Read();
    }
  }
  return nullptr;
}

void InternTable::Table::AddNewTable() {
  tables_.push_back(InternalTable());
}

void InternTable::Table::Insert(ObjPtr<mirror::String> s) {
  // Always insert into the last table; the image tables come before it, and we avoid inserting
  // into those to prevent dirtying their pages.
  DCHECK(!tables_.empty());
  tables_.back().set_.insert(GcRoot<mirror::String>(s));
}

void InternTable::Table::VisitRoots(RootVisitor* visitor) {
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
      visitor, RootInfo(kRootInternedString));
  for (InternalTable& table : tables_) {
    for (auto& intern : table.set_) {
      buffered_visitor.VisitRoot(intern);
    }
  }
}

void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) {
  for (InternalTable& table : tables_) {
    SweepWeaks(&table.set_, visitor);
  }
}

void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) {
  for (auto it = set->begin(), end = set->end(); it != end;) {
    // This does not need a read barrier because this is called by GC.
    mirror::Object* object = it->Read<kWithoutReadBarrier>();
    mirror::Object* new_object = visitor->IsMarked(object);
    if (new_object == nullptr) {
      it = set->erase(it);
    } else {
      *it = GcRoot<mirror::String>(new_object->AsString());
      ++it;
    }
  }
}

size_t InternTable::Table::Size() const {
  return std::accumulate(tables_.begin(),
                         tables_.end(),
                         0U,
                         [](size_t sum, const InternalTable& table) {
                           return sum + table.Size();
                         });
}

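// Transitions the weak root state, waking any waiters once reads become possible again. Only
// meaningful without a read barrier; with one, the per-thread weak ref access flag is checked
// instead (see WaitUntilAccessible() above).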
void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  ChangeWeakRootStateLocked(new_state);
}

void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
  CHECK(!kUseReadBarrier);
  weak_root_state_ = new_state;
  if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
    weak_intern_condition_.Broadcast(Thread::Current());
  }
}

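// The initial table uses the runtime-configured load factors; tables added later via
// AddNewTable() use the InternalTable defaults.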
InternTable::Table::Table() {
  Runtime* const runtime = Runtime::Current();
  InternalTable initial_table;
  initial_table.set_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
                                   runtime->GetHashTableMaxLoadFactor());
  tables_.push_back(std::move(initial_table));
}

}  // namespace art