// art/runtime/intern_table.h (AOSP) — code-browser navigation header removed.
      1 /*
      2  * Copyright (C) 2011 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #ifndef ART_RUNTIME_INTERN_TABLE_H_
     18 #define ART_RUNTIME_INTERN_TABLE_H_
     19 
     20 #include <unordered_set>
     21 
     22 #include "base/atomic.h"
     23 #include "base/allocator.h"
     24 #include "base/hash_set.h"
     25 #include "base/mutex.h"
     26 #include "gc/weak_root_state.h"
     27 #include "gc_root.h"
     28 
     29 namespace art {
     30 
     31 class IsMarkedVisitor;
     32 
     33 namespace gc {
     34 namespace space {
     35 class ImageSpace;
     36 }  // namespace space
     37 }  // namespace gc
     38 
     39 enum VisitRootFlags : uint8_t;
     40 
     41 namespace linker {
     42 class ImageWriter;
     43 }  // namespace linker
     44 
     45 namespace mirror {
     46 class String;
     47 }  // namespace mirror
     48 class Transaction;
     49 
     50 /**
     51  * Used to intern strings.
     52  *
     53  * There are actually two tables: one that holds strong references to its strings, and one that
     54  * holds weak references. The former is used for string literals, for which there is an effective
     55  * reference from the constant pool. The latter is used for strings interned at runtime via
     56  * String.intern. Some code (XML parsers being a prime example) relies on being able to intern
     57  * arbitrarily many strings for the duration of a parse without permanently increasing the memory
     58  * footprint.
     59  */
     60 class InternTable {
     61  public:
     62   // Modified UTF-8-encoded string treated as UTF16.
     63   class Utf8String {
     64    public:
     65     Utf8String(uint32_t utf16_length, const char* utf8_data, int32_t hash)
     66         : hash_(hash), utf16_length_(utf16_length), utf8_data_(utf8_data) { }
     67 
     68     int32_t GetHash() const { return hash_; }
     69     uint32_t GetUtf16Length() const { return utf16_length_; }
     70     const char* GetUtf8Data() const { return utf8_data_; }
     71 
     72    private:
     73     int32_t hash_;
     74     uint32_t utf16_length_;
     75     const char* utf8_data_;
     76   };
     77 
     78   class StringHashEquals {
     79    public:
     80     std::size_t operator()(const GcRoot<mirror::String>& root) const NO_THREAD_SAFETY_ANALYSIS;
     81     bool operator()(const GcRoot<mirror::String>& a, const GcRoot<mirror::String>& b) const
     82         NO_THREAD_SAFETY_ANALYSIS;
     83 
     84     // Utf8String can be used for lookup.
     85     std::size_t operator()(const Utf8String& key) const {
     86       // A cast to prevent undesired sign extension.
     87       return static_cast<uint32_t>(key.GetHash());
     88     }
     89 
     90     bool operator()(const GcRoot<mirror::String>& a, const Utf8String& b) const
     91         NO_THREAD_SAFETY_ANALYSIS;
     92   };
     93 
     94   class GcRootEmptyFn {
     95    public:
     96     void MakeEmpty(GcRoot<mirror::String>& item) const {
     97       item = GcRoot<mirror::String>();
     98     }
     99     bool IsEmpty(const GcRoot<mirror::String>& item) const {
    100       return item.IsNull();
    101     }
    102   };
    103 
    104   using UnorderedSet = HashSet<GcRoot<mirror::String>,
    105                                GcRootEmptyFn,
    106                                StringHashEquals,
    107                                StringHashEquals,
    108                                TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>>;
    109 
    110   InternTable();
    111 
    112   // Interns a potentially new string in the 'strong' table. May cause thread suspension.
    113   ObjPtr<mirror::String> InternStrong(int32_t utf16_length, const char* utf8_data)
    114       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
    115 
    116   // Only used by image writer. Special version that may not cause thread suspension since the GC
    117   // cannot be running while we are doing image writing. Maybe be called while while holding a
    118   // lock since there will not be thread suspension.
    119   ObjPtr<mirror::String> InternStrongImageString(ObjPtr<mirror::String> s)
    120       REQUIRES_SHARED(Locks::mutator_lock_);
    121 
    122   // Interns a potentially new string in the 'strong' table. May cause thread suspension.
    123   ObjPtr<mirror::String> InternStrong(const char* utf8_data) REQUIRES_SHARED(Locks::mutator_lock_)
    124       REQUIRES(!Roles::uninterruptible_);
    125 
    126   // Interns a potentially new string in the 'strong' table. May cause thread suspension.
    127   ObjPtr<mirror::String> InternStrong(ObjPtr<mirror::String> s)
    128       REQUIRES_SHARED(Locks::mutator_lock_)
    129       REQUIRES(!Roles::uninterruptible_);
    130 
    131   // Interns a potentially new string in the 'weak' table. May cause thread suspension.
    132   ObjPtr<mirror::String> InternWeak(ObjPtr<mirror::String> s) REQUIRES_SHARED(Locks::mutator_lock_)
    133       REQUIRES(!Roles::uninterruptible_);
    134 
    135   void SweepInternTableWeaks(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_)
    136       REQUIRES(!Locks::intern_table_lock_);
    137 
    138   bool ContainsWeak(ObjPtr<mirror::String> s) REQUIRES_SHARED(Locks::mutator_lock_)
    139       REQUIRES(!Locks::intern_table_lock_);
    140 
    141   // Lookup a strong intern, returns null if not found.
    142   ObjPtr<mirror::String> LookupStrong(Thread* self, ObjPtr<mirror::String> s)
    143       REQUIRES(!Locks::intern_table_lock_)
    144       REQUIRES_SHARED(Locks::mutator_lock_);
    145   ObjPtr<mirror::String> LookupStrong(Thread* self, uint32_t utf16_length, const char* utf8_data)
    146       REQUIRES(!Locks::intern_table_lock_)
    147       REQUIRES_SHARED(Locks::mutator_lock_);
    148   ObjPtr<mirror::String> LookupStrongLocked(ObjPtr<mirror::String> s)
    149       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
    150 
    151   // Lookup a weak intern, returns null if not found.
    152   ObjPtr<mirror::String> LookupWeak(Thread* self, ObjPtr<mirror::String> s)
    153       REQUIRES(!Locks::intern_table_lock_)
    154       REQUIRES_SHARED(Locks::mutator_lock_);
    155   ObjPtr<mirror::String> LookupWeakLocked(ObjPtr<mirror::String> s)
    156       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
    157 
    158   // Total number of interned strings.
    159   size_t Size() const REQUIRES(!Locks::intern_table_lock_);
    160 
    161   // Total number of weakly live interned strings.
    162   size_t StrongSize() const REQUIRES(!Locks::intern_table_lock_);
    163 
    164   // Total number of strongly live interned strings.
    165   size_t WeakSize() const REQUIRES(!Locks::intern_table_lock_);
    166 
    167   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
    168       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
    169 
    170   // Visit all of the interns in the table.
    171   template <typename Visitor>
    172   void VisitInterns(const Visitor& visitor,
    173                     bool visit_boot_images,
    174                     bool visit_non_boot_images)
    175       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
    176 
    177   // Count the number of intern strings in the table.
    178   size_t CountInterns(bool visit_boot_images, bool visit_non_boot_images) const
    179       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
    180 
    181   void DumpForSigQuit(std::ostream& os) const REQUIRES(!Locks::intern_table_lock_);
    182 
    183   void BroadcastForNewInterns();
    184 
    185   // Add all of the strings in the image's intern table into this intern table. This is required so
    186   // the intern table is correct.
    187   // The visitor arg type is UnorderedSet
    188   template <typename Visitor>
    189   void AddImageStringsToTable(gc::space::ImageSpace* image_space,
    190                               const Visitor& visitor)
    191       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
    192 
    193   // Add a new intern table for inserting to, previous intern tables are still there but no
    194   // longer inserted into and ideally unmodified. This is done to prevent dirty pages.
    195   void AddNewTable()
    196       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
    197 
    198   // Write the post zygote intern table to a pointer. Only writes the strong interns since it is
    199   // expected that there is no weak interns since this is called from the image writer.
    200   size_t WriteToMemory(uint8_t* ptr) REQUIRES_SHARED(Locks::mutator_lock_)
    201       REQUIRES(!Locks::intern_table_lock_);
    202 
    203   // Change the weak root state. May broadcast to waiters.
    204   void ChangeWeakRootState(gc::WeakRootState new_state)
    205       REQUIRES(!Locks::intern_table_lock_);
    206 
    207  private:
    208   // Table which holds pre zygote and post zygote interned strings. There is one instance for
    209   // weak interns and strong interns.
    210   class Table {
    211    public:
    212     class InternalTable {
    213      public:
    214       InternalTable() = default;
    215       InternalTable(UnorderedSet&& set, bool is_boot_image)
    216           : set_(std::move(set)), is_boot_image_(is_boot_image) {}
    217 
    218       bool Empty() const {
    219         return set_.empty();
    220       }
    221 
    222       size_t Size() const {
    223         return set_.size();
    224       }
    225 
    226       bool IsBootImage() const {
    227         return is_boot_image_;
    228       }
    229 
    230      private:
    231       UnorderedSet set_;
    232       bool is_boot_image_ = false;
    233 
    234       friend class InternTable;
    235       friend class Table;
    236       ART_FRIEND_TEST(InternTableTest, CrossHash);
    237     };
    238 
    239     Table();
    240     ObjPtr<mirror::String> Find(ObjPtr<mirror::String> s) REQUIRES_SHARED(Locks::mutator_lock_)
    241         REQUIRES(Locks::intern_table_lock_);
    242     ObjPtr<mirror::String> Find(const Utf8String& string) REQUIRES_SHARED(Locks::mutator_lock_)
    243         REQUIRES(Locks::intern_table_lock_);
    244     void Insert(ObjPtr<mirror::String> s) REQUIRES_SHARED(Locks::mutator_lock_)
    245         REQUIRES(Locks::intern_table_lock_);
    246     void Remove(ObjPtr<mirror::String> s)
    247         REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
    248     void VisitRoots(RootVisitor* visitor)
    249         REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
    250     void SweepWeaks(IsMarkedVisitor* visitor)
    251         REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
    252     // Add a new intern table that will only be inserted into from now on.
    253     void AddNewTable() REQUIRES(Locks::intern_table_lock_);
    254     size_t Size() const REQUIRES(Locks::intern_table_lock_);
    255     // Read and add an intern table from ptr.
    256     // Tables read are inserted at the front of the table array. Only checks for conflicts in
    257     // debug builds. Returns how many bytes were read.
    258     // NO_THREAD_SAFETY_ANALYSIS for the visitor that may require locks.
    259     template <typename Visitor>
    260     size_t AddTableFromMemory(const uint8_t* ptr, const Visitor& visitor, bool is_boot_image)
    261         REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
    262     // Write the intern tables to ptr, if there are multiple tables they are combined into a single
    263     // one. Returns how many bytes were written.
    264     size_t WriteToMemory(uint8_t* ptr)
    265         REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
    266 
    267    private:
    268     void SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor)
    269         REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
    270 
    271     // Add a table to the front of the tables vector.
    272     void AddInternStrings(UnorderedSet&& intern_strings, bool is_boot_image)
    273         REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
    274 
    275     // We call AddNewTable when we create the zygote to reduce private dirty pages caused by
    276     // modifying the zygote intern table. The back of table is modified when strings are interned.
    277     std::vector<InternalTable> tables_;
    278 
    279     friend class InternTable;
    280     friend class linker::ImageWriter;
    281     ART_FRIEND_TEST(InternTableTest, CrossHash);
    282   };
    283 
    284   // Insert if non null, otherwise return null. Must be called holding the mutator lock.
    285   // If holding_locks is true, then we may also hold other locks. If holding_locks is true, then we
    286   // require GC is not running since it is not safe to wait while holding locks.
    287   ObjPtr<mirror::String> Insert(ObjPtr<mirror::String> s, bool is_strong, bool holding_locks)
    288       REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
    289 
    290   // Add a table from memory to the strong interns.
    291   template <typename Visitor>
    292   size_t AddTableFromMemory(const uint8_t* ptr, const Visitor& visitor, bool is_boot_image)
    293       REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
    294 
    295   ObjPtr<mirror::String> InsertStrong(ObjPtr<mirror::String> s)
    296       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
    297   ObjPtr<mirror::String> InsertWeak(ObjPtr<mirror::String> s)
    298       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
    299   void RemoveStrong(ObjPtr<mirror::String> s)
    300       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
    301   void RemoveWeak(ObjPtr<mirror::String> s)
    302       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
    303 
    304   // Transaction rollback access.
    305   ObjPtr<mirror::String> InsertStrongFromTransaction(ObjPtr<mirror::String> s)
    306       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
    307   ObjPtr<mirror::String> InsertWeakFromTransaction(ObjPtr<mirror::String> s)
    308       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
    309   void RemoveStrongFromTransaction(ObjPtr<mirror::String> s)
    310       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
    311   void RemoveWeakFromTransaction(ObjPtr<mirror::String> s)
    312       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
    313 
    314   // Change the weak root state. May broadcast to waiters.
    315   void ChangeWeakRootStateLocked(gc::WeakRootState new_state)
    316       REQUIRES(Locks::intern_table_lock_);
    317 
    318   // Wait until we can read weak roots.
    319   void WaitUntilAccessible(Thread* self)
    320       REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
    321 
    322   bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
    323   ConditionVariable weak_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
    324   // Since this contains (strong) roots, they need a read barrier to
    325   // enable concurrent intern table (strong) root scan. Do not
    326   // directly access the strings in it. Use functions that contain
    327   // read barriers.
    328   Table strong_interns_ GUARDED_BY(Locks::intern_table_lock_);
    329   std::vector<GcRoot<mirror::String>> new_strong_intern_roots_
    330       GUARDED_BY(Locks::intern_table_lock_);
    331   // Since this contains (weak) roots, they need a read barrier. Do
    332   // not directly access the strings in it. Use functions that contain
    333   // read barriers.
    334   Table weak_interns_ GUARDED_BY(Locks::intern_table_lock_);
    335   // Weak root state, used for concurrent system weak processing and more.
    336   gc::WeakRootState weak_root_state_ GUARDED_BY(Locks::intern_table_lock_);
    337 
    338   friend class gc::space::ImageSpace;
    339   friend class linker::ImageWriter;
    340   friend class Transaction;
    341   ART_FRIEND_TEST(InternTableTest, CrossHash);
    342   DISALLOW_COPY_AND_ASSIGN(InternTable);
    343 };
    344 
    345 }  // namespace art
    346 
    347 #endif  // ART_RUNTIME_INTERN_TABLE_H_
    348