/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_

#include "instrumentation.h"

#include "atomic.h"
#include "base/histogram-inl.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc/accounting/bitmap.h"
#include "gc_root.h"
#include "jni.h"
#include "method_reference.h"
#include "oat_file.h"
#include "object_callbacks.h"
#include "safe_map.h"
#include "thread_pool.h"

namespace art {

class ArtMethod;
class LinearAlloc;
class ProfilingInfo;

namespace jit {

class JitInstrumentationCache;

// Alignment in bytes that will suit all architectures.
static constexpr int kJitCodeAlignment = 16;
using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;

class JitCodeCache {
 public:
  static constexpr size_t kMaxCapacity = 64 * MB;
  // Use a very low default for debug builds to stress code cache collection.
  static constexpr size_t kInitialCapacity = kIsDebugBuild ? 8 * KB : 64 * KB;

  // By default, do not GC until reaching 256KB.
  static constexpr size_t kReservedCapacity = kInitialCapacity * 4;

  // Create the code cache with an initial code + data capacity of "initial_capacity",
  // growable up to "max_capacity". An error message is passed back in the out arg error_msg.
  static JitCodeCache* Create(size_t initial_capacity,
                              size_t max_capacity,
                              bool generate_debug_info,
                              std::string* error_msg);

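  // Illustrative creation sketch (not part of this header's API; the capacities and
  // the LOG statement are placeholders, not a prescribed usage):
  //
  //   std::string error_msg;
  //   JitCodeCache* cache = JitCodeCache::Create(JitCodeCache::kInitialCapacity,
  //                                              JitCodeCache::kMaxCapacity,
  //                                              /* generate_debug_info */ false,
  //                                              &error_msg);
  //   if (cache == nullptr) {
  //     LOG(ERROR) << "Could not create the JIT code cache: " << error_msg;
  //   }
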
  // Number of bytes allocated in the code cache.
  size_t CodeCacheSize() REQUIRES(!lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSize() REQUIRES(!lock_);

  bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Notify the code cache that the compiler wants to use the profiling info of
  // `method` to drive optimizations, and therefore ensure the returned profiling
  // info object is not collected.
  ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompilerUse(ArtMethod* method, Thread* self)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Allocate and write code and its metadata to the code cache.
  uint8_t* CommitCode(Thread* self,
                      ArtMethod* method,
                      const uint8_t* vmap_table,
                      size_t frame_size_in_bytes,
                      size_t core_spill_mask,
                      size_t fp_spill_mask,
                      const uint8_t* code,
                      size_t code_size,
                      bool osr)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!lock_);

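  // Expected compiler-side flow, as a hedged sketch: `cache`, `self`, `method` and the
  // compiled artifacts (`code`, `code_size`, `vmap_table`, frame info, spill masks) are
  // placeholders produced elsewhere by the JIT compiler.
  //
  //   if (cache->NotifyCompilationOf(method, self, /* osr */ false)) {
  //     // ... compile `method`, producing `code` / `code_size` and its metadata ...
  //     uint8_t* entry_point = cache->CommitCode(self, method, vmap_table,
  //                                              frame_size_in_bytes, core_spill_mask,
  //                                              fp_spill_mask, code, code_size,
  //                                              /* osr */ false);
  //     cache->DoneCompiling(method, self, /* osr */ false);
  //     // `entry_point` may be null if the cache could not allocate space.
  //   }
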
  // Return true if the code cache contains this pc.
  bool ContainsPc(const void* pc) const;

  // Return true if the code cache contains this method.
  bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);

  // Reserve a region of data of size at least "size". Returns null if there is no more room.
  uint8_t* ReserveData(Thread* self, size_t size, ArtMethod* method)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Clear data from the data portion of the code cache.
  void ClearData(Thread* self, void* data)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!lock_);

  CodeCacheBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  // Return whether we should do a full collection given the current state of the cache.
  bool ShouldDoFullCollection()
      REQUIRES(lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Perform a collection on the code cache.
  void GarbageCollectCache(Thread* self)
      REQUIRES(!lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Given 'pc', try to find the JIT-compiled code associated with it. Return null
  // if 'pc' is not in the code cache. 'method' is passed as a sanity check.
  OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
      REQUIRES(!lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
      REQUIRES(!lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

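  // Lookup sketch for a stack walker (here `pc` is a return address taken from a visited
  // frame, and `cache` / `method` come from the assumed surrounding code):
  //
  //   OatQuickMethodHeader* header = cache->LookupMethodHeader(pc, method);
  //   if (header != nullptr) {
  //     // `pc` lies inside JIT-compiled code for `method`.
  //   }
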
  // Remove all methods in our cache that were allocated by 'alloc'.
  void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
      REQUIRES(!lock_)
      REQUIRES(Locks::classlinker_classes_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  void ClearGcRootsInInlineCaches(Thread* self) REQUIRES(!lock_);

  // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true, this
  // will collect and retry if the first allocation is unsuccessful.
  ProfilingInfo* AddProfilingInfo(Thread* self,
                                  ArtMethod* method,
                                  const std::vector<uint32_t>& entries,
                                  bool retry_allocation)
      REQUIRES(!lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

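  // Allocation sketch (here `dex_pcs` is a hypothetical std::vector<uint32_t> of dex pcs
  // the caller wants to profile; `cache`, `self` and `method` are placeholders):
  //
  //   ProfilingInfo* info = cache->AddProfilingInfo(self, method, dex_pcs,
  //                                                 /* retry_allocation */ true);
  //   if (info == nullptr) {
  //     // Even the retried allocation failed; the method is not profiled.
  //   }
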
  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
    return mspace == code_mspace_ || mspace == data_mspace_;
  }

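  // Callback invoked by the mspace allocator when the code or data region needs to
  // grow by `increment` bytes.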
  void* MoreCore(const void* mspace, intptr_t increment);

  // Adds to `methods` all profiled methods which are part of any of the given dex locations.
  void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                          std::vector<MethodReference>& methods)
      REQUIRES(!lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

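  // Query sketch for a profile saver (the name `tracked_dex_base_locations` is an
  // assumption for a std::set<std::string> owned by the caller):
  //
  //   std::vector<MethodReference> methods;
  //   cache->GetProfiledMethods(tracked_dex_base_locations, methods);
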
  uint64_t GetLastUpdateTimeNs() const;

  size_t GetCurrentCapacity() REQUIRES(!lock_) {
    MutexLock lock(Thread::Current(), lock_);
    return current_capacity_;
  }

  size_t GetMemorySizeOfCodePointer(const void* ptr) REQUIRES(!lock_);

  void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
      REQUIRES(!lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  void Dump(std::ostream& os) REQUIRES(!lock_);

  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!lock_);

 private:
  // Take ownership of maps.
  JitCodeCache(MemMap* code_map,
               MemMap* data_map,
               size_t initial_code_capacity,
               size_t initial_data_capacity,
               size_t max_capacity,
               bool garbage_collect_code);

  // Internal version of 'CommitCode' that will not retry if the
  // allocation fails. Return null if the allocation fails.
  uint8_t* CommitCodeInternal(Thread* self,
                              ArtMethod* method,
                              const uint8_t* vmap_table,
                              size_t frame_size_in_bytes,
                              size_t core_spill_mask,
                              size_t fp_spill_mask,
                              const uint8_t* code,
                              size_t code_size,
                              bool osr)
      REQUIRES(!lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                          ArtMethod* method,
                                          const std::vector<uint32_t>& entries)
      REQUIRES(lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Return
  // whether the thread actually waited.
  bool WaitForPotentialCollectionToComplete(Thread* self)
      REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);

  // Free the mspace allocations taken by 'method'.
  void FreeCode(const void* code_ptr, ArtMethod* method) REQUIRES(lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSizeLocked() REQUIRES(lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSizeLocked() REQUIRES(lock_);

  // Notify all waiting threads that a collection is done.
  void NotifyCollectionDone(Thread* self) REQUIRES(lock_);

  // Try to increase the current capacity of the code cache. Return whether we
  // succeeded at doing so.
  bool IncreaseCodeCacheCapacity() REQUIRES(lock_);

  // Set the footprint limit of the code cache.
  void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);

  void DoCollection(Thread* self, bool collect_profiling_info)
      REQUIRES(!lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  void RemoveUnmarkedCode(Thread* self)
      REQUIRES(!lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  void MarkCompiledCodeOnThreadStacks(Thread* self)
      REQUIRES(!lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  bool CheckLiveCompiledCodeHasProfilingInfo()
      REQUIRES(lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  void FreeCode(uint8_t* code) REQUIRES(lock_);
  uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
  void FreeData(uint8_t* data) REQUIRES(lock_);
  uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);

  // Lock for guarding allocations, collections, and the method_code_map_.
  Mutex lock_;
  // Condition to wait on during collection.
  ConditionVariable lock_cond_ GUARDED_BY(lock_);
  // Whether there is a code cache collection in progress.
  bool collection_in_progress_ GUARDED_BY(lock_);
  // Mem map which holds code.
  std::unique_ptr<MemMap> code_map_;
  // Mem map which holds data (stack maps and profiling info).
  std::unique_ptr<MemMap> data_map_;
  // The opaque mspace for allocating code.
  void* code_mspace_ GUARDED_BY(lock_);
  // The opaque mspace for allocating data.
  void* data_mspace_ GUARDED_BY(lock_);
  // Bitmap for collecting code and data.
  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
  // Holds compiled code associated with the ArtMethod.
  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
  // Holds OSR compiled code associated with the ArtMethod.
  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
  // ProfilingInfo objects we have allocated.
  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);

  // The maximum capacity in bytes this code cache can go to.
  size_t max_capacity_ GUARDED_BY(lock_);

  // The current capacity in bytes of the code cache.
  size_t current_capacity_ GUARDED_BY(lock_);

  // The current footprint in bytes of the code portion of the code cache.
  size_t code_end_ GUARDED_BY(lock_);

  // The current footprint in bytes of the data portion of the code cache.
  size_t data_end_ GUARDED_BY(lock_);

  // Whether the last collection round increased the code cache.
  bool last_collection_increased_code_cache_ GUARDED_BY(lock_);

  // Last time the code cache was updated.
  // It is atomic to avoid locking when reading it.
  Atomic<uint64_t> last_update_time_ns_;

  // Whether we can do garbage collection.
  const bool garbage_collect_code_;

  // The size in bytes of used memory for the data portion of the code cache.
  size_t used_memory_for_data_ GUARDED_BY(lock_);

  // The size in bytes of used memory for the code portion of the code cache.
  size_t used_memory_for_code_ GUARDED_BY(lock_);

  // Number of compilations done throughout the lifetime of the JIT.
  size_t number_of_compilations_ GUARDED_BY(lock_);

  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
  size_t number_of_osr_compilations_ GUARDED_BY(lock_);

  // Number of deoptimizations done throughout the lifetime of the JIT.
  size_t number_of_deoptimizations_ GUARDED_BY(lock_);

  // Number of code cache collections done throughout the lifetime of the JIT.
  size_t number_of_collections_ GUARDED_BY(lock_);

  // Histogram for keeping track of stack map size statistics.
  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of code size statistics.
  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of profiling info statistics.
  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);

  DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};

}  // namespace jit
}  // namespace art

#endif  // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_