/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_

#include "instrumentation.h"

#include "atomic.h"
#include "base/arena_containers.h"
#include "base/histogram-inl.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc/accounting/bitmap.h"
#include "gc_root.h"
#include "jni.h"
#include "method_reference.h"
#include "oat_file.h"
#include "object_callbacks.h"
#include "profile_compilation_info.h"
#include "safe_map.h"
#include "thread_pool.h"

namespace art {

class ArtMethod;
class LinearAlloc;
class InlineCache;
class ProfilingInfo;

namespace jit {

class JitInstrumentationCache;

// Alignment (in bytes) that will suit all architectures.
static constexpr int kJitCodeAlignment = 16;
using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;
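
// Illustrative only: the bitmap above tracks code at a 16-byte granularity, so a
// code allocation of `code_size` bytes conceptually occupies a rounded-up range,
// e.g. (assuming the runtime's usual RoundUp helper):
//
//   size_t aligned_size = RoundUp(code_size, kJitCodeAlignment);  // 100 -> 112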

class JitCodeCache {
 public:
  static constexpr size_t kMaxCapacity = 64 * MB;
  // Use a very low default for debug builds, to stress the code cache
  // collection.
  static constexpr size_t kInitialCapacity = kIsDebugBuild ? 8 * KB : 64 * KB;

  // By default, do not GC until reaching 256KB.
  static constexpr size_t kReservedCapacity = kInitialCapacity * 4;

  // Create the code cache with an initial code + data capacity of `initial_capacity`, which may
  // grow up to `max_capacity`. An error message is passed back in the out arg `error_msg`.
  static JitCodeCache* Create(size_t initial_capacity,
                              size_t max_capacity,
                              bool generate_debug_info,
                              std::string* error_msg);
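
  // A minimal creation sketch (hypothetical caller code, shown only to illustrate
  // the signature above):
  //
  //   std::string error_msg;
  //   JitCodeCache* cache = JitCodeCache::Create(JitCodeCache::kInitialCapacity,
  //                                              JitCodeCache::kMaxCapacity,
  //                                              /* generate_debug_info */ false,
  //                                              &error_msg);
  //   if (cache == nullptr) {
  //     LOG(ERROR) << "Failed to create JIT code cache: " << error_msg;
  //   }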

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSize() REQUIRES(!lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSize() REQUIRES(!lock_);

  bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void NotifyMethodRedefined(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Notify the code cache that the compiler wants to use the
  // profiling info of `method` to drive optimizations,
  // and therefore ensure the returned profiling info object is not
  // collected.
  ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);
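
  // Typical pairing as described above (illustrative sketch, not a prescribed API
  // sequence):
  //
  //   ProfilingInfo* info = code_cache->NotifyCompilerUse(method, self);
  //   if (info != nullptr) {
  //     // Inline caches in `info` stay alive until DoneCompilerUse() is called.
  //     ... read the profiling info ...
  //     code_cache->DoneCompilerUse(method, self);
  //   }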

  // Allocate and write code and its metadata to the code cache.
  // `cha_single_implementation_list` needs to be registered via CHA (if it's
  // still valid), since the compiled code still needs to be invalidated if the
  // single-implementation assumptions are violated later. This needs to be done
  // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
  // guard elimination.
  uint8_t* CommitCode(Thread* self,
                      ArtMethod* method,
                      uint8_t* stack_map,
                      uint8_t* method_info,
                      uint8_t* roots_data,
                      size_t frame_size_in_bytes,
                      size_t core_spill_mask,
                      size_t fp_spill_mask,
                      const uint8_t* code,
                      size_t code_size,
                      size_t data_size,
                      bool osr,
                      Handle<mirror::ObjectArray<mirror::Object>> roots,
                      bool has_should_deoptimize_flag,
                      const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Return true if the code cache contains this pc.
  bool ContainsPc(const void* pc) const;

  // Return true if the code cache contains this method.
  bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);

  // Allocate a region of data large enough for `stack_map_size` + `method_info_size` bytes, plus
  // space for storing `number_of_roots` roots. Sets the out pointers to null if there is no more
  // room. Returns the number of bytes allocated.
  size_t ReserveData(Thread* self,
                     size_t stack_map_size,
                     size_t method_info_size,
                     size_t number_of_roots,
                     ArtMethod* method,
                     uint8_t** stack_map_data,
                     uint8_t** method_info_data,
                     uint8_t** roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Clear data from the data portion of the code cache.
  void ClearData(Thread* self, uint8_t* stack_map_data, uint8_t* roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);
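
  // A hedged sketch of how ReserveData/CommitCode/ClearData fit together; all
  // local variable names are placeholders, and the real sequence lives in the
  // optimizing compiler's JIT path:
  //
  //   uint8_t* stack_map_data = nullptr;
  //   uint8_t* method_info_data = nullptr;
  //   uint8_t* roots_data = nullptr;
  //   code_cache->ReserveData(self, stack_map_size, method_info_size, number_of_roots,
  //                           method, &stack_map_data, &method_info_data, &roots_data);
  //   ... write stack maps, method info and roots into the reserved space ...
  //   uint8_t* code_ptr = code_cache->CommitCode(self, method, stack_map_data,
  //                                              method_info_data, roots_data,
  //                                              frame_size, core_spills, fp_spills,
  //                                              code, code_size, data_size, osr, roots,
  //                                              has_should_deoptimize_flag, cha_list);
  //   if (code_ptr == nullptr) {
  //     code_cache->ClearData(self, stack_map_data, roots_data);  // release the reservation
  //   }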

  CodeCacheBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  // Return whether we should do a full collection given the current state of the cache.
  bool ShouldDoFullCollection()
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Perform a collection on the code cache.
  void GarbageCollectCache(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given the 'pc', try to find the JIT compiled code associated with it.
  // Return null if 'pc' is not in the code cache. 'method' is passed as a
  // sanity check.
  OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
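
  // Illustrative lookup, e.g. from a stack walk (hypothetical caller code):
  //
  //   if (code_cache->ContainsPc(reinterpret_cast<const void*>(pc))) {
  //     OatQuickMethodHeader* header = code_cache->LookupMethodHeader(pc, method);
  //     // A null `header` means `pc` does not (or no longer does) map to JIT code.
  //   }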

  // Remove all methods in our cache that were allocated by 'alloc'.
  void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true, this will run a
  // collection and retry if the first allocation is unsuccessful.
  ProfilingInfo* AddProfilingInfo(Thread* self,
                                  ArtMethod* method,
                                  const std::vector<uint32_t>& entries,
                                  bool retry_allocation)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
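
  // Illustrative call (hypothetical caller code):
  //
  //   ProfilingInfo* info = code_cache->AddProfilingInfo(self, method, dex_pc_entries,
  //                                                      /* retry_allocation */ false);
  //   // `info` may be null if the data cache is full and no retry was requested.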

  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
    return mspace == code_mspace_ || mspace == data_mspace_;
  }

  void* MoreCore(const void* mspace, intptr_t increment);
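
  // OwnsSpace() and MoreCore() are presumably the growth hooks for the dlmalloc-style
  // mspaces backing the code and data regions: when one of those mspaces needs more
  // memory, the runtime's morecore callback can use OwnsSpace() to identify the owning
  // cache and forward the request to MoreCore().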

  // Adds to `methods` all profiled methods which are part of any of the given dex locations.
  void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                          std::vector<ProfileMethodInfo>& methods)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
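
  // Illustrative use, e.g. by the profile saver (hypothetical caller code):
  //
  //   std::vector<ProfileMethodInfo> methods;
  //   code_cache->GetProfiledMethods(dex_base_locations, methods);
  //   // `methods` now also holds entries for profiled methods from those dex locations.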

  uint64_t GetLastUpdateTimeNs() const;

  size_t GetCurrentCapacity() REQUIRES(!lock_) {
    MutexLock lock(Thread::Current(), lock_);
    return current_capacity_;
  }

  size_t GetMemorySizeOfCodePointer(const void* ptr) REQUIRES(!lock_);

  void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) REQUIRES(!lock_);

  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!lock_);

  void SweepRootTables(IsMarkedVisitor* visitor)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The GC needs to disallow the reading of inline caches when it processes them,
  // to avoid a class being used while it is being deleted.
  void AllowInlineCacheAccess() REQUIRES(!lock_);
  void DisallowInlineCacheAccess() REQUIRES(!lock_);
  void BroadcastForInlineCacheAccess() REQUIRES(!lock_);

  // Notify the code cache that the method at the pointer 'old_method' is being moved to the pointer
  // 'new_method' since it is being made obsolete.
  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
      REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);

  // Dynamically change whether we want to garbage collect code. Should only be used
  // by tests.
  void SetGarbageCollectCode(bool value) {
    garbage_collect_code_ = value;
  }
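
  // Illustrative test-only use (hypothetical):
  //
  //   code_cache->SetGarbageCollectCode(false);  // keep all JIT-compiled code alive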

 private:
  // Take ownership of maps.
  JitCodeCache(MemMap* code_map,
               MemMap* data_map,
               size_t initial_code_capacity,
               size_t initial_data_capacity,
               size_t max_capacity,
               bool garbage_collect_code);

  // Internal version of 'CommitCode' that will not retry if the
  // allocation fails. Return null if the allocation fails.
  uint8_t* CommitCodeInternal(Thread* self,
                              ArtMethod* method,
                              uint8_t* stack_map,
                              uint8_t* method_info,
                              uint8_t* roots_data,
                              size_t frame_size_in_bytes,
                              size_t core_spill_mask,
                              size_t fp_spill_mask,
                              const uint8_t* code,
                              size_t code_size,
                              size_t data_size,
                              bool osr,
                              Handle<mirror::ObjectArray<mirror::Object>> roots,
                              bool has_should_deoptimize_flag,
                              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                          ArtMethod* method,
                                          const std::vector<uint32_t>& entries)
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Return
  // whether the thread actually waited.
  bool WaitForPotentialCollectionToComplete(Thread* self)
      REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);

  // Remove CHA dependents and underlying allocations for entries in `method_headers`.
  void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
      REQUIRES(!lock_)
      REQUIRES(!Locks::cha_lock_);
  // Free the mspace allocations for `code_ptr`.
  void FreeCode(const void* code_ptr) REQUIRES(lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSizeLocked() REQUIRES(lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSizeLocked() REQUIRES(lock_);

  // Notify all waiting threads that a collection is done.
  void NotifyCollectionDone(Thread* self) REQUIRES(lock_);

  // Try to increase the current capacity of the code cache. Return whether we
  // succeeded at doing so.
  bool IncreaseCodeCacheCapacity() REQUIRES(lock_);

  // Set the footprint limit of the code cache.
  void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);

  void DoCollection(Thread* self, bool collect_profiling_info)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RemoveUnmarkedCode(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void MarkCompiledCodeOnThreadStacks(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool CheckLiveCompiledCodeHasProfilingInfo()
      REQUIRES(lock_);

  void FreeCode(uint8_t* code) REQUIRES(lock_);
  uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
  void FreeData(uint8_t* data) REQUIRES(lock_);
  uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);

  bool IsWeakAccessEnabled(Thread* self) const;
  void WaitUntilInlineCacheAccessible(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Lock for guarding allocations, collections, and the method_code_map_.
  Mutex lock_;
  // Condition to wait on during collection.
  ConditionVariable lock_cond_ GUARDED_BY(lock_);
  // Whether there is a code cache collection in progress.
  bool collection_in_progress_ GUARDED_BY(lock_);
  // Mem map which holds code.
  std::unique_ptr<MemMap> code_map_;
  // Mem map which holds data (stack maps and profiling info).
  std::unique_ptr<MemMap> data_map_;
  // The opaque mspace for allocating code.
  void* code_mspace_ GUARDED_BY(lock_);
  // The opaque mspace for allocating data.
  void* data_mspace_ GUARDED_BY(lock_);
  // Bitmap for collecting code and data.
  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
  // Holds compiled code associated with the ArtMethod.
  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
  // Holds OSR compiled code associated with the ArtMethod.
  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
  // ProfilingInfo objects we have allocated.
  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);

  // The maximum capacity in bytes this code cache can go to.
  size_t max_capacity_ GUARDED_BY(lock_);

  // The current capacity in bytes of the code cache.
  size_t current_capacity_ GUARDED_BY(lock_);

  // The current footprint in bytes of the code portion of the code cache.
  size_t code_end_ GUARDED_BY(lock_);

  // The current footprint in bytes of the data portion of the code cache.
  size_t data_end_ GUARDED_BY(lock_);

  // Whether the last collection round increased the code cache.
  bool last_collection_increased_code_cache_ GUARDED_BY(lock_);

  // Last time the code cache was updated.
  // It is atomic to avoid locking when reading it.
  Atomic<uint64_t> last_update_time_ns_;

  // Whether we can do garbage collection. Not 'const' as tests may override this.
  bool garbage_collect_code_;

  // The size in bytes of used memory for the data portion of the code cache.
  size_t used_memory_for_data_ GUARDED_BY(lock_);

  // The size in bytes of used memory for the code portion of the code cache.
  size_t used_memory_for_code_ GUARDED_BY(lock_);

  // Number of compilations done throughout the lifetime of the JIT.
  size_t number_of_compilations_ GUARDED_BY(lock_);

  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
  size_t number_of_osr_compilations_ GUARDED_BY(lock_);

  // Number of code cache collections done throughout the lifetime of the JIT.
  size_t number_of_collections_ GUARDED_BY(lock_);

  // Histogram for keeping track of stack map size statistics.
  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of code size statistics.
  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of profiling info statistics.
  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);

  // Whether the GC allows accessing weaks in inline caches. Note that this
  // is not used by the concurrent collector, which uses
  // Thread::SetWeakRefAccessEnabled instead.
  Atomic<bool> is_weak_access_enabled_;

  // Condition to wait on for accessing inline caches.
  ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);

  DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};

}  // namespace jit
}  // namespace art

#endif  // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_