/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_

#include <iosfwd>
#include <memory>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>

#include "base/arena_containers.h"
#include "base/atomic.h"
#include "base/histogram.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "base/mutex.h"
#include "base/safe_map.h"

namespace art {

class ArtMethod;
template<class T> class Handle;
class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
class JitJniStubTestHelper;
class OatQuickMethodHeader;
struct ProfileMethodInfo;
class ProfilingInfo;
class Thread;

namespace gc {
namespace accounting {
template<size_t kAlignment> class MemoryRangeBitmap;
}  // namespace accounting
}  // namespace gc

namespace mirror {
class Class;
class Object;
template<class T> class ObjectArray;
}  // namespace mirror
namespace jit {

class MarkCodeClosure;
class ScopedCodeCacheWrite;

// Number of bytes represented by a bit in the CodeCacheBitmap. Value is reasonable for all
// architectures.
static constexpr int kJitCodeAccountingBytes = 16;

// Type of bitmap used for tracking live functions in the JIT code cache for the purposes
// of garbage collecting code.
using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAccountingBytes>;
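
// For a sense of scale (simple arithmetic from the constants above, not a
// statement about the implementation): with one bit per 16 bytes, a cache at
// the 64 MB kMaxCapacity needs 64 MB / 16 = 4M bits, i.e. 512 KB of bitmap.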

class JitCodeCache {
 public:
  static constexpr size_t kMaxCapacity = 64 * MB;
  // Set the default to a very low amount for debug builds to stress the code cache
  // collection.
  static constexpr size_t kInitialCapacity = kIsDebugBuild ? 8 * KB : 64 * KB;

  // By default, do not GC until reaching 256KB (32KB on debug builds, where
  // kInitialCapacity is 8KB).
  static constexpr size_t kReservedCapacity = kInitialCapacity * 4;

  // Create the code cache. On failure, returns null and stores a description
  // of the error in the out arg `error_msg`.
  static JitCodeCache* Create(bool used_only_for_profile_data,
                              bool rwx_memory_allowed,
                              bool is_zygote,
                              std::string* error_msg);
  ~JitCodeCache();
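
  // A usage sketch for Create() (hypothetical caller; the error handling shown
  // is illustrative, not the runtime's actual policy):
  //
  //   std::string error_msg;
  //   JitCodeCache* cache = JitCodeCache::Create(
  //       /* used_only_for_profile_data= */ false,
  //       /* rwx_memory_allowed= */ true,
  //       /* is_zygote= */ false,
  //       &error_msg);
  //   if (cache == nullptr) {
  //     LOG(ERROR) << "Failed to create JIT code cache: " << error_msg;
  //   }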

  bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void NotifyMethodRedefined(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Notify the code cache that the compiler wants to use the
  // profiling info of `method` to drive optimizations,
  // and therefore ensure the returned profiling info object is not
  // collected.
  ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

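  // A sketch of the expected NotifyCompilerUse()/DoneCompilerUse() pairing
  // (illustrative caller; the real call sites live in the compiler driver):
  //
  //   ProfilingInfo* info = code_cache->NotifyCompilerUse(method, self);
  //   if (info != nullptr) {
  //     // ... read inline caches from `info` to drive inlining decisions ...
  //     code_cache->DoneCompilerUse(method, self);
  //   }
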
  // Allocate and write code and its metadata to the code cache.
  // `cha_single_implementation_list` needs to be registered via CHA (if it's
  // still valid), since the compiled code still needs to be invalidated if the
  // single-implementation assumptions are violated later. This needs to be done
  // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
  // guard elimination.
  uint8_t* CommitCode(Thread* self,
                      ArtMethod* method,
                      uint8_t* stack_map,
                      uint8_t* roots_data,
                      const uint8_t* code,
                      size_t code_size,
                      size_t data_size,
                      bool osr,
                      const std::vector<Handle<mirror::Object>>& roots,
                      bool has_should_deoptimize_flag,
                      const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Return true if the code cache contains this pc.
  bool ContainsPc(const void* pc) const;

  // Returns true if either the method's entrypoint is JIT compiled code or it is the
  // instrumentation entrypoint and we can jump to jit code for this method. For testing use only.
  bool WillExecuteJitCode(ArtMethod* method) REQUIRES(!lock_);

  // Return true if the code cache contains this method.
  bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);

  // Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
  const void* GetJniStubCode(ArtMethod* method) REQUIRES(!lock_);

  // Allocate a region of data that contains `stack_map_size` bytes, and
  // potentially space for storing `number_of_roots` roots. Returns the number
  // of bytes allocated; if there is no more room, the out arguments are set
  // to null.
  size_t ReserveData(Thread* self,
                     size_t stack_map_size,
                     size_t number_of_roots,
                     ArtMethod* method,
                     uint8_t** stack_map_data,
                     uint8_t** roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Clear data from the data portion of the code cache.
  void ClearData(Thread* self, uint8_t* stack_map_data, uint8_t* roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

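  // A sketch of the expected reserve/commit sequence (illustrative only; the
  // real caller is the JIT compiler's code generator, and the local names
  // here are hypothetical):
  //
  //   uint8_t* stack_map_data = nullptr;
  //   uint8_t* roots_data = nullptr;
  //   code_cache->ReserveData(self, stack_map_size, roots.size(), method,
  //                           &stack_map_data, &roots_data);
  //   // ... write the stack maps into `stack_map_data` ...
  //   uint8_t* code_ptr = code_cache->CommitCode(
  //       self, method, stack_map_data, roots_data, code, code_size, data_size,
  //       osr, roots, has_should_deoptimize_flag, cha_single_implementation_list);
  //   if (code_ptr == nullptr) {
  //     // Commit failed; release the reserved data.
  //     code_cache->ClearData(self, stack_map_data, roots_data);
  //   }
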
  // Perform a collection on the code cache.
  void GarbageCollectCache(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given the 'pc', try to find the JIT compiled code associated with it.
  // Return null if 'pc' is not in the code cache. 'method' is passed for a
  // sanity check.
  OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

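  // For example, a stack walker holding a return `pc` that lies inside the
  // cache could recover the method header like this (a sketch, not the actual
  // stack walking code):
  //
  //   if (code_cache->ContainsPc(reinterpret_cast<const void*>(pc))) {
  //     OatQuickMethodHeader* header = code_cache->LookupMethodHeader(pc, method);
  //     // ... use `header` to decode the stack maps for this frame ...
  //   }
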
  OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes method from the cache for testing purposes. The caller
  // must ensure that all threads are suspended and that the method is
  // not on any thread's stack.
  bool RemoveMethod(ArtMethod* method, bool release_memory)
      REQUIRES(!lock_)
      REQUIRES(Locks::mutator_lock_);

  // Remove all methods in our cache that were allocated by 'alloc'.
  void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true,
  // this will run a collection and retry if the first allocation is
  // unsuccessful.
  ProfilingInfo* AddProfilingInfo(Thread* self,
                                  ArtMethod* method,
                                  const std::vector<uint32_t>& entries,
                                  bool retry_allocation)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
    return mspace == data_mspace_ || mspace == exec_mspace_;
  }

  void* MoreCore(const void* mspace, intptr_t increment);

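  // `MoreCore` serves as the morecore-style growth callback for the dlmalloc
  // mspaces above, and `OwnsSpace` lets a global hook route a growth request
  // to the cache that owns the mspace. A hypothetical trampoline (the actual
  // hook and its name live elsewhere in the runtime):
  //
  //   void* jit_morecore(void* mspace, intptr_t increment) {
  //     JitCodeCache* cache = Runtime::Current()->GetJit()->GetCodeCache();
  //     DCHECK(cache->OwnsSpace(mspace));
  //     return cache->MoreCore(mspace, increment);
  //   }
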
  // Adds to `methods` all profiled methods which are part of any of the given dex locations.
  void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                          std::vector<ProfileMethodInfo>& methods)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) REQUIRES(!lock_);

  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!lock_);

  void SweepRootTables(IsMarkedVisitor* visitor)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // The GC needs to disallow the reading of inline caches when it processes them,
  // to avoid a class being used while it is being deleted.
  void AllowInlineCacheAccess() REQUIRES(!lock_);
  void DisallowInlineCacheAccess() REQUIRES(!lock_);
  void BroadcastForInlineCacheAccess() REQUIRES(!lock_);

  // Notify the code cache that the method at the pointer 'old_method' is being moved to the pointer
  // 'new_method' since it is being made obsolete.
  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
      REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);

  // Dynamically change whether we want to garbage collect code.
  void SetGarbageCollectCode(bool value) REQUIRES(!lock_);

  bool GetGarbageCollectCode() REQUIRES(!lock_);

  // Unsafe variant for debug checks.
  bool GetGarbageCollectCodeUnsafe() const NO_THREAD_SAFETY_ANALYSIS {
    return garbage_collect_code_;
  }

  // If JIT GC has been disabled (and instrumentation has been enabled), this will return the
  // JIT-compiled entrypoint for this method. Otherwise it will return null.
  const void* FindCompiledCodeForInstrumentation(ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Fetch the entrypoint that zygote may have saved for a method. The zygote saves an entrypoint
  // only for the case when the method's declaring class is not initialized.
  const void* GetZygoteSavedEntryPoint(ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void PostForkChildAction(bool is_system_server, bool is_zygote);

  // Clear the entrypoints of JIT compiled methods that belong in the zygote space.
  // This is used for removing non-debuggable JIT code at the point we realize the runtime
  // is debuggable.
  void ClearEntryPointsInZygoteExecSpace() REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);

 private:
  JitCodeCache();

  void InitializeState(size_t initial_capacity, size_t max_capacity) REQUIRES(lock_);

  bool InitializeMappings(bool rwx_memory_allowed, bool is_zygote, std::string* error_msg)
      REQUIRES(lock_);

  void InitializeSpaces() REQUIRES(lock_);

  // Internal version of 'CommitCode' that will not retry if the
  // allocation fails. Returns null in that case.
  uint8_t* CommitCodeInternal(Thread* self,
                              ArtMethod* method,
                              uint8_t* stack_map,
                              uint8_t* roots_data,
                              const uint8_t* code,
                              size_t code_size,
                              size_t data_size,
                              bool osr,
                              const std::vector<Handle<mirror::Object>>& roots,
                              bool has_should_deoptimize_flag,
                              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Adds the given roots to the roots_data. Only a member for annotalysis.
  void FillRootTable(uint8_t* roots_data, const std::vector<Handle<mirror::Object>>& roots)
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                          ArtMethod* method,
                                          const std::vector<uint32_t>& entries)
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Must be called with the mutator lock.
  // The non-mutator lock version should be used if possible. This method will release then
  // re-acquire the mutator lock.
  void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
      REQUIRES(lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Return
  // whether the thread actually waited.
  bool WaitForPotentialCollectionToComplete(Thread* self)
      REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);

  // Remove CHA dependents and underlying allocations for entries in `method_headers`.
  void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
      REQUIRES(!lock_)
      REQUIRES(!Locks::cha_lock_);

  // Removes method from the cache. The caller must ensure that all threads
  // are suspended and that the method is not on any thread's stack.
  bool RemoveMethodLocked(ArtMethod* method, bool release_memory)
      REQUIRES(lock_)
      REQUIRES(Locks::mutator_lock_);

  // Free code and data allocations for `code_ptr`.
  void FreeCodeAndData(const void* code_ptr) REQUIRES(lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSize() REQUIRES(!lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSize() REQUIRES(!lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSizeLocked() REQUIRES(lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSizeLocked() REQUIRES(lock_);

  // Notify all waiting threads that a collection is done.
  void NotifyCollectionDone(Thread* self) REQUIRES(lock_);

  // Try to increase the current capacity of the code cache. Return whether we
  // succeeded at doing so.
  bool IncreaseCodeCacheCapacity() REQUIRES(lock_);

  // Set the footprint limit of the code cache.
  void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);

  // Return whether we should do a full collection given the current state of the cache.
  bool ShouldDoFullCollection()
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void DoCollection(Thread* self, bool collect_profiling_info)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RemoveUnmarkedCode(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void MarkCompiledCodeOnThreadStacks(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool CheckLiveCompiledCodeHasProfilingInfo()
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  CodeCacheBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
  void FreeCode(uint8_t* code) REQUIRES(lock_);
  uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
  void FreeData(uint8_t* data) REQUIRES(lock_);

  bool HasDualCodeMapping() const {
    return non_exec_pages_.IsValid();
  }

  bool HasCodeMapping() const {
    return exec_pages_.IsValid();
  }

  const MemMap* GetUpdatableCodeMapping() const;

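  // With a dual code mapping, updates to compiled code are written through the
  // non-executable view while execution uses the executable one. A sketch of
  // the selection a definition of GetUpdatableCodeMapping() might make (an
  // assumption about the .cc file, not a quote from it):
  //
  //   if (HasDualCodeMapping()) return &non_exec_pages_;
  //   if (HasCodeMapping()) return &exec_pages_;
  //   return nullptr;
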
  bool IsInZygoteDataSpace(const void* ptr) const {
    return zygote_data_pages_.HasAddress(ptr);
  }

  bool IsInZygoteExecSpace(const void* ptr) const {
    return zygote_exec_pages_.HasAddress(ptr);
  }

  bool IsWeakAccessEnabled(Thread* self) const;
  void WaitUntilInlineCacheAccessible(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  class JniStubKey;
  class JniStubData;

  // Lock for guarding allocations, collections, and the method_code_map_.
  Mutex lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
  // Condition to wait on during collection.
  ConditionVariable lock_cond_ GUARDED_BY(lock_);
  // Whether there is a code cache collection in progress.
  bool collection_in_progress_ GUARDED_BY(lock_);
  // Mem map which holds data (stack maps and profiling info).
  MemMap data_pages_;
  // Mem map which holds code and has executable permission.
  MemMap exec_pages_;
  // Mem map which holds code with non-executable permission. Only valid for dual view JIT when
  // this is the non-executable view of code used to write updates.
  MemMap non_exec_pages_;
  // The opaque mspace for allocating data.
  void* data_mspace_ GUARDED_BY(lock_);
  // The opaque mspace for allocating code.
  void* exec_mspace_ GUARDED_BY(lock_);
  // Bitmap for collecting code and data.
  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
  // Holds compiled code associated with the shorty for a JNI stub.
  SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(lock_);
  // Holds compiled code associated with the ArtMethod.
  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
  // Holds OSR compiled code associated with the ArtMethod.
  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
  // ProfilingInfo objects we have allocated.
  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);

  // The initial capacity in bytes this code cache starts with.
  size_t initial_capacity_ GUARDED_BY(lock_);

  // The maximum capacity in bytes this code cache can go to.
  size_t max_capacity_ GUARDED_BY(lock_);

  // The current capacity in bytes of the code cache.
  size_t current_capacity_ GUARDED_BY(lock_);

  // The current footprint in bytes of the data portion of the code cache.
  size_t data_end_ GUARDED_BY(lock_);

  // The current footprint in bytes of the code portion of the code cache.
  size_t exec_end_ GUARDED_BY(lock_);

  // Whether the last collection round increased the code cache.
  bool last_collection_increased_code_cache_ GUARDED_BY(lock_);

  // Whether we can do garbage collection. Not 'const' as tests may override this.
  bool garbage_collect_code_ GUARDED_BY(lock_);

  // The size in bytes of used memory for the data portion of the code cache.
  size_t used_memory_for_data_ GUARDED_BY(lock_);

  // The size in bytes of used memory for the code portion of the code cache.
  size_t used_memory_for_code_ GUARDED_BY(lock_);

  // Number of compilations done throughout the lifetime of the JIT.
  size_t number_of_compilations_ GUARDED_BY(lock_);

  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
  size_t number_of_osr_compilations_ GUARDED_BY(lock_);

  // Number of code cache collections done throughout the lifetime of the JIT.
  size_t number_of_collections_ GUARDED_BY(lock_);

  // Histogram for keeping track of stack map size statistics.
  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of code size statistics.
  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of profiling info statistics.
  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);

  // Whether the GC allows accessing weaks in inline caches. Note that this
  // is not used by the concurrent collector, which uses
  // Thread::SetWeakRefAccessEnabled instead.
  Atomic<bool> is_weak_access_enabled_;

  // Condition to wait on for accessing inline caches.
  ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);

  // Mem map which holds zygote data (stack maps and profiling info).
  MemMap zygote_data_pages_;
  // Mem map which holds zygote code and has executable permission.
  MemMap zygote_exec_pages_;
  // The opaque mspace for allocating zygote data.
  void* zygote_data_mspace_ GUARDED_BY(lock_);
  // The opaque mspace for allocating zygote code.
  void* zygote_exec_mspace_ GUARDED_BY(lock_);

  friend class art::JitJniStubTestHelper;
  friend class ScopedCodeCacheWrite;
  friend class MarkCodeClosure;

  DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
};

}  // namespace jit
}  // namespace art

#endif  // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_