/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "profiling_info.h"

#include "art_method-inl.h"
#include "dex_instruction.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"

namespace art {

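// Implementation note (an assumption based on the memset below, not stated in
// this file): `cache_` is a trailing InlineCache array, and the JIT code cache
// allocates each ProfilingInfo with enough extra space after the struct to hold
// `entries.size()` slots, roughly (hypothetical allocation site):
//
//   uint8_t* data = ...;  // sizeof(ProfilingInfo) + n * sizeof(InlineCache) bytes
//   ProfilingInfo* info = new (data) ProfilingInfo(method, entries);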
ProfilingInfo::ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries)
      : number_of_inline_caches_(entries.size()),
        method_(method),
        is_method_being_compiled_(false),
        is_osr_method_being_compiled_(false),
        current_inline_uses_(0),
        saved_entry_point_(nullptr) {
  memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
  for (size_t i = 0; i < number_of_inline_caches_; ++i) {
    cache_[i].dex_pc_ = entries[i];
  }
}

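// Create() records one inline-cache slot per dex pc at which the method makes a
// dynamically dispatched call (the invoke-virtual/invoke-interface variants
// matched below); direct, static, and super invokes resolve to a single target
// statically and carry no receiver-type profile. A minimal sketch of a call
// site, assuming a non-native method (names here are illustrative, not from
// this file):
//
//   if (!ProfilingInfo::Create(self, method, /* retry_allocation= */ false)) {
//     // Allocation failed and we chose not to retry (e.g. to avoid triggering
//     // a collection on a performance-sensitive path).
//   }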
bool ProfilingInfo::Create(Thread* self, ArtMethod* method, bool retry_allocation) {
  // Walk over the dex instructions of the method and keep track of
  // instructions we are interested in profiling.
  DCHECK(!method->IsNative());

  const DexFile::CodeItem& code_item = *method->GetCodeItem();
  const uint16_t* code_ptr = code_item.insns_;
  const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;

  uint32_t dex_pc = 0;
  std::vector<uint32_t> entries;
  while (code_ptr < code_end) {
    const Instruction& instruction = *Instruction::At(code_ptr);
    switch (instruction.Opcode()) {
      case Instruction::INVOKE_VIRTUAL:
      case Instruction::INVOKE_VIRTUAL_RANGE:
      case Instruction::INVOKE_VIRTUAL_QUICK:
      case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
      case Instruction::INVOKE_INTERFACE:
      case Instruction::INVOKE_INTERFACE_RANGE:
        entries.push_back(dex_pc);
        break;

      default:
        break;
    }
    dex_pc += instruction.SizeInCodeUnits();
    code_ptr += instruction.SizeInCodeUnits();
  }

  // We always create a `ProfilingInfo` object, even if there are no instructions
  // we are interested in. The JIT code cache uses it internally.

  // Allocate the `ProfilingInfo` object in the JIT's data space.
  jit::JitCodeCache* code_cache = Runtime::Current()->GetJit()->GetCodeCache();
  return code_cache->AddProfilingInfo(self, method, entries, retry_allocation) != nullptr;
}

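// The per-method cache array is typically short, so the linear scan below is
// cheap; since Create() pushes entries while walking the code linearly, the
// array is sorted by dex pc, and the TODO's binary search would be a drop-in
// upgrade for long arrays.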
InlineCache* ProfilingInfo::GetInlineCache(uint32_t dex_pc) {
  // TODO: binary search if array is too long.
  for (size_t i = 0; i < number_of_inline_caches_; ++i) {
    if (cache_[i].dex_pc_ == dex_pc) {
      return &cache_[i];
    }
  }
  LOG(FATAL) << "No inline cache found for " << ArtMethod::PrettyMethod(method_) << "@" << dex_pc;
  UNREACHABLE();
}

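// Concurrency note: AddInvokeInfo can race with other threads and with the
// garbage collector, and takes no lock. Empty slots are claimed with a strong
// CAS, and a lost race re-examines the same slot (the `--i` below). The read
// skips the read barrier and then calls ReadBarrier::IsMarked() by hand, which
// (as we read the barrier contract) yields the up-to-date class for a live
// entry and nullptr for an empty or not-yet-swept stale one.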
void ProfilingInfo::AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls) {
  InlineCache* cache = GetInlineCache(dex_pc);
  for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
    mirror::Class* existing = cache->classes_[i].Read<kWithoutReadBarrier>();
    mirror::Class* marked = ReadBarrier::IsMarked(existing);
    if (marked == cls) {
      // Receiver type is already in the cache, nothing else to do.
      return;
    } else if (marked == nullptr) {
      // Cache entry is empty, try to put `cls` in it.
      // Note: it is ok to spin on `existing` here: if `existing` is not null, it is a
      // stale heap address, which will only be cleared during SweepSystemWeaks,
      // *after* this thread hits a suspend point.
      GcRoot<mirror::Class> expected_root(existing);
      GcRoot<mirror::Class> desired_root(cls);
      if (!reinterpret_cast<Atomic<GcRoot<mirror::Class>>*>(&cache->classes_[i])->
              CompareExchangeStrongSequentiallyConsistent(expected_root, desired_root)) {
        // Some other thread put a class in this entry; re-examine it in case it now
        // contains `cls`.
        --i;
      } else {
        // We successfully set `cls`, just return.
        return;
      }
    }
  }
  // Unsuccessful: the cache is full, making the call site megamorphic. We do not
  // DCHECK this, though, as the garbage collector might clear entries concurrently.
}


}  // namespace art