/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimizing_compiler.h"

#include <fstream>
#include <memory>
#include <sstream>

#include <stdint.h>

#include "android-base/strings.h"

#ifdef ART_ENABLE_CODEGEN_arm64
#include "instruction_simplifier_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_mips
#include "pc_relative_fixups_mips.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86
#include "pc_relative_fixups_x86.h"
#endif

#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
#include "x86_memory_gen.h"
#endif

#include "art_method-inl.h"
#include "base/arena_allocator.h"
#include "base/arena_containers.h"
#include "base/dumpable.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
#include "bounds_check_elimination.h"
#include "builder.h"
#include "cha_guard_optimization.h"
#include "code_generator.h"
#include "code_sinking.h"
#include "compiled_method.h"
#include "compiler.h"
#include "constant_folding.h"
#include "dead_code_elimination.h"
#include "debug/elf_debug_writer.h"
#include "debug/method_debug_info.h"
#include "dex/verification_results.h"
#include "dex/verified_method.h"
#include "dex_file_types.h"
#include "driver/compiler_driver-inl.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "elf_writer_quick.h"
#include "graph_checker.h"
#include "graph_visualizer.h"
#include "gvn.h"
#include "induction_var_analysis.h"
#include "inliner.h"
#include "instruction_simplifier.h"
#include "instruction_simplifier_arm.h"
#include "intrinsics.h"
#include "jit/debugger_interface.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/jit_logger.h"
#include "jni/quick/jni_compiler.h"
#include "licm.h"
#include "load_store_analysis.h"
#include "load_store_elimination.h"
#include "loop_optimization.h"
#include "nodes.h"
#include "oat_quick_method_header.h"
#include "prepare_for_register_allocation.h"
#include "reference_type_propagation.h"
#include "register_allocator_linear_scan.h"
#include "scheduler.h"
#include "select_generator.h"
#include "sharpening.h"
#include "side_effects_analysis.h"
#include "ssa_builder.h"
#include "ssa_liveness_analysis.h"
#include "ssa_phi_elimination.h"
#include "utils/assembler.h"
#include "verifier/verifier_compiler_binding.h"

namespace art {

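// Arena usage above this threshold is reported in the log when allocation
// counting (kArenaAllocatorCountAllocations) is enabled.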
static constexpr size_t kArenaAllocatorMemoryReportThreshold = 8 * MB;

static constexpr const char* kPassNameSeparator = "$";

/**
 * Used by the code generator to allocate the code in a vector.
 */
class CodeVectorAllocator FINAL : public CodeAllocator {
 public:
  explicit CodeVectorAllocator(ArenaAllocator* arena)
      : memory_(arena->Adapter(kArenaAllocCodeBuffer)),
        size_(0) {}

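  // Called by the code generator once the final code size is known: resizes
  // the backing vector and returns the buffer the generated code is copied into.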
  virtual uint8_t* Allocate(size_t size) {
    size_ = size;
    memory_.resize(size);
    return &memory_[0];
  }

  size_t GetSize() const { return size_; }
  const ArenaVector<uint8_t>& GetMemory() const { return memory_; }
  uint8_t* GetData() { return memory_.data(); }

 private:
  ArenaVector<uint8_t> memory_;
  size_t size_;

  DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator);
};

/**
 * Filter to apply to the visualizer. Methods whose names contain the filter
 * will be dumped.
 */
static constexpr const char kStringFilter[] = "";

class PassScope;

class PassObserver : public ValueObject {
 public:
  PassObserver(HGraph* graph,
               CodeGenerator* codegen,
               std::ostream* visualizer_output,
               CompilerDriver* compiler_driver,
               Mutex& dump_mutex)
      : graph_(graph),
        cached_method_name_(),
        timing_logger_enabled_(compiler_driver->GetDumpPasses()),
        timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
        disasm_info_(graph->GetArena()),
        visualizer_oss_(),
        visualizer_output_(visualizer_output),
        visualizer_enabled_(!compiler_driver->GetCompilerOptions().GetDumpCfgFileName().empty()),
        visualizer_(&visualizer_oss_, graph, *codegen),
        visualizer_dump_mutex_(dump_mutex),
        graph_in_bad_state_(false) {
    if (timing_logger_enabled_ || visualizer_enabled_) {
      if (!IsVerboseMethod(compiler_driver, GetMethodName())) {
        timing_logger_enabled_ = visualizer_enabled_ = false;
      }
      if (visualizer_enabled_) {
        visualizer_.PrintHeader(GetMethodName());
        codegen->SetDisassemblyInformation(&disasm_info_);
      }
    }
  }

  ~PassObserver() {
    if (timing_logger_enabled_) {
      LOG(INFO) << "TIMINGS " << GetMethodName();
      LOG(INFO) << Dumpable<TimingLogger>(timing_logger_);
    }
    DCHECK(visualizer_oss_.str().empty());
  }

  void DumpDisassembly() REQUIRES(!visualizer_dump_mutex_) {
    if (visualizer_enabled_) {
      visualizer_.DumpGraphWithDisassembly();
      FlushVisualizer();
    }
  }

  void SetGraphInBadState() { graph_in_bad_state_ = true; }

  const char* GetMethodName() {
    // PrettyMethod() is expensive, so we delay calling it until we actually have to.
    if (cached_method_name_.empty()) {
      cached_method_name_ = graph_->GetDexFile().PrettyMethod(graph_->GetMethodIdx());
    }
    return cached_method_name_.c_str();
  }

 private:
  void StartPass(const char* pass_name) REQUIRES(!visualizer_dump_mutex_) {
    VLOG(compiler) << "Starting pass: " << pass_name;
    // Dump graph first, then start timer.
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass */ false, graph_in_bad_state_);
      FlushVisualizer();
    }
    if (timing_logger_enabled_) {
      timing_logger_.StartTiming(pass_name);
    }
  }

  void FlushVisualizer() REQUIRES(!visualizer_dump_mutex_) {
    MutexLock mu(Thread::Current(), visualizer_dump_mutex_);
    *visualizer_output_ << visualizer_oss_.str();
    visualizer_output_->flush();
    visualizer_oss_.str("");
    visualizer_oss_.clear();
  }

  void EndPass(const char* pass_name) REQUIRES(!visualizer_dump_mutex_) {
    // Pause timer first, then dump graph.
    if (timing_logger_enabled_) {
      timing_logger_.EndTiming();
    }
    if (visualizer_enabled_) {
      visualizer_.DumpGraph(pass_name, /* is_after_pass */ true, graph_in_bad_state_);
      FlushVisualizer();
    }

    // Validate the HGraph if running in debug mode.
    if (kIsDebugBuild) {
      if (!graph_in_bad_state_) {
        GraphChecker checker(graph_);
        checker.Run();
        if (!checker.IsValid()) {
          LOG(FATAL) << "Error after " << pass_name << ": " << Dumpable<GraphChecker>(checker);
        }
      }
    }
  }

  static bool IsVerboseMethod(CompilerDriver* compiler_driver, const char* method_name) {
    // Test an exact match to --verbose-methods. If verbose-methods is set, this overrides an
    // empty kStringFilter matching all methods.
    if (compiler_driver->GetCompilerOptions().HasVerboseMethods()) {
      return compiler_driver->GetCompilerOptions().IsVerboseMethod(method_name);
    }

    // Test the kStringFilter sub-string. The constexpr helper variable silences the
    // unreachable-code warning when the string is empty.
    constexpr bool kStringFilterEmpty = arraysize(kStringFilter) <= 1;
    if (kStringFilterEmpty || strstr(method_name, kStringFilter) != nullptr) {
      return true;
    }

    return false;
  }

  HGraph* const graph_;

  std::string cached_method_name_;

  bool timing_logger_enabled_;
  TimingLogger timing_logger_;

  DisassemblyInformation disasm_info_;

  std::ostringstream visualizer_oss_;
  std::ostream* visualizer_output_;
  bool visualizer_enabled_;
  HGraphVisualizer visualizer_;
  Mutex& visualizer_dump_mutex_;

  // Flag to be set by the compiler if the pass failed and the graph is not
  // expected to validate.
  bool graph_in_bad_state_;

  friend PassScope;

  DISALLOW_COPY_AND_ASSIGN(PassObserver);
};

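// RAII helper that brackets the execution of a single pass: StartPass() is
// called on construction and EndPass() on destruction, so graph dumping,
// timing and graph checking happen even on early returns.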
class PassScope : public ValueObject {
 public:
  PassScope(const char *pass_name, PassObserver* pass_observer)
      : pass_name_(pass_name),
        pass_observer_(pass_observer) {
    pass_observer_->StartPass(pass_name_);
  }

  ~PassScope() {
    pass_observer_->EndPass(pass_name_);
  }

 private:
  const char* const pass_name_;
  PassObserver* const pass_observer_;
};

class OptimizingCompiler FINAL : public Compiler {
 public:
  explicit OptimizingCompiler(CompilerDriver* driver);
  ~OptimizingCompiler() OVERRIDE;

  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const OVERRIDE;

  CompiledMethod* Compile(const DexFile::CodeItem* code_item,
                          uint32_t access_flags,
                          InvokeType invoke_type,
                          uint16_t class_def_idx,
                          uint32_t method_idx,
                          Handle<mirror::ClassLoader> class_loader,
                          const DexFile& dex_file,
                          Handle<mirror::DexCache> dex_cache) const OVERRIDE;

  CompiledMethod* JniCompile(uint32_t access_flags,
                             uint32_t method_idx,
                             const DexFile& dex_file,
                             JniOptimizationFlags optimization_flags) const OVERRIDE {
    return ArtQuickJniCompileMethod(GetCompilerDriver(),
                                    access_flags,
                                    method_idx,
                                    dex_file,
                                    optimization_flags);
  }

  uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
        InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
  }

  void Init() OVERRIDE;

  void UnInit() const OVERRIDE;

  void MaybeRecordStat(MethodCompilationStat compilation_stat) const {
    if (compilation_stats_.get() != nullptr) {
      compilation_stats_->RecordStat(compilation_stat);
    }
  }

  bool JitCompile(Thread* self,
                  jit::JitCodeCache* code_cache,
                  ArtMethod* method,
                  bool osr,
                  jit::JitLogger* jit_logger)
      OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  void RunOptimizations(HGraph* graph,
                        CodeGenerator* codegen,
                        CompilerDriver* driver,
                        const DexCompilationUnit& dex_compilation_unit,
                        PassObserver* pass_observer,
                        VariableSizedHandleScope* handles) const;

  void RunOptimizations(HOptimization* optimizations[],
                        size_t length,
                        PassObserver* pass_observer) const;

  // Create a 'CompiledMethod' for an optimized graph.
  CompiledMethod* Emit(ArenaAllocator* arena,
                       CodeVectorAllocator* code_allocator,
                       CodeGenerator* codegen,
                       CompilerDriver* driver,
                       const DexFile::CodeItem* item) const;

  // Try compiling a method and return the code generator used for
  // compiling it.
  // This method:
  // 1) Builds the graph. Returns null if it failed to build it.
  // 2) Transforms the graph to SSA. Returns null if it failed.
  // 3) Runs optimizations on the graph, including register allocation.
  // 4) Generates code with the `code_allocator` provided.
  CodeGenerator* TryCompile(ArenaAllocator* arena,
                            CodeVectorAllocator* code_allocator,
                            const DexFile::CodeItem* code_item,
                            uint32_t access_flags,
                            InvokeType invoke_type,
                            uint16_t class_def_idx,
                            uint32_t method_idx,
                            Handle<mirror::ClassLoader> class_loader,
                            const DexFile& dex_file,
                            Handle<mirror::DexCache> dex_cache,
                            ArtMethod* method,
                            bool osr,
                            VariableSizedHandleScope* handles) const;

  void MaybeRunInliner(HGraph* graph,
                       CodeGenerator* codegen,
                       CompilerDriver* driver,
                       const DexCompilationUnit& dex_compilation_unit,
                       PassObserver* pass_observer,
                       VariableSizedHandleScope* handles) const;

  void RunArchOptimizations(InstructionSet instruction_set,
                            HGraph* graph,
                            CodeGenerator* codegen,
                            PassObserver* pass_observer) const;

  std::unique_ptr<OptimizingCompilerStats> compilation_stats_;

  std::unique_ptr<std::ostream> visualizer_output_;

  mutable Mutex dump_mutex_;  // To synchronize visualizer writing.

  DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};

static const int kMaximumCompilationTimeBeforeWarning = 100; /* ms */

OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
    : Compiler(driver, kMaximumCompilationTimeBeforeWarning),
      dump_mutex_("Visualizer dump lock") {}

void OptimizingCompiler::Init() {
  // Enable C1visualizer output. Must be done in Init() because the compiler
  // driver is not fully initialized when passed to the compiler's constructor.
  CompilerDriver* driver = GetCompilerDriver();
  const std::string cfg_file_name = driver->GetCompilerOptions().GetDumpCfgFileName();
  if (!cfg_file_name.empty()) {
    std::ios_base::openmode cfg_file_mode =
        driver->GetCompilerOptions().GetDumpCfgAppend() ? std::ofstream::app : std::ofstream::out;
    visualizer_output_.reset(new std::ofstream(cfg_file_name, cfg_file_mode));
  }
  if (driver->GetDumpStats()) {
    compilation_stats_.reset(new OptimizingCompilerStats());
  }
}

void OptimizingCompiler::UnInit() const {
}

OptimizingCompiler::~OptimizingCompiler() {
  if (compilation_stats_.get() != nullptr) {
    compilation_stats_->Log();
  }
}

bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
                                          const DexFile& dex_file ATTRIBUTE_UNUSED) const {
  return true;
}

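// Returns whether the optimizing backend can generate code for the given
// instruction set. 32-bit ARM is only supported with the hard-float quick ABI.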
static bool IsInstructionSetSupported(InstructionSet instruction_set) {
  return (instruction_set == kArm && !kArm32QuickCodeUseSoftFloat)
      || instruction_set == kArm64
      || (instruction_set == kThumb2 && !kArm32QuickCodeUseSoftFloat)
      || instruction_set == kMips
      || instruction_set == kMips64
      || instruction_set == kX86
      || instruction_set == kX86_64;
}

// Strip pass name suffix to get optimization name.
static std::string ConvertPassNameToOptimizationName(const std::string& pass_name) {
  size_t pos = pass_name.find(kPassNameSeparator);
  return pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
}

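// Builds a single HOptimization from its (possibly "$"-suffixed) pass name.
// Passes that consume analysis results are handed the most recent matching
// analysis instance; returns null for an unknown pass name.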
static HOptimization* BuildOptimization(
    const std::string& pass_name,
    ArenaAllocator* arena,
    HGraph* graph,
    OptimizingCompilerStats* stats,
    CodeGenerator* codegen,
    CompilerDriver* driver,
    const DexCompilationUnit& dex_compilation_unit,
    VariableSizedHandleScope* handles,
    SideEffectsAnalysis* most_recent_side_effects,
    HInductionVarAnalysis* most_recent_induction,
    LoadStoreAnalysis* most_recent_lsa) {
  std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
  if (opt_name == BoundsCheckElimination::kBoundsCheckEliminationPassName) {
    CHECK(most_recent_side_effects != nullptr && most_recent_induction != nullptr);
    return new (arena) BoundsCheckElimination(graph,
                                              *most_recent_side_effects,
                                              most_recent_induction);
  } else if (opt_name == GVNOptimization::kGlobalValueNumberingPassName) {
    CHECK(most_recent_side_effects != nullptr);
    return new (arena) GVNOptimization(graph, *most_recent_side_effects, pass_name.c_str());
  } else if (opt_name == HConstantFolding::kConstantFoldingPassName) {
    return new (arena) HConstantFolding(graph, pass_name.c_str());
  } else if (opt_name == HDeadCodeElimination::kDeadCodeEliminationPassName) {
    return new (arena) HDeadCodeElimination(graph, stats, pass_name.c_str());
  } else if (opt_name == HInliner::kInlinerPassName) {
    size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
    return new (arena) HInliner(graph,                   // outer_graph
                                graph,                   // outermost_graph
                                codegen,
                                dex_compilation_unit,    // outer_compilation_unit
                                dex_compilation_unit,    // outermost_compilation_unit
                                driver,
                                handles,
                                stats,
                                number_of_dex_registers,
                                /* total_number_of_instructions */ 0,
                                /* parent */ nullptr);
  } else if (opt_name == HSharpening::kSharpeningPassName) {
    return new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver, handles);
  } else if (opt_name == HSelectGenerator::kSelectGeneratorPassName) {
    return new (arena) HSelectGenerator(graph, stats);
  } else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
    return new (arena) HInductionVarAnalysis(graph);
  } else if (opt_name == InstructionSimplifier::kInstructionSimplifierPassName) {
    return new (arena) InstructionSimplifier(graph, codegen, driver, stats, pass_name.c_str());
  } else if (opt_name == IntrinsicsRecognizer::kIntrinsicsRecognizerPassName) {
    return new (arena) IntrinsicsRecognizer(graph, stats);
  } else if (opt_name == LICM::kLoopInvariantCodeMotionPassName) {
    CHECK(most_recent_side_effects != nullptr);
    return new (arena) LICM(graph, *most_recent_side_effects, stats);
  } else if (opt_name == LoadStoreAnalysis::kLoadStoreAnalysisPassName) {
    return new (arena) LoadStoreAnalysis(graph);
  } else if (opt_name == LoadStoreElimination::kLoadStoreEliminationPassName) {
    CHECK(most_recent_side_effects != nullptr);
    CHECK(most_recent_lsa != nullptr);
    return new (arena) LoadStoreElimination(graph, *most_recent_side_effects, *most_recent_lsa);
  } else if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
    return new (arena) SideEffectsAnalysis(graph);
  } else if (opt_name == HLoopOptimization::kLoopOptimizationPassName) {
    return new (arena) HLoopOptimization(graph, driver, most_recent_induction);
  } else if (opt_name == CHAGuardOptimization::kCHAGuardOptimizationPassName) {
    return new (arena) CHAGuardOptimization(graph);
  } else if (opt_name == CodeSinking::kCodeSinkingPassName) {
    return new (arena) CodeSinking(graph, stats);
#ifdef ART_ENABLE_CODEGEN_arm
  } else if (opt_name == arm::InstructionSimplifierArm::kInstructionSimplifierArmPassName) {
    return new (arena) arm::InstructionSimplifierArm(graph, stats);
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
  } else if (opt_name == arm64::InstructionSimplifierArm64::kInstructionSimplifierArm64PassName) {
    return new (arena) arm64::InstructionSimplifierArm64(graph, stats);
#endif
#ifdef ART_ENABLE_CODEGEN_mips
  } else if (opt_name == mips::PcRelativeFixups::kPcRelativeFixupsMipsPassName) {
    return new (arena) mips::PcRelativeFixups(graph, codegen, stats);
#endif
#ifdef ART_ENABLE_CODEGEN_x86
  } else if (opt_name == x86::PcRelativeFixups::kPcRelativeFixupsX86PassName) {
    return new (arena) x86::PcRelativeFixups(graph, codegen, stats);
  } else if (opt_name == x86::X86MemoryOperandGeneration::kX86MemoryOperandGenerationPassName) {
    return new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
#endif
  }
  return nullptr;
}

static ArenaVector<HOptimization*> BuildOptimizations(
    const std::vector<std::string>& pass_names,
    ArenaAllocator* arena,
    HGraph* graph,
    OptimizingCompilerStats* stats,
    CodeGenerator* codegen,
    CompilerDriver* driver,
    const DexCompilationUnit& dex_compilation_unit,
    VariableSizedHandleScope* handles) {
  // A few HOptimization constructors require SideEffectsAnalysis or HInductionVarAnalysis
  // instances. This method assumes that each of them expects the nearest instance preceding it
  // in the pass name list.
  SideEffectsAnalysis* most_recent_side_effects = nullptr;
  HInductionVarAnalysis* most_recent_induction = nullptr;
  LoadStoreAnalysis* most_recent_lsa = nullptr;
  ArenaVector<HOptimization*> ret(arena->Adapter());
  for (const std::string& pass_name : pass_names) {
    HOptimization* opt = BuildOptimization(
        pass_name,
        arena,
        graph,
        stats,
        codegen,
        driver,
        dex_compilation_unit,
        handles,
        most_recent_side_effects,
        most_recent_induction,
        most_recent_lsa);
    CHECK(opt != nullptr) << "Couldn't build optimization: \"" << pass_name << "\"";
    ret.push_back(opt);

    std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
    if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
      most_recent_side_effects = down_cast<SideEffectsAnalysis*>(opt);
    } else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
      most_recent_induction = down_cast<HInductionVarAnalysis*>(opt);
    } else if (opt_name == LoadStoreAnalysis::kLoadStoreAnalysisPassName) {
      most_recent_lsa = down_cast<LoadStoreAnalysis*>(opt);
    }
  }
  return ret;
}

void OptimizingCompiler::RunOptimizations(HOptimization* optimizations[],
                                          size_t length,
                                          PassObserver* pass_observer) const {
  for (size_t i = 0; i < length; ++i) {
    PassScope scope(optimizations[i]->GetPassName(), pass_observer);
    optimizations[i]->Run();
  }
}

void OptimizingCompiler::MaybeRunInliner(HGraph* graph,
                                         CodeGenerator* codegen,
                                         CompilerDriver* driver,
                                         const DexCompilationUnit& dex_compilation_unit,
                                         PassObserver* pass_observer,
                                         VariableSizedHandleScope* handles) const {
  OptimizingCompilerStats* stats = compilation_stats_.get();
  const CompilerOptions& compiler_options = driver->GetCompilerOptions();
  bool should_inline = (compiler_options.GetInlineMaxCodeUnits() > 0);
  if (!should_inline) {
    return;
  }
  size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
  HInliner* inliner = new (graph->GetArena()) HInliner(
      graph,                   // outer_graph
      graph,                   // outermost_graph
      codegen,
      dex_compilation_unit,    // outer_compilation_unit
      dex_compilation_unit,    // outermost_compilation_unit
      driver,
      handles,
      stats,
      number_of_dex_registers,
      /* total_number_of_instructions */ 0,
      /* parent */ nullptr);
  HOptimization* optimizations[] = { inliner };

  RunOptimizations(optimizations, arraysize(optimizations), pass_observer);
}

void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
                                              HGraph* graph,
                                              CodeGenerator* codegen,
                                              PassObserver* pass_observer) const {
  UNUSED(codegen);  // To avoid a compilation error when compiling for svelte builds.
  OptimizingCompilerStats* stats = compilation_stats_.get();
  ArenaAllocator* arena = graph->GetArena();
  switch (instruction_set) {
#if defined(ART_ENABLE_CODEGEN_arm)
    case kThumb2:
    case kArm: {
      arm::InstructionSimplifierArm* simplifier =
          new (arena) arm::InstructionSimplifierArm(graph, stats);
      SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
      GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
      HInstructionScheduling* scheduling =
          new (arena) HInstructionScheduling(graph, instruction_set, codegen);
      HOptimization* arm_optimizations[] = {
        simplifier,
        side_effects,
        gvn,
        scheduling,
      };
      RunOptimizations(arm_optimizations, arraysize(arm_optimizations), pass_observer);
      break;
    }
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    case kArm64: {
      arm64::InstructionSimplifierArm64* simplifier =
          new (arena) arm64::InstructionSimplifierArm64(graph, stats);
      SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
      GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
      HInstructionScheduling* scheduling =
          new (arena) HInstructionScheduling(graph, instruction_set);
      HOptimization* arm64_optimizations[] = {
        simplifier,
        side_effects,
        gvn,
        scheduling,
      };
      RunOptimizations(arm64_optimizations, arraysize(arm64_optimizations), pass_observer);
      break;
    }
#endif
#ifdef ART_ENABLE_CODEGEN_mips
    case kMips: {
      mips::PcRelativeFixups* pc_relative_fixups =
          new (arena) mips::PcRelativeFixups(graph, codegen, stats);
      HOptimization* mips_optimizations[] = {
          pc_relative_fixups,
      };
      RunOptimizations(mips_optimizations, arraysize(mips_optimizations), pass_observer);
      break;
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case kX86: {
      x86::PcRelativeFixups* pc_relative_fixups =
          new (arena) x86::PcRelativeFixups(graph, codegen, stats);
      x86::X86MemoryOperandGeneration* memory_gen =
          new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
      HOptimization* x86_optimizations[] = {
          pc_relative_fixups,
          memory_gen
      };
      RunOptimizations(x86_optimizations, arraysize(x86_optimizations), pass_observer);
      break;
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    case kX86_64: {
      x86::X86MemoryOperandGeneration* memory_gen =
          new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
      HOptimization* x86_64_optimizations[] = {
          memory_gen
      };
      RunOptimizations(x86_64_optimizations, arraysize(x86_64_optimizations), pass_observer);
      break;
    }
#endif
    default:
      break;
  }
}

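// Runs the three register allocation phases (preparation, liveness analysis,
// and the allocation itself), each under its own PassScope.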
NO_INLINE  // Avoid increasing caller's frame size by large stack-allocated objects.
static void AllocateRegisters(HGraph* graph,
                              CodeGenerator* codegen,
                              PassObserver* pass_observer,
                              RegisterAllocator::Strategy strategy) {
  {
    PassScope scope(PrepareForRegisterAllocation::kPrepareForRegisterAllocationPassName,
                    pass_observer);
    PrepareForRegisterAllocation(graph).Run();
  }
  SsaLivenessAnalysis liveness(graph, codegen);
  {
    PassScope scope(SsaLivenessAnalysis::kLivenessPassName, pass_observer);
    liveness.Analyze();
  }
  {
    PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
    RegisterAllocator::Create(graph->GetArena(), codegen, liveness, strategy)->AllocateRegisters();
  }
}

void OptimizingCompiler::RunOptimizations(HGraph* graph,
                                          CodeGenerator* codegen,
                                          CompilerDriver* driver,
                                          const DexCompilationUnit& dex_compilation_unit,
                                          PassObserver* pass_observer,
                                          VariableSizedHandleScope* handles) const {
  OptimizingCompilerStats* stats = compilation_stats_.get();
  ArenaAllocator* arena = graph->GetArena();
  if (driver->GetCompilerOptions().GetPassesToRun() != nullptr) {
    ArenaVector<HOptimization*> optimizations = BuildOptimizations(
        *driver->GetCompilerOptions().GetPassesToRun(),
        arena,
        graph,
        stats,
        codegen,
        driver,
        dex_compilation_unit,
        handles);
    RunOptimizations(&optimizations[0], optimizations.size(), pass_observer);
    return;
  }

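  // Default optimization pipeline, used when no explicit pass list is given.
  // The "$suffix" in a pass name distinguishes multiple runs of the same
  // optimization in the timing and visualizer output (see kPassNameSeparator).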
  HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination(
      graph, stats, "dead_code_elimination$initial");
  HDeadCodeElimination* dce2 = new (arena) HDeadCodeElimination(
      graph, stats, "dead_code_elimination$after_inlining");
  HDeadCodeElimination* dce3 = new (arena) HDeadCodeElimination(
      graph, stats, "dead_code_elimination$final");
  HConstantFolding* fold1 = new (arena) HConstantFolding(graph, "constant_folding");
  InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(
      graph, codegen, driver, stats);
  HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph, stats);
  HConstantFolding* fold2 = new (arena) HConstantFolding(
      graph, "constant_folding$after_inlining");
  HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding$after_bce");
  SideEffectsAnalysis* side_effects1 = new (arena) SideEffectsAnalysis(
      graph, "side_effects$before_gvn");
  SideEffectsAnalysis* side_effects2 = new (arena) SideEffectsAnalysis(
      graph, "side_effects$before_lse");
  GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects1);
  LICM* licm = new (arena) LICM(graph, *side_effects1, stats);
  HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
  BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects1, induction);
  HLoopOptimization* loop = new (arena) HLoopOptimization(graph, driver, induction);
  LoadStoreAnalysis* lsa = new (arena) LoadStoreAnalysis(graph);
  LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects2, *lsa);
  HSharpening* sharpening = new (arena) HSharpening(
      graph, codegen, dex_compilation_unit, driver, handles);
  InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
      graph, codegen, driver, stats, "instruction_simplifier$after_inlining");
  InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
      graph, codegen, driver, stats, "instruction_simplifier$after_bce");
  InstructionSimplifier* simplify4 = new (arena) InstructionSimplifier(
      graph, codegen, driver, stats, "instruction_simplifier$before_codegen");
  IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, stats);
  CHAGuardOptimization* cha_guard = new (arena) CHAGuardOptimization(graph);
  CodeSinking* code_sinking = new (arena) CodeSinking(graph, stats);

  HOptimization* optimizations1[] = {
    intrinsics,
    sharpening,
    fold1,
    simplify1,
    dce1,
  };
  RunOptimizations(optimizations1, arraysize(optimizations1), pass_observer);

  MaybeRunInliner(graph, codegen, driver, dex_compilation_unit, pass_observer, handles);

  HOptimization* optimizations2[] = {
    // SelectGenerator depends on the InstructionSimplifier removing
    // redundant suspend checks to recognize empty blocks.
    select_generator,
    fold2,  // TODO: if we don't inline we can also skip fold2.
    simplify2,
    dce2,
    side_effects1,
    gvn,
    licm,
    induction,
    bce,
    loop,
    fold3,  // Evaluates code generated by dynamic BCE.
    simplify3,
    side_effects2,
    lsa,
    lse,
    cha_guard,
    dce3,
    code_sinking,
    // The codegen has a few assumptions that only the instruction simplifier
    // can satisfy. For example, the code generator does not expect to see a
    // HTypeConversion from a type to the same type.
    simplify4,
  };
  RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);

  RunArchOptimizations(driver->GetInstructionSet(), graph, codegen, pass_observer);
}

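// Gathers the linker patches emitted during code generation into a vector
// sorted by literal offset.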
static ArenaVector<LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
  ArenaVector<LinkerPatch> linker_patches(codegen->GetGraph()->GetArena()->Adapter());
  codegen->EmitLinkerPatches(&linker_patches);

  // Sort patches by literal offset. Required for .oat_patches encoding.
  std::sort(linker_patches.begin(), linker_patches.end(),
            [](const LinkerPatch& lhs, const LinkerPatch& rhs) {
    return lhs.LiteralOffset() < rhs.LiteralOffset();
  });

  return linker_patches;
}

CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena,
                                         CodeVectorAllocator* code_allocator,
                                         CodeGenerator* codegen,
                                         CompilerDriver* compiler_driver,
                                         const DexFile::CodeItem* code_item) const {
  ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
  ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps));
  ArenaVector<uint8_t> method_info(arena->Adapter(kArenaAllocStackMaps));
  size_t stack_map_size = 0;
  size_t method_info_size = 0;
  codegen->ComputeStackMapAndMethodInfoSize(&stack_map_size, &method_info_size);
  stack_map.resize(stack_map_size);
  method_info.resize(method_info_size);
  codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()),
                          MemoryRegion(method_info.data(), method_info.size()),
                          *code_item);

  CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
      compiler_driver,
      codegen->GetInstructionSet(),
      ArrayRef<const uint8_t>(code_allocator->GetMemory()),
      // Follow Quick's behavior and set the frame size to zero if it is
      // considered "empty" (see the definition of
      // art::CodeGenerator::HasEmptyFrame).
      codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
      codegen->GetCoreSpillMask(),
      codegen->GetFpuSpillMask(),
      ArrayRef<const uint8_t>(method_info),
      ArrayRef<const uint8_t>(stack_map),
      ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
      ArrayRef<const LinkerPatch>(linker_patches));

  return compiled_method;
}

CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
                                              CodeVectorAllocator* code_allocator,
                                              const DexFile::CodeItem* code_item,
                                              uint32_t access_flags,
                                              InvokeType invoke_type,
                                              uint16_t class_def_idx,
                                              uint32_t method_idx,
                                              Handle<mirror::ClassLoader> class_loader,
                                              const DexFile& dex_file,
                                              Handle<mirror::DexCache> dex_cache,
                                              ArtMethod* method,
                                              bool osr,
                                              VariableSizedHandleScope* handles) const {
  MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
  CompilerDriver* compiler_driver = GetCompilerDriver();
  InstructionSet instruction_set = compiler_driver->GetInstructionSet();

  // Always use the Thumb-2 assembler: some runtime functionality
  // (like implicit stack overflow checks) assumes Thumb-2.
  DCHECK_NE(instruction_set, kArm);

  // Do not attempt to compile on architectures we do not support.
  if (!IsInstructionSetSupported(instruction_set)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledUnsupportedIsa);
    return nullptr;
  }

  if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledPathological);
    return nullptr;
  }

  // Implementation of the space filter: do not compile a code item whose size in
  // code units is larger than kSpaceFilterOptimizingThreshold.
  static constexpr size_t kSpaceFilterOptimizingThreshold = 128;
  const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
  if ((compiler_options.GetCompilerFilter() == CompilerFilter::kSpace)
      && (code_item->insns_size_in_code_units_ > kSpaceFilterOptimizingThreshold)) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledSpaceFilter);
    return nullptr;
  }

  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  DexCompilationUnit dex_compilation_unit(
      class_loader,
      class_linker,
      dex_file,
      code_item,
      class_def_idx,
      method_idx,
      access_flags,
      /* verified_method */ nullptr,
      dex_cache);

  HGraph* graph = new (arena) HGraph(
      arena,
      dex_file,
      method_idx,
      compiler_driver->GetInstructionSet(),
      kInvalidInvokeType,
      compiler_driver->GetCompilerOptions().GetDebuggable(),
      osr);

  const uint8_t* interpreter_metadata = nullptr;
  if (method == nullptr) {
    ScopedObjectAccess soa(Thread::Current());
    method = compiler_driver->ResolveMethod(
        soa, dex_cache, class_loader, &dex_compilation_unit, method_idx, invoke_type);
  }
  // For AOT compilation, we may not get a method, for example if its class is erroneous.
  // JIT should always have a method.
  DCHECK(Runtime::Current()->IsAotCompiler() || method != nullptr);
  if (method != nullptr) {
    graph->SetArtMethod(method);
    ScopedObjectAccess soa(Thread::Current());
    interpreter_metadata = method->GetQuickenedInfo(class_linker->GetImagePointerSize());
  }

  std::unique_ptr<CodeGenerator> codegen(
      CodeGenerator::Create(graph,
                            instruction_set,
                            *compiler_driver->GetInstructionSetFeatures(),
                            compiler_driver->GetCompilerOptions(),
                            compilation_stats_.get()));
  if (codegen.get() == nullptr) {
    MaybeRecordStat(MethodCompilationStat::kNotCompiledNoCodegen);
    return nullptr;
  }
  codegen->GetAssembler()->cfi().SetEnabled(
      compiler_driver->GetCompilerOptions().GenerateAnyDebugInfo());

  PassObserver pass_observer(graph,
                             codegen.get(),
                             visualizer_output_.get(),
                             compiler_driver,
                             dump_mutex_);

  {
    VLOG(compiler) << "Building " << pass_observer.GetMethodName();
    PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
    HGraphBuilder builder(graph,
                          &dex_compilation_unit,
                          &dex_compilation_unit,
                          &dex_file,
                          *code_item,
                          compiler_driver,
                          codegen.get(),
                          compilation_stats_.get(),
                          interpreter_metadata,
                          dex_cache,
                          handles);
    GraphAnalysisResult result = builder.BuildGraph();
    if (result != kAnalysisSuccess) {
      switch (result) {
        case kAnalysisSkipped:
          MaybeRecordStat(MethodCompilationStat::kNotCompiledSkipped);
          break;
        case kAnalysisInvalidBytecode:
          MaybeRecordStat(MethodCompilationStat::kNotCompiledInvalidBytecode);
          break;
        case kAnalysisFailThrowCatchLoop:
          MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop);
          break;
        case kAnalysisFailAmbiguousArrayOp:
          MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
          break;
        case kAnalysisSuccess:
          UNREACHABLE();
      }
      pass_observer.SetGraphInBadState();
      return nullptr;
    }
  }

  RunOptimizations(graph,
                   codegen.get(),
                   compiler_driver,
                   dex_compilation_unit,
                   &pass_observer,
                   handles);

  RegisterAllocator::Strategy regalloc_strategy =
    compiler_options.GetRegisterAllocationStrategy();
  AllocateRegisters(graph, codegen.get(), &pass_observer, regalloc_strategy);

  codegen->Compile(code_allocator);
  pass_observer.DumpDisassembly();

  return codegen.release();
}

CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
                                            uint32_t access_flags,
                                            InvokeType invoke_type,
                                            uint16_t class_def_idx,
                                            uint32_t method_idx,
                                            Handle<mirror::ClassLoader> jclass_loader,
                                            const DexFile& dex_file,
                                            Handle<mirror::DexCache> dex_cache) const {
  CompilerDriver* compiler_driver = GetCompilerDriver();
  CompiledMethod* method = nullptr;
  DCHECK(Runtime::Current()->IsAotCompiler());
  const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
  DCHECK(!verified_method->HasRuntimeThrow());
  if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
      || verifier::CanCompilerHandleVerificationFailure(
            verified_method->GetEncounteredVerificationFailures())) {
    ArenaAllocator arena(Runtime::Current()->GetArenaPool());
    CodeVectorAllocator code_allocator(&arena);
    std::unique_ptr<CodeGenerator> codegen;
    {
      ScopedObjectAccess soa(Thread::Current());
      VariableSizedHandleScope handles(soa.Self());
      // Go to native so that we don't block GC during compilation.
      ScopedThreadSuspension sts(soa.Self(), kNative);
      codegen.reset(
          TryCompile(&arena,
                     &code_allocator,
                     code_item,
                     access_flags,
                     invoke_type,
                     class_def_idx,
                     method_idx,
                     jclass_loader,
                     dex_file,
                     dex_cache,
                     nullptr,
                     /* osr */ false,
                     &handles));
    }
    if (codegen.get() != nullptr) {
      MaybeRecordStat(MethodCompilationStat::kCompiled);
      method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver, code_item);

      if (kArenaAllocatorCountAllocations) {
        if (arena.BytesAllocated() > kArenaAllocatorMemoryReportThreshold) {
          MemStats mem_stats(arena.GetMemStats());
          LOG(INFO) << dex_file.PrettyMethod(method_idx) << " " << Dumpable<MemStats>(mem_stats);
        }
      }
    }
  } else {
    if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
      MaybeRecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
    } else {
      MaybeRecordStat(MethodCompilationStat::kNotCompiledVerificationError);
    }
  }

  if (kIsDebugBuild &&
      IsCompilingWithCoreImage() &&
      IsInstructionSetSupported(compiler_driver->GetInstructionSet())) {
    // For testing purposes, we put a special marker on method names
    // that should be compiled with this compiler (when the
    // instruction set is supported). This makes sure we're not
    // regressing.
    std::string method_name = dex_file.PrettyMethod(method_idx);
    bool shouldCompile = method_name.find("$opt$") != std::string::npos;
    DCHECK((method != nullptr) || !shouldCompile) << "Didn't compile " << method_name;
  }

  return method;
}

Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
  return new OptimizingCompiler(driver);
}

bool IsCompilingWithCoreImage() {
  const std::string& image = Runtime::Current()->GetImageLocation();
  // TODO: This is under-approximating...
  if (android::base::EndsWith(image, "core.art") ||
      android::base::EndsWith(image, "core-optimizing.art")) {
    return true;
  }
  return false;
}

bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
  // Note: the runtime is null only for unit testing.
  return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
}

bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee) {
  if (!Runtime::Current()->IsAotCompiler()) {
    // JIT can always encode methods in stack maps.
    return true;
  }
  if (IsSameDexFile(caller_dex_file, *callee->GetDexFile())) {
    return true;
  }
  // TODO(ngeoffray): Support more AOT cases for inlining:
  // - methods in multidex
  // - methods in boot image for on-device non-PIC compilation.
  return false;
}

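// JIT entry point: compiles `method` and commits the result to the JIT code
// cache. Returns false if compilation or code cache allocation fails.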
bool OptimizingCompiler::JitCompile(Thread* self,
                                    jit::JitCodeCache* code_cache,
                                    ArtMethod* method,
                                    bool osr,
                                    jit::JitLogger* jit_logger) {
  StackHandleScope<3> hs(self);
  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
      method->GetDeclaringClass()->GetClassLoader()));
  Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
  DCHECK(method->IsCompilable());

  const DexFile* dex_file = method->GetDexFile();
  const uint16_t class_def_idx = method->GetClassDefIndex();
  const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
  const uint32_t method_idx = method->GetDexMethodIndex();
  const uint32_t access_flags = method->GetAccessFlags();
  const InvokeType invoke_type = method->GetInvokeType();

  ArenaAllocator arena(Runtime::Current()->GetJitArenaPool());
  CodeVectorAllocator code_allocator(&arena);
  VariableSizedHandleScope handles(self);

  std::unique_ptr<CodeGenerator> codegen;
  {
    // Go to native so that we don't block GC during compilation.
    ScopedThreadSuspension sts(self, kNative);
    codegen.reset(
        TryCompile(&arena,
                   &code_allocator,
                   code_item,
                   access_flags,
                   invoke_type,
                   class_def_idx,
                   method_idx,
                   class_loader,
                   *dex_file,
                   dex_cache,
                   method,
                   osr,
                   &handles));
    if (codegen.get() == nullptr) {
      return false;
    }

    if (kArenaAllocatorCountAllocations) {
      if (arena.BytesAllocated() > kArenaAllocatorMemoryReportThreshold) {
        MemStats mem_stats(arena.GetMemStats());
        LOG(INFO) << dex_file->PrettyMethod(method_idx) << " " << Dumpable<MemStats>(mem_stats);
      }
    }
  }

  size_t stack_map_size = 0;
  size_t method_info_size = 0;
  codegen->ComputeStackMapAndMethodInfoSize(&stack_map_size, &method_info_size);
  size_t number_of_roots = codegen->GetNumberOfJitRoots();
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  // We allocate an object array to ensure the JIT roots that we will collect in EmitJitRoots
  // will be visible to the GC between EmitLiterals and CommitCode. Once CommitCode is
  // executed, this array is not needed.
  Handle<mirror::ObjectArray<mirror::Object>> roots(
      hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc(
          self, class_linker->GetClassRoot(ClassLinker::kObjectArrayClass), number_of_roots)));
  if (roots == nullptr) {
    // Out of memory. Just clear the exception to avoid leaving an uncaught Java
    // exception pending.
    DCHECK(self->IsExceptionPending());
    self->ClearException();
    return false;
  }
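  // Reserve space in the JIT code cache for the stack map, method info and GC
  // roots data before committing the compiled code.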
  uint8_t* stack_map_data = nullptr;
  uint8_t* method_info_data = nullptr;
  uint8_t* roots_data = nullptr;
  uint32_t data_size = code_cache->ReserveData(self,
                                               stack_map_size,
                                               method_info_size,
                                               number_of_roots,
                                               method,
                                               &stack_map_data,
                                               &method_info_data,
                                               &roots_data);
  if (stack_map_data == nullptr || roots_data == nullptr) {
    return false;
  }
  MaybeRecordStat(MethodCompilationStat::kCompiled);
  codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size),
                          MemoryRegion(method_info_data, method_info_size),
                          *code_item);
  codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data);

  const void* code = code_cache->CommitCode(
      self,
      method,
      stack_map_data,
      method_info_data,
      roots_data,
      codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
      codegen->GetCoreSpillMask(),
      codegen->GetFpuSpillMask(),
      code_allocator.GetMemory().data(),
      code_allocator.GetSize(),
      data_size,
      osr,
      roots,
      codegen->GetGraph()->HasShouldDeoptimizeFlag(),
      codegen->GetGraph()->GetCHASingleImplementationList());

  if (code == nullptr) {
    code_cache->ClearData(self, stack_map_data, roots_data);
    return false;
  }

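  // If native debug info was requested, emit an in-memory ELF file describing
  // the compiled method and register it with the debugger interface.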
  const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
  if (compiler_options.GetGenerateDebugInfo()) {
    const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
    const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
    debug::MethodDebugInfo info = debug::MethodDebugInfo();
    info.trampoline_name = nullptr;
    info.dex_file = dex_file;
    info.class_def_index = class_def_idx;
    info.dex_method_index = method_idx;
    info.access_flags = access_flags;
    info.code_item = code_item;
    info.isa = codegen->GetInstructionSet();
    info.deduped = false;
    info.is_native_debuggable = compiler_options.GetNativeDebuggable();
    info.is_optimized = true;
    info.is_code_address_text_relative = false;
    info.code_address = code_address;
    info.code_size = code_allocator.GetSize();
    info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
    info.code_info = stack_map_size == 0 ? nullptr : stack_map_data;
    info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
    std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForMethods(
        GetCompilerDriver()->GetInstructionSet(),
        GetCompilerDriver()->GetInstructionSetFeatures(),
        ArrayRef<const debug::MethodDebugInfo>(&info, 1));
    CreateJITCodeEntryForAddress(code_address, std::move(elf_file));
  }

  Runtime::Current()->GetJit()->AddMemoryUsage(method, arena.BytesUsed());
  if (jit_logger != nullptr) {
    jit_logger->WriteLog(code, code_allocator.GetSize(), method);
  }

  return true;
}

}  // namespace art