/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator.h"

#ifdef ART_ENABLE_CODEGEN_arm
#include "code_generator_arm_vixl.h"
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
#include "code_generator_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86
#include "code_generator_x86.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86_64
#include "code_generator_x86_64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_mips
#include "code_generator_mips.h"
#endif

#ifdef ART_ENABLE_CODEGEN_mips64
#include "code_generator_mips64.h"
#endif

#include "base/bit_utils.h"
#include "base/bit_utils_iterator.h"
#include "base/casts.h"
#include "base/leb128.h"
#include "class_linker.h"
#include "compiled_method.h"
#include "dex/bytecode_utils.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/verified_method.h"
#include "graph_visualizer.h"
#include "image.h"
#include "gc/space/image_space.h"
#include "intern_table.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "mirror/string.h"
#include "parallel_move_resolver.h"
#include "scoped_thread_state_change-inl.h"
#include "ssa_liveness_analysis.h"
#include "stack_map.h"
#include "stack_map_stream.h"
#include "thread-current-inl.h"
#include "utils/assembler.h"

namespace art {

// Return whether a location is consistent with a type.
static bool CheckType(DataType::Type type, Location location) {
  if (location.IsFpuRegister()
      || (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresFpuRegister))) {
    return (type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64);
  } else if (location.IsRegister() ||
             (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresRegister))) {
    return DataType::IsIntegralType(type) || (type == DataType::Type::kReference);
  } else if (location.IsRegisterPair()) {
    return type == DataType::Type::kInt64;
  } else if (location.IsFpuRegisterPair()) {
    return type == DataType::Type::kFloat64;
  } else if (location.IsStackSlot()) {
    return (DataType::IsIntegralType(type) && type != DataType::Type::kInt64)
           || (type == DataType::Type::kFloat32)
           || (type == DataType::Type::kReference);
  } else if (location.IsDoubleStackSlot()) {
    return (type == DataType::Type::kInt64) || (type == DataType::Type::kFloat64);
  } else if (location.IsConstant()) {
    if (location.GetConstant()->IsIntConstant()) {
      return DataType::IsIntegralType(type) && (type != DataType::Type::kInt64);
    } else if (location.GetConstant()->IsNullConstant()) {
      return type == DataType::Type::kReference;
    } else if (location.GetConstant()->IsLongConstant()) {
      return type == DataType::Type::kInt64;
    } else if (location.GetConstant()->IsFloatConstant()) {
      return type == DataType::Type::kFloat32;
    } else {
      return location.GetConstant()->IsDoubleConstant()
          && (type == DataType::Type::kFloat64);
    }
  } else {
    return location.IsInvalid() || (location.GetPolicy() == Location::kAny);
  }
}

// Check that a location summary is consistent with an instruction.
static bool CheckTypeConsistency(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations == nullptr) {
    return true;
  }

  if (locations->Out().IsUnallocated()
      && (locations->Out().GetPolicy() == Location::kSameAsFirstInput)) {
    DCHECK(CheckType(instruction->GetType(), locations->InAt(0)))
        << instruction->GetType()
        << " " << locations->InAt(0);
  } else {
    DCHECK(CheckType(instruction->GetType(), locations->Out()))
        << instruction->GetType()
        << " " << locations->Out();
  }

  HConstInputsRef inputs = instruction->GetInputs();
  for (size_t i = 0; i < inputs.size(); ++i) {
    DCHECK(CheckType(inputs[i]->GetType(), locations->InAt(i)))
      << inputs[i]->GetType() << " " << locations->InAt(i);
  }

  HEnvironment* environment = instruction->GetEnvironment();
  for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) {
    if (environment->GetInstructionAt(i) != nullptr) {
      DataType::Type type = environment->GetInstructionAt(i)->GetType();
      DCHECK(CheckType(type, environment->GetLocationAt(i)))
        << type << " " << environment->GetLocationAt(i);
    } else {
      DCHECK(environment->GetLocationAt(i).IsInvalid())
        << environment->GetLocationAt(i);
    }
  }
  return true;
}

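// Per-compilation bookkeeping that is only needed while code is being
// generated: the scoped arena allocator, the stack map stream, the slow paths,
// and the JIT roots reserved so far.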
class CodeGenerator::CodeGenerationData : public DeletableArenaObject<kArenaAllocCodeGenerator> {
 public:
  static std::unique_ptr<CodeGenerationData> Create(ArenaStack* arena_stack,
                                                    InstructionSet instruction_set) {
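    // Allocate the CodeGenerationData from a scoped arena allocator and then
    // move that allocator into the object itself, so the data lives in (and
    // keeps alive) the arena memory it was created from.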
    ScopedArenaAllocator allocator(arena_stack);
    void* memory = allocator.Alloc<CodeGenerationData>(kArenaAllocCodeGenerator);
    return std::unique_ptr<CodeGenerationData>(
        ::new (memory) CodeGenerationData(std::move(allocator), instruction_set));
  }

  ScopedArenaAllocator* GetScopedAllocator() {
    return &allocator_;
  }

  void AddSlowPath(SlowPathCode* slow_path) {
    slow_paths_.emplace_back(std::unique_ptr<SlowPathCode>(slow_path));
  }

  ArrayRef<const std::unique_ptr<SlowPathCode>> GetSlowPaths() const {
    return ArrayRef<const std::unique_ptr<SlowPathCode>>(slow_paths_);
  }

  StackMapStream* GetStackMapStream() { return &stack_map_stream_; }

  void ReserveJitStringRoot(StringReference string_reference, Handle<mirror::String> string) {
    jit_string_roots_.Overwrite(string_reference,
                                reinterpret_cast64<uint64_t>(string.GetReference()));
  }

  uint64_t GetJitStringRootIndex(StringReference string_reference) const {
    return jit_string_roots_.Get(string_reference);
  }

  size_t GetNumberOfJitStringRoots() const {
    return jit_string_roots_.size();
  }

  void ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
    jit_class_roots_.Overwrite(type_reference, reinterpret_cast64<uint64_t>(klass.GetReference()));
  }

  uint64_t GetJitClassRootIndex(TypeReference type_reference) const {
    return jit_class_roots_.Get(type_reference);
  }

  size_t GetNumberOfJitClassRoots() const {
    return jit_class_roots_.size();
  }

  size_t GetNumberOfJitRoots() const {
    return GetNumberOfJitStringRoots() + GetNumberOfJitClassRoots();
  }

  void EmitJitRoots(/*out*/std::vector<Handle<mirror::Object>>* roots)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  CodeGenerationData(ScopedArenaAllocator&& allocator, InstructionSet instruction_set)
      : allocator_(std::move(allocator)),
        stack_map_stream_(&allocator_, instruction_set),
        slow_paths_(allocator_.Adapter(kArenaAllocCodeGenerator)),
        jit_string_roots_(StringReferenceValueComparator(),
                          allocator_.Adapter(kArenaAllocCodeGenerator)),
        jit_class_roots_(TypeReferenceValueComparator(),
                         allocator_.Adapter(kArenaAllocCodeGenerator)) {
    slow_paths_.reserve(kDefaultSlowPathsCapacity);
  }

  static constexpr size_t kDefaultSlowPathsCapacity = 8;

  ScopedArenaAllocator allocator_;
  StackMapStream stack_map_stream_;
  ScopedArenaVector<std::unique_ptr<SlowPathCode>> slow_paths_;

  // Maps a StringReference (dex_file, string_index) to the index in the literal table.
  // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
  // will compute all the indices.
  ScopedArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_;

  // Maps a TypeReference (dex_file, type_index) to the index in the literal table.
  // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
  // will compute all the indices.
  ScopedArenaSafeMap<TypeReference, uint64_t, TypeReferenceValueComparator> jit_class_roots_;
};

void CodeGenerator::CodeGenerationData::EmitJitRoots(
    /*out*/std::vector<Handle<mirror::Object>>* roots) {
  DCHECK(roots->empty());
  roots->reserve(GetNumberOfJitRoots());
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  size_t index = 0;
  for (auto& entry : jit_string_roots_) {
    // Update `roots` with the string, and replace the address temporarily
    // stored in the table with the string's index.
    uint64_t address = entry.second;
    roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
    DCHECK(roots->back() != nullptr);
    DCHECK(roots->back()->IsString());
    entry.second = index;
    // Ensure the string is strongly interned. This is a requirement on how the JIT
    // handles strings. b/32995596
    class_linker->GetInternTable()->InternStrong(roots->back()->AsString());
    ++index;
  }
  for (auto& entry : jit_class_roots_) {
    // Update `roots` with the class, and replace the address temporarily
    // stored in the table with the class's index.
    uint64_t address = entry.second;
    roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
    DCHECK(roots->back() != nullptr);
    DCHECK(roots->back()->IsClass());
    entry.second = index;
    ++index;
  }
}

ScopedArenaAllocator* CodeGenerator::GetScopedAllocator() {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetScopedAllocator();
}

StackMapStream* CodeGenerator::GetStackMapStream() {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetStackMapStream();
}

void CodeGenerator::ReserveJitStringRoot(StringReference string_reference,
                                         Handle<mirror::String> string) {
  DCHECK(code_generation_data_ != nullptr);
  code_generation_data_->ReserveJitStringRoot(string_reference, string);
}

uint64_t CodeGenerator::GetJitStringRootIndex(StringReference string_reference) {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetJitStringRootIndex(string_reference);
}

void CodeGenerator::ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
  DCHECK(code_generation_data_ != nullptr);
  code_generation_data_->ReserveJitClassRoot(type_reference, klass);
}

uint64_t CodeGenerator::GetJitClassRootIndex(TypeReference type_reference) {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetJitClassRootIndex(type_reference);
}

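// By default a code generator embeds no JIT roots in the generated code, so
// there is nothing to patch; architectures that do embed roots override this.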
void CodeGenerator::EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
                                       const uint8_t* roots_data ATTRIBUTE_UNUSED) {
  DCHECK(code_generation_data_ != nullptr);
  DCHECK_EQ(code_generation_data_->GetNumberOfJitStringRoots(), 0u);
  DCHECK_EQ(code_generation_data_->GetNumberOfJitClassRoots(), 0u);
}

uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) {
  return array_length->IsStringLength()
      ? mirror::String::CountOffset().Uint32Value()
      : mirror::Array::LengthOffset().Uint32Value();
}

uint32_t CodeGenerator::GetArrayDataOffset(HArrayGet* array_get) {
  DCHECK(array_get->GetType() == DataType::Type::kUint16 || !array_get->IsStringCharAt());
  return array_get->IsStringCharAt()
      ? mirror::String::ValueOffset().Uint32Value()
      : mirror::Array::DataOffset(DataType::Size(array_get->GetType())).Uint32Value();
}

bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
  DCHECK_EQ((*block_order_)[current_block_index_], current);
  return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
}

HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
  for (size_t i = current_block_index_ + 1; i < block_order_->size(); ++i) {
    HBasicBlock* block = (*block_order_)[i];
    if (!block->IsSingleJump()) {
      return block;
    }
  }
  return nullptr;
}

HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
  while (block->IsSingleJump()) {
    block = block->GetSuccessors()[0];
  }
  return block;
}

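// RAII helper that records which native code interval was generated for a
// given HInstruction, for use by the disassembly/visualizer output.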
class DisassemblyScope {
 public:
  DisassemblyScope(HInstruction* instruction, const CodeGenerator& codegen)
      : codegen_(codegen), instruction_(instruction), start_offset_(static_cast<size_t>(-1)) {
    if (codegen_.GetDisassemblyInformation() != nullptr) {
      start_offset_ = codegen_.GetAssembler().CodeSize();
    }
  }

  ~DisassemblyScope() {
    // We avoid building this data when we know it will not be used.
    if (codegen_.GetDisassemblyInformation() != nullptr) {
      codegen_.GetDisassemblyInformation()->AddInstructionInterval(
          instruction_, start_offset_, codegen_.GetAssembler().CodeSize());
    }
  }

 private:
  const CodeGenerator& codegen_;
  HInstruction* instruction_;
  size_t start_offset_;
};

void CodeGenerator::GenerateSlowPaths() {
  DCHECK(code_generation_data_ != nullptr);
  size_t code_start = 0;
  for (const std::unique_ptr<SlowPathCode>& slow_path_ptr : code_generation_data_->GetSlowPaths()) {
    SlowPathCode* slow_path = slow_path_ptr.get();
    current_slow_path_ = slow_path;
    if (disasm_info_ != nullptr) {
      code_start = GetAssembler()->CodeSize();
    }
    // Record the dex pc at the start of the slow path (required for Java line number mapping).
    MaybeRecordNativeDebugInfo(slow_path->GetInstruction(), slow_path->GetDexPc(), slow_path);
    slow_path->EmitNativeCode(this);
    if (disasm_info_ != nullptr) {
      disasm_info_->AddSlowPathInterval(slow_path, code_start, GetAssembler()->CodeSize());
    }
  }
  current_slow_path_ = nullptr;
}

void CodeGenerator::InitializeCodeGenerationData() {
  DCHECK(code_generation_data_ == nullptr);
  code_generation_data_ = CodeGenerationData::Create(graph_->GetArenaStack(), GetInstructionSet());
}

void CodeGenerator::Compile(CodeAllocator* allocator) {
  InitializeCodeGenerationData();

  // The register allocator already called `InitializeCodeGeneration`,
  // where the frame size has been computed.
  DCHECK(block_order_ != nullptr);
  Initialize();

  HGraphVisitor* instruction_visitor = GetInstructionVisitor();
  DCHECK_EQ(current_block_index_, 0u);

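  // Methods with an empty frame report a frame size of zero in the stack map.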
  GetStackMapStream()->BeginMethod(HasEmptyFrame() ? 0 : frame_size_,
                                   core_spill_mask_,
                                   fpu_spill_mask_,
                                   GetGraph()->GetNumberOfVRegs());

  size_t frame_start = GetAssembler()->CodeSize();
  GenerateFrameEntry();
  DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
  if (disasm_info_ != nullptr) {
    disasm_info_->SetFrameEntryInterval(frame_start, GetAssembler()->CodeSize());
  }

  for (size_t e = block_order_->size(); current_block_index_ < e; ++current_block_index_) {
    HBasicBlock* block = (*block_order_)[current_block_index_];
    // Don't generate code for an empty block. Its predecessors will branch to its successor
    // directly. Also, the label of that block will not be emitted, so this helps catch
    // errors where we reference that label.
    if (block->IsSingleJump()) continue;
    Bind(block);
    // This ensures that we have correct native line mapping for all native instructions.
    // It is necessary to make stepping over a statement work. Otherwise, any initial
    // instructions (e.g. moves) would be assumed to be the start of the next statement.
    MaybeRecordNativeDebugInfo(/* instruction= */ nullptr, block->GetDexPc());
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* current = it.Current();
      if (current->HasEnvironment()) {
        // Create stackmap for HNativeDebugInfo or any instruction which calls native code.
        // Note that we need correct mapping for the native PC of the call instruction,
        // so the runtime's stackmap is not sufficient since it is at PC after the call.
        MaybeRecordNativeDebugInfo(current, block->GetDexPc());
      }
      DisassemblyScope disassembly_scope(current, *this);
      DCHECK(CheckTypeConsistency(current));
      current->Accept(instruction_visitor);
    }
  }

  GenerateSlowPaths();

  // Emit catch stack maps at the end of the stack map stream as expected by the
  // runtime exception handler.
  if (graph_->HasTryCatch()) {
    RecordCatchBlockInfo();
  }

  // Finalize instructions in the assembler.
  Finalize(allocator);

  GetStackMapStream()->EndMethod();
}

void CodeGenerator::Finalize(CodeAllocator* allocator) {
  size_t code_size = GetAssembler()->CodeSize();
  uint8_t* buffer = allocator->Allocate(code_size);

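  // Copy the assembled instructions into the caller-provided buffer.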
  MemoryRegion code(buffer, code_size);
  GetAssembler()->FinalizeInstructions(code);
}

void CodeGenerator::EmitLinkerPatches(
    ArenaVector<linker::LinkerPatch>* linker_patches ATTRIBUTE_UNUSED) {
  // No linker patches by default.
}

bool CodeGenerator::NeedsThunkCode(const linker::LinkerPatch& patch ATTRIBUTE_UNUSED) const {
  // Code generators that create patches requiring thunk compilation should override this function.
  return false;
}

void CodeGenerator::EmitThunkCode(const linker::LinkerPatch& patch ATTRIBUTE_UNUSED,
                                  /*out*/ ArenaVector<uint8_t>* code ATTRIBUTE_UNUSED,
                                  /*out*/ std::string* debug_name ATTRIBUTE_UNUSED) {
  // Code generators that create patches requiring thunk compilation should override this function.
  LOG(FATAL) << "Unexpected call to EmitThunkCode().";
}

void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
                                             size_t maximum_safepoint_spill_size,
                                             size_t number_of_out_slots,
                                             const ArenaVector<HBasicBlock*>& block_order) {
  block_order_ = &block_order;
  DCHECK(!block_order.empty());
  DCHECK(block_order[0] == GetGraph()->GetEntryBlock());
  ComputeSpillMask();
  first_register_slot_in_slow_path_ = RoundUp(
      (number_of_out_slots + number_of_spill_slots) * kVRegSize, GetPreferredSlotsAlignment());

  if (number_of_spill_slots == 0
      && !HasAllocatedCalleeSaveRegisters()
      && IsLeafMethod()
      && !RequiresCurrentMethod()) {
    DCHECK_EQ(maximum_safepoint_spill_size, 0u);
    SetFrameSize(CallPushesPC() ? GetWordSize() : 0);
  } else {
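    // The frame holds, from lowest address upwards: the outgoing argument
    // slots and spill slots, the slow-path register save area, the optional
    // ShouldDeoptimize flag, and the frame-entry spills; the total is rounded
    // up to the stack alignment.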
    SetFrameSize(RoundUp(
        first_register_slot_in_slow_path_
        + maximum_safepoint_spill_size
        + (GetGraph()->HasShouldDeoptimizeFlag() ? kShouldDeoptimizeFlagSize : 0)
        + FrameEntrySpillSize(),
        kStackAlignment));
  }
}

void CodeGenerator::CreateCommonInvokeLocationSummary(
    HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
  LocationSummary* locations = new (allocator) LocationSummary(invoke,
                                                               LocationSummary::kCallOnMainOnly);

  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
    HInstruction* input = invoke->InputAt(i);
    locations->SetInAt(i, visitor->GetNextLocation(input->GetType()));
  }

  locations->SetOut(visitor->GetReturnLocation(invoke->GetType()));

  if (invoke->IsInvokeStaticOrDirect()) {
    HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect();
    switch (call->GetMethodLoadKind()) {
      case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
        locations->SetInAt(call->GetSpecialInputIndex(), visitor->GetMethodLocation());
        break;
      case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall:
        locations->AddTemp(visitor->GetMethodLocation());
        locations->SetInAt(call->GetSpecialInputIndex(), Location::RequiresRegister());
        break;
      default:
        locations->AddTemp(visitor->GetMethodLocation());
        break;
    }
  } else if (!invoke->IsInvokePolymorphic()) {
    locations->AddTemp(visitor->GetMethodLocation());
  }
}

void CodeGenerator::GenerateInvokeStaticOrDirectRuntimeCall(
    HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
  MoveConstant(temp, invoke->GetDexMethodIndex());

  // The access check is unnecessary but we do not want to introduce
  // extra entrypoints for the codegens that do not support some
  // invoke type and fall back to the runtime call.

  // Initialize to anything to silence compiler warnings.
  QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
  switch (invoke->GetInvokeType()) {
    case kStatic:
      entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
      break;
    case kDirect:
      entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
      break;
    case kSuper:
      entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
      break;
    case kVirtual:
    case kInterface:
    case kPolymorphic:
    case kCustom:
      LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
      UNREACHABLE();
  }

  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), slow_path);
}

void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke) {
  MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetDexMethodIndex());

  // Initialize to anything to silence compiler warnings.
  QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
  switch (invoke->GetInvokeType()) {
    case kStatic:
      entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
      break;
    case kDirect:
      entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
      break;
    case kVirtual:
      entrypoint = kQuickInvokeVirtualTrampolineWithAccessCheck;
      break;
    case kSuper:
      entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
      break;
    case kInterface:
      entrypoint = kQuickInvokeInterfaceTrampolineWithAccessCheck;
      break;
    case kPolymorphic:
    case kCustom:
      LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
      UNREACHABLE();
  }
  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}

void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke) {
  // invoke-polymorphic does not use a temporary to convey any additional information (e.g. a
  // method index) since it requires multiple pieces of information from the instruction
  // (registers A, B, H). Not using the reservation has no effect on the registers used in the
  // runtime call.
  QuickEntrypointEnum entrypoint = kQuickInvokePolymorphic;
  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}

void CodeGenerator::GenerateInvokeCustomCall(HInvokeCustom* invoke) {
  MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetCallSiteIndex());
  QuickEntrypointEnum entrypoint = kQuickInvokeCustom;
  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}

void CodeGenerator::CreateUnresolvedFieldLocationSummary(
    HInstruction* field_access,
    DataType::Type field_type,
    const FieldAccessCallingConvention& calling_convention) {
  bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedInstanceFieldSet();
  bool is_get = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedStaticFieldGet();

  ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetAllocator();
  LocationSummary* locations =
      new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly);

  locations->AddTemp(calling_convention.GetFieldIndexLocation());

  if (is_instance) {
    // Add the `this` object for instance field accesses.
    locations->SetInAt(0, calling_convention.GetObjectLocation());
  }

  // Note that pSetXXStatic/pGetXXStatic always takes/returns an int or int64
  // regardless of the type. Because of that we are forced to special-case
  // accesses to floating point values.
  if (is_get) {
    if (DataType::IsFloatingPointType(field_type)) {
      // The return value will be stored in regular registers while the
      // register allocator expects it in a floating point register.
      // Note that we don't need to request additional temps because the return
      // register(s) are already blocked due to the call and they may overlap with
      // the input or field index.
      // The transfer between the two will be done at codegen level.
      locations->SetOut(calling_convention.GetFpuLocation(field_type));
    } else {
      locations->SetOut(calling_convention.GetReturnLocation(field_type));
    }
  } else {
    size_t set_index = is_instance ? 1 : 0;
    if (DataType::IsFloatingPointType(field_type)) {
      // The set value comes from a float location while the calling convention
      // expects it in a regular register location. Allocate a temp for it and
      // make the transfer at codegen.
      AddLocationAsTemp(calling_convention.GetSetValueLocation(field_type, is_instance), locations);
      locations->SetInAt(set_index, calling_convention.GetFpuLocation(field_type));
    } else {
      locations->SetInAt(set_index,
          calling_convention.GetSetValueLocation(field_type, is_instance));
    }
  }
}

void CodeGenerator::GenerateUnresolvedFieldAccess(
    HInstruction* field_access,
    DataType::Type field_type,
    uint32_t field_index,
    uint32_t dex_pc,
    const FieldAccessCallingConvention& calling_convention) {
  LocationSummary* locations = field_access->GetLocations();

  MoveConstant(locations->GetTemp(0), field_index);

  bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedInstanceFieldSet();
  bool is_get = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedStaticFieldGet();

  if (!is_get && DataType::IsFloatingPointType(field_type)) {
    // Copy the float value to be set into the calling convention register.
    // Note that using the temp location directly is problematic as we don't
    // support temp register pairs. To avoid boilerplate conversion code, use
    // the location from the calling convention.
    MoveLocation(calling_convention.GetSetValueLocation(field_type, is_instance),
                 locations->InAt(is_instance ? 1 : 0),
                 (DataType::Is64BitType(field_type) ? DataType::Type::kInt64
                                                    : DataType::Type::kInt32));
  }

  QuickEntrypointEnum entrypoint = kQuickSet8Static;  // Initialize to anything to avoid warnings.
  switch (field_type) {
    case DataType::Type::kBool:
      entrypoint = is_instance
          ? (is_get ? kQuickGetBooleanInstance : kQuickSet8Instance)
          : (is_get ? kQuickGetBooleanStatic : kQuickSet8Static);
      break;
    case DataType::Type::kInt8:
      entrypoint = is_instance
          ? (is_get ? kQuickGetByteInstance : kQuickSet8Instance)
          : (is_get ? kQuickGetByteStatic : kQuickSet8Static);
      break;
    case DataType::Type::kInt16:
      entrypoint = is_instance
          ? (is_get ? kQuickGetShortInstance : kQuickSet16Instance)
          : (is_get ? kQuickGetShortStatic : kQuickSet16Static);
      break;
    case DataType::Type::kUint16:
      entrypoint = is_instance
          ? (is_get ? kQuickGetCharInstance : kQuickSet16Instance)
          : (is_get ? kQuickGetCharStatic : kQuickSet16Static);
      break;
    case DataType::Type::kInt32:
    case DataType::Type::kFloat32:
      entrypoint = is_instance
          ? (is_get ? kQuickGet32Instance : kQuickSet32Instance)
          : (is_get ? kQuickGet32Static : kQuickSet32Static);
      break;
    case DataType::Type::kReference:
      entrypoint = is_instance
          ? (is_get ? kQuickGetObjInstance : kQuickSetObjInstance)
          : (is_get ? kQuickGetObjStatic : kQuickSetObjStatic);
      break;
    case DataType::Type::kInt64:
    case DataType::Type::kFloat64:
      entrypoint = is_instance
          ? (is_get ? kQuickGet64Instance : kQuickSet64Instance)
          : (is_get ? kQuickGet64Static : kQuickSet64Static);
      break;
    default:
      LOG(FATAL) << "Invalid type " << field_type;
  }
  InvokeRuntime(entrypoint, field_access, dex_pc, nullptr);

  if (is_get && DataType::IsFloatingPointType(field_type)) {
    MoveLocation(locations->Out(), calling_convention.GetReturnLocation(field_type), field_type);
  }
}

void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
                                                              Location runtime_type_index_location,
                                                              Location runtime_return_location) {
  DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
  DCHECK_EQ(cls->InputCount(), 1u);
  LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
      cls, LocationSummary::kCallOnMainOnly);
  locations->SetInAt(0, Location::NoLocation());
  locations->AddTemp(runtime_type_index_location);
  locations->SetOut(runtime_return_location);
}

void CodeGenerator::GenerateLoadClassRuntimeCall(HLoadClass* cls) {
  DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
  DCHECK(!cls->MustGenerateClinitCheck());
  LocationSummary* locations = cls->GetLocations();
  MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
  if (cls->NeedsAccessCheck()) {
    CheckEntrypointTypes<kQuickResolveTypeAndVerifyAccess, void*, uint32_t>();
    InvokeRuntime(kQuickResolveTypeAndVerifyAccess, cls, cls->GetDexPc());
  } else {
    CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
    InvokeRuntime(kQuickResolveType, cls, cls->GetDexPc());
  }
}

void CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(
    HLoadMethodHandle* method_handle,
    Location runtime_proto_index_location,
    Location runtime_return_location) {
  DCHECK_EQ(method_handle->InputCount(), 1u);
  LocationSummary* locations =
      new (method_handle->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
          method_handle, LocationSummary::kCallOnMainOnly);
  locations->SetInAt(0, Location::NoLocation());
  locations->AddTemp(runtime_proto_index_location);
  locations->SetOut(runtime_return_location);
}

void CodeGenerator::GenerateLoadMethodHandleRuntimeCall(HLoadMethodHandle* method_handle) {
  LocationSummary* locations = method_handle->GetLocations();
  MoveConstant(locations->GetTemp(0), method_handle->GetMethodHandleIndex());
  CheckEntrypointTypes<kQuickResolveMethodHandle, void*, uint32_t>();
  InvokeRuntime(kQuickResolveMethodHandle, method_handle, method_handle->GetDexPc());
}

void CodeGenerator::CreateLoadMethodTypeRuntimeCallLocationSummary(
    HLoadMethodType* method_type,
    Location runtime_proto_index_location,
    Location runtime_return_location) {
  DCHECK_EQ(method_type->InputCount(), 1u);
  LocationSummary* locations =
      new (method_type->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
          method_type, LocationSummary::kCallOnMainOnly);
  locations->SetInAt(0, Location::NoLocation());
  locations->AddTemp(runtime_proto_index_location);
  locations->SetOut(runtime_return_location);
}

void CodeGenerator::GenerateLoadMethodTypeRuntimeCall(HLoadMethodType* method_type) {
  LocationSummary* locations = method_type->GetLocations();
  MoveConstant(locations->GetTemp(0), method_type->GetProtoIndex().index_);
  CheckEntrypointTypes<kQuickResolveMethodType, void*, uint32_t>();
  InvokeRuntime(kQuickResolveMethodType, method_type, method_type->GetDexPc());
}

static uint32_t GetBootImageOffsetImpl(const void* object, ImageHeader::ImageSections section) {
  Runtime* runtime = Runtime::Current();
  DCHECK(runtime->IsAotCompiler());
  const std::vector<gc::space::ImageSpace*>& boot_image_spaces =
      runtime->GetHeap()->GetBootImageSpaces();
  // Check that the `object` is in the expected section of one of the boot image files.
  DCHECK(std::any_of(boot_image_spaces.begin(),
                     boot_image_spaces.end(),
                     [object, section](gc::space::ImageSpace* space) {
                       uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
                       uintptr_t offset = reinterpret_cast<uintptr_t>(object) - begin;
                       return space->GetImageHeader().GetImageSection(section).Contains(offset);
                     }));
  uintptr_t begin = reinterpret_cast<uintptr_t>(boot_image_spaces.front()->Begin());
  uintptr_t offset = reinterpret_cast<uintptr_t>(object) - begin;
  return dchecked_integral_cast<uint32_t>(offset);
}

// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image classes are non-moveable.
uint32_t CodeGenerator::GetBootImageOffset(HLoadClass* load_class) NO_THREAD_SAFETY_ANALYSIS {
  DCHECK_EQ(load_class->GetLoadKind(), HLoadClass::LoadKind::kBootImageRelRo);
  ObjPtr<mirror::Class> klass = load_class->GetClass().Get();
  DCHECK(klass != nullptr);
  return GetBootImageOffsetImpl(klass.Ptr(), ImageHeader::kSectionObjects);
}

// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image strings are non-moveable.
uint32_t CodeGenerator::GetBootImageOffset(HLoadString* load_string) NO_THREAD_SAFETY_ANALYSIS {
  DCHECK_EQ(load_string->GetLoadKind(), HLoadString::LoadKind::kBootImageRelRo);
  ObjPtr<mirror::String> string = load_string->GetString().Get();
  DCHECK(string != nullptr);
  return GetBootImageOffsetImpl(string.Ptr(), ImageHeader::kSectionObjects);
}

uint32_t CodeGenerator::GetBootImageOffset(HInvokeStaticOrDirect* invoke) {
  DCHECK_EQ(invoke->GetMethodLoadKind(), HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo);
  ArtMethod* method = invoke->GetResolvedMethod();
  DCHECK(method != nullptr);
  return GetBootImageOffsetImpl(method, ImageHeader::kSectionArtMethods);
}

void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
  // The DCHECKs below check that a register is not specified twice in
  // the summary. The out location can overlap with an input, so we need
  // to special-case it.
  if (location.IsRegister()) {
    DCHECK(is_out || !blocked_core_registers_[location.reg()]);
    blocked_core_registers_[location.reg()] = true;
  } else if (location.IsFpuRegister()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.reg()]);
    blocked_fpu_registers_[location.reg()] = true;
  } else if (location.IsFpuRegisterPair()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()] = true;
  } else if (location.IsRegisterPair()) {
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairLow<int>()]);
    blocked_core_registers_[location.AsRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairHigh<int>()]);
    blocked_core_registers_[location.AsRegisterPairHigh<int>()] = true;
  }
}

void CodeGenerator::AllocateLocations(HInstruction* instruction) {
  for (HEnvironment* env = instruction->GetEnvironment(); env != nullptr; env = env->GetParent()) {
    env->AllocateLocations();
  }
  instruction->Accept(GetLocationBuilder());
  DCHECK(CheckTypeConsistency(instruction));
  LocationSummary* locations = instruction->GetLocations();
  if (!instruction->IsSuspendCheckEntry()) {
    if (locations != nullptr) {
      if (locations->CanCall()) {
        MarkNotLeaf();
      } else if (locations->Intrinsified() &&
                 instruction->IsInvokeStaticOrDirect() &&
                 !instruction->AsInvokeStaticOrDirect()->HasCurrentMethodInput()) {
        // A static method call that has been fully intrinsified, and cannot call on the slow
        // path or refer to the current method directly, no longer needs the current method.
        return;
      }
    }
    if (instruction->NeedsCurrentMethod()) {
      SetRequiresCurrentMethod();
    }
  }
}

std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
                                                     const CompilerOptions& compiler_options,
                                                     OptimizingCompilerStats* stats) {
  ArenaAllocator* allocator = graph->GetAllocator();
  switch (compiler_options.GetInstructionSet()) {
#ifdef ART_ENABLE_CODEGEN_arm
    case InstructionSet::kArm:
    case InstructionSet::kThumb2: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) arm::CodeGeneratorARMVIXL(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    case InstructionSet::kArm64: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) arm64::CodeGeneratorARM64(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_mips
    case InstructionSet::kMips: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) mips::CodeGeneratorMIPS(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
    case InstructionSet::kMips64: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) mips64::CodeGeneratorMIPS64(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case InstructionSet::kX86: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) x86::CodeGeneratorX86(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    case InstructionSet::kX86_64: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) x86_64::CodeGeneratorX86_64(graph, compiler_options, stats));
    }
#endif
    default:
      return nullptr;
  }
}

CodeGenerator::CodeGenerator(HGraph* graph,
                             size_t number_of_core_registers,
                             size_t number_of_fpu_registers,
                             size_t number_of_register_pairs,
                             uint32_t core_callee_save_mask,
                             uint32_t fpu_callee_save_mask,
                             const CompilerOptions& compiler_options,
                             OptimizingCompilerStats* stats)
    : frame_size_(0),
      core_spill_mask_(0),
      fpu_spill_mask_(0),
      first_register_slot_in_slow_path_(0),
      allocated_registers_(RegisterSet::Empty()),
      blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
                                                                      kArenaAllocCodeGenerator)),
      blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
                                                                     kArenaAllocCodeGenerator)),
      number_of_core_registers_(number_of_core_registers),
      number_of_fpu_registers_(number_of_fpu_registers),
      number_of_register_pairs_(number_of_register_pairs),
      core_callee_save_mask_(core_callee_save_mask),
      fpu_callee_save_mask_(fpu_callee_save_mask),
      block_order_(nullptr),
      disasm_info_(nullptr),
      stats_(stats),
      graph_(graph),
      compiler_options_(compiler_options),
      current_slow_path_(nullptr),
      current_block_index_(0),
      is_leaf_(true),
      requires_current_method_(false),
      code_generation_data_() {
}

CodeGenerator::~CodeGenerator() {}

size_t CodeGenerator::GetNumberOfJitRoots() const {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetNumberOfJitRoots();
}

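// Debug helper used by CheckLoopEntriesCanBeUsedForOsr below: for every loop
// header whose dex pc equals `dex_pc`, bump its counter in `covered` and, for
// OSR compilations, check that an OSR stack map exists for that dex pc.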
static void CheckCovers(uint32_t dex_pc,
                        const HGraph& graph,
                        const CodeInfo& code_info,
                        const ArenaVector<HSuspendCheck*>& loop_headers,
                        ArenaVector<size_t>* covered) {
  for (size_t i = 0; i < loop_headers.size(); ++i) {
    if (loop_headers[i]->GetDexPc() == dex_pc) {
      if (graph.IsCompilingOsr()) {
        DCHECK(code_info.GetOsrStackMapForDexPc(dex_pc).IsValid());
      }
      ++(*covered)[i];
    }
  }
}

// Debug helper to ensure loop entries in compiled code are matched by
// dex branch instructions.
static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
                                            const CodeInfo& code_info,
                                            const dex::CodeItem& code_item) {
  if (graph.HasTryCatch()) {
    // One can write loops through try/catch, which we do not support for OSR anyway.
    return;
  }
  ArenaVector<HSuspendCheck*> loop_headers(graph.GetAllocator()->Adapter(kArenaAllocMisc));
  for (HBasicBlock* block : graph.GetReversePostOrder()) {
    if (block->IsLoopHeader()) {
      HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
      if (!suspend_check->GetEnvironment()->IsFromInlinedInvoke()) {
        loop_headers.push_back(suspend_check);
      }
    }
  }
  ArenaVector<size_t> covered(
      loop_headers.size(), 0, graph.GetAllocator()->Adapter(kArenaAllocMisc));
  for (const DexInstructionPcPair& pair : CodeItemInstructionAccessor(graph.GetDexFile(),
                                                                      &code_item)) {
    const uint32_t dex_pc = pair.DexPc();
    const Instruction& instruction = pair.Inst();
    if (instruction.IsBranch()) {
      uint32_t target = dex_pc + instruction.GetTargetOffset();
      CheckCovers(target, graph, code_info, loop_headers, &covered);
    } else if (instruction.IsSwitch()) {
      DexSwitchTable table(instruction, dex_pc);
      uint16_t num_entries = table.GetNumEntries();
      size_t offset = table.GetFirstValueIndex();

      // Use a larger loop counter type to avoid overflow issues.
      for (size_t i = 0; i < num_entries; ++i) {
        // The target of the case.
        uint32_t target = dex_pc + table.GetEntryAt(i + offset);
        CheckCovers(target, graph, code_info, loop_headers, &covered);
      }
    }
  }

  for (size_t i = 0; i < covered.size(); ++i) {
    DCHECK_NE(covered[i], 0u) << "Loop in compiled code has no dex branch equivalent";
  }
}

ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const dex::CodeItem* code_item) {
  ScopedArenaVector<uint8_t> stack_map = GetStackMapStream()->Encode();
  if (kIsDebugBuild && code_item != nullptr) {
    CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map.data()), *code_item);
  }
  return stack_map;
}

void CodeGenerator::RecordPcInfo(HInstruction* instruction,
                                 uint32_t dex_pc,
                                 SlowPathCode* slow_path,
                                 bool native_debug_info) {
  if (instruction != nullptr) {
    // The code generated for some type conversions
    // may call the runtime, thus normally requiring a subsequent
    // call to this method. However, the method verifier does not
    // produce PC information for certain instructions, which are
    // considered "atomic" (they cannot join a GC).
    // Therefore we do not currently record PC information for such
    // instructions.  As this may change later, we added this special
    // case so that code generators may nevertheless call
    // CodeGenerator::RecordPcInfo without triggering an error in
    // CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x")
    // thereafter.
    if (instruction->IsTypeConversion()) {
      return;
    }
    if (instruction->IsRem()) {
      DataType::Type type = instruction->AsRem()->GetResultType();
      if ((type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64)) {
        return;
      }
    }
  }

  // Collect PC infos for the mapping table.
  uint32_t native_pc = GetAssembler()->CodePosition();

  StackMapStream* stack_map_stream = GetStackMapStream();
  if (instruction == nullptr) {
    // For stack overflow checks and native-debug-info entries without dex register
    // mapping (i.e. start of basic block or start of slow path).
    stack_map_stream->BeginStackMapEntry(dex_pc, native_pc);
    stack_map_stream->EndStackMapEntry();
    return;
  }

  LocationSummary* locations = instruction->GetLocations();
  uint32_t register_mask = locations->GetRegisterMask();
  DCHECK_EQ(register_mask & ~locations->GetLiveRegisters()->GetCoreRegisters(), 0u);
  if (locations->OnlyCallsOnSlowPath()) {
    // In case of slow path, we currently set the location of caller-save registers
    // to register (instead of their stack location when pushed before the slow-path
    // call). Therefore register_mask contains both callee-save and caller-save
    // registers that hold objects. We must remove the spilled caller-save from the
    // mask, since they will be overwritten by the callee.
    uint32_t spills = GetSlowPathSpills(locations, /* core_registers= */ true);
    register_mask &= ~spills;
  } else {
    // The register mask must be a subset of callee-save registers.
    DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
  }

  uint32_t outer_dex_pc = dex_pc;
  uint32_t outer_environment_size = 0u;
  uint32_t inlining_depth = 0;
  HEnvironment* const environment = instruction->GetEnvironment();
  if (environment != nullptr) {
    HEnvironment* outer_environment = environment;
    while (outer_environment->GetParent() != nullptr) {
      outer_environment = outer_environment->GetParent();
      ++inlining_depth;
    }
    outer_dex_pc = outer_environment->GetDexPc();
    outer_environment_size = outer_environment->Size();
  }

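  // An OSR entry stack map is emitted only for suspend checks in loops of the
  // outermost (non-inlined) frame, since those are the points where execution
  // can transfer from the interpreter into this compiled code.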
  HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
  bool osr =
      instruction->IsSuspendCheck() &&
      (info != nullptr) &&
      graph_->IsCompilingOsr() &&
      (inlining_depth == 0);
  StackMap::Kind kind = native_debug_info
      ? StackMap::Kind::Debug
      : (osr ? StackMap::Kind::OSR : StackMap::Kind::Default);
  stack_map_stream->BeginStackMapEntry(outer_dex_pc,
                                       native_pc,
                                       register_mask,
                                       locations->GetStackMask(),
                                       kind);
  EmitEnvironment(environment, slow_path);
  stack_map_stream->EndStackMapEntry();

  if (osr) {
    DCHECK_EQ(info->GetSuspendCheck(), instruction);
    DCHECK(info->IsIrreducible());
    DCHECK(environment != nullptr);
    if (kIsDebugBuild) {
      for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
        HInstruction* in_environment = environment->GetInstructionAt(i);
        if (in_environment != nullptr) {
          DCHECK(in_environment->IsPhi() || in_environment->IsConstant());
          Location location = environment->GetLocationAt(i);
          DCHECK(location.IsStackSlot() ||
                 location.IsDoubleStackSlot() ||
                 location.IsConstant() ||
                 location.IsInvalid());
          if (location.IsStackSlot() || location.IsDoubleStackSlot()) {
            DCHECK_LT(location.GetStackIndex(), static_cast<int32_t>(GetFrameSize()));
          }
        }
      }
    }
  }
}

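// Returns true if the last recorded stack map points at the current code
// offset, i.e. no native instructions have been emitted since it was recorded.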
bool CodeGenerator::HasStackMapAtCurrentPc() {
  uint32_t pc = GetAssembler()->CodeSize();
  StackMapStream* stack_map_stream = GetStackMapStream();
  size_t count = stack_map_stream->GetNumberOfStackMaps();
  if (count == 0) {
    return false;
  }
  return stack_map_stream->GetStackMapNativePcOffset(count - 1) == pc;
}

void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
                                               uint32_t dex_pc,
                                               SlowPathCode* slow_path) {
  if (GetCompilerOptions().GetNativeDebuggable() && dex_pc != kNoDexPc) {
    if (HasStackMapAtCurrentPc()) {
      // Ensure that we do not collide with the stack map of the previous instruction.
      GenerateNop();
    }
    RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info= */ true);
  }
}

void CodeGenerator::RecordCatchBlockInfo() {
  StackMapStream* stack_map_stream = GetStackMapStream();

  for (HBasicBlock* block : *block_order_) {
    if (!block->IsCatchBlock()) {
      continue;
    }

    uint32_t dex_pc = block->GetDexPc();
    uint32_t num_vregs = graph_->GetNumberOfVRegs();
    uint32_t native_pc = GetAddressOf(block);

    stack_map_stream->BeginStackMapEntry(dex_pc,
                                         native_pc,
                                         /* register_mask= */ 0,
                                         /* sp_mask= */ nullptr,
                                         StackMap::Kind::Catch);

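    // Walk the catch phis (sorted by vreg number) in lock step with the vreg indices,
    // recording a stack slot for every vreg defined by a catch phi and kNone for the rest.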
    HInstruction* current_phi = block->GetFirstPhi();
    for (size_t vreg = 0; vreg < num_vregs; ++vreg) {
      while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
        HInstruction* next_phi = current_phi->GetNext();
        DCHECK(next_phi == nullptr ||
               current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
            << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
        current_phi = next_phi;
      }

      if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
        stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
      } else {
        Location location = current_phi->GetLocations()->Out();
        switch (location.GetKind()) {
          case Location::kStackSlot: {
            stack_map_stream->AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
            break;
          }
          case Location::kDoubleStackSlot: {
            stack_map_stream->AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
            stack_map_stream->AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
            ++vreg;
            DCHECK_LT(vreg, num_vregs);
            break;
          }
          default: {
            // All catch phis must be allocated to a stack slot.
            LOG(FATAL) << "Unexpected kind " << location.GetKind();
            UNREACHABLE();
          }
        }
      }
    }

    stack_map_stream->EndStackMapEntry();
  }
}

void CodeGenerator::AddSlowPath(SlowPathCode* slow_path) {
  DCHECK(code_generation_data_ != nullptr);
  code_generation_data_->AddSlowPath(slow_path);
}

void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path) {
  if (environment == nullptr) return;

  StackMapStream* stack_map_stream = GetStackMapStream();
  if (environment->GetParent() != nullptr) {
    // We emit the parent environment first.
    EmitEnvironment(environment->GetParent(), slow_path);
    stack_map_stream->BeginInlineInfoEntry(environment->GetMethod(),
                                           environment->GetDexPc(),
                                           environment->Size(),
                                           &graph_->GetDexFile());
  }

  // Walk over the environment, and record the location of dex registers.
  for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
    HInstruction* current = environment->GetInstructionAt(i);
    if (current == nullptr) {
      stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
      continue;
    }

    using Kind = DexRegisterLocation::Kind;
    Location location = environment->GetLocationAt(i);
    switch (location.GetKind()) {
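      // Note: 64-bit values span two consecutive dex registers, so the constant, register
      // and stack-slot cases below may emit two entries and advance 'i' past the high half.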
      case Location::kConstant: {
        DCHECK_EQ(current, location.GetConstant());
        if (current->IsLongConstant()) {
          int64_t value = current->AsLongConstant()->GetValue();
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsDoubleConstant()) {
          int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsIntConstant()) {
          int32_t value = current->AsIntConstant()->GetValue();
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
        } else if (current->IsNullConstant()) {
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, 0);
        } else {
          DCHECK(current->IsFloatConstant()) << current->DebugName();
          int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
        }
        break;
      }

      case Location::kStackSlot: {
        stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
        break;
      }

      case Location::kDoubleStackSlot: {
        stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
        stack_map_stream->AddDexRegisterEntry(
            Kind::kInStack, location.GetHighStackIndex(kVRegSize));
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegister: {
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
          if (current->GetType() == DataType::Type::kInt64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, id);
          if (current->GetType() == DataType::Type::kInt64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInRegisterHigh, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegister: {
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
          if (current->GetType() == DataType::Type::kFloat64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, id);
          if (current->GetType() == DataType::Type::kFloat64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegisterHigh, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegisterPair: {
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
          ++i;
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, high);
          ++i;
        }
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegisterPair: {
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, high);
        }
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kInvalid: {
        stack_map_stream->AddDexRegisterEntry(Kind::kNone, 0);
        break;
      }

      default:
        LOG(FATAL) << "Unexpected kind " << location.GetKind();
    }
  }

  if (environment->GetParent() != nullptr) {
    stack_map_stream->EndInlineInfoEntry();
  }
}

bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
  return null_check->IsEmittedAtUseSite();
}

void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
  HNullCheck* null_check = instr->GetImplicitNullCheck();
  if (null_check != nullptr) {
    RecordPcInfo(null_check, null_check->GetDexPc());
  }
}

LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* instruction,
                                                                RegisterSet caller_saves) {
  // Note: Using kNoCall allows the method to be treated as a leaf (and eliminates the
  // HSuspendCheck from the entry block). However, it will still get a valid stack frame
  // because the HNullCheck needs an environment.
  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  // When throwing from a try block, we may need to retrieve dalvik registers from
  // physical registers, and we also need to set up the stack mask for GC. This is
  // implicitly achieved by passing kCallOnSlowPath to the LocationSummary.
  bool can_throw_into_catch_block = instruction->CanThrowIntoCatchBlock();
  if (can_throw_into_catch_block) {
    call_kind = LocationSummary::kCallOnSlowPath;
  }
  LocationSummary* locations =
      new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
  if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
    locations->SetCustomSlowPathCallerSaves(caller_saves);  // Default: no caller-save registers.
  }
  DCHECK(!instruction->HasUses());
  return locations;
}

void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) {
  if (compiler_options_.GetImplicitNullChecks()) {
    MaybeRecordStat(stats_, MethodCompilationStat::kImplicitNullCheckGenerated);
    GenerateImplicitNullCheck(instruction);
  } else {
    MaybeRecordStat(stats_, MethodCompilationStat::kExplicitNullCheckGenerated);
    GenerateExplicitNullCheck(instruction);
  }
}

void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check,
                                                          HParallelMove* spills) const {
  LocationSummary* locations = suspend_check->GetLocations();
  HBasicBlock* block = suspend_check->GetBlock();
  DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
  DCHECK(block->IsLoopHeader());
  DCHECK(block->GetFirstInstruction() == spills);

  for (size_t i = 0, num_moves = spills->NumMoves(); i != num_moves; ++i) {
    Location dest = spills->MoveOperandsAt(i)->GetDestination();
    // All parallel moves in loop headers are spills.
    DCHECK(dest.IsStackSlot() || dest.IsDoubleStackSlot() || dest.IsSIMDStackSlot()) << dest;
    // Clear the stack bit marking a reference. Do not bother to check if the spill is
    // actually a reference spill; clearing bits that are already zero is harmless.
    locations->ClearStackBit(dest.GetStackIndex() / kVRegSize);
  }
}

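// Emits both moves as a single HParallelMove so that the move resolver can order them
// correctly even if the locations overlap (e.g. 'to1' aliasing 'from2').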
void CodeGenerator::EmitParallelMoves(Location from1,
                                      Location to1,
                                      DataType::Type type1,
                                      Location from2,
                                      Location to2,
                                      DataType::Type type2) {
  HParallelMove parallel_move(GetGraph()->GetAllocator());
  parallel_move.AddMove(from1, to1, type1, nullptr);
  parallel_move.AddMove(from2, to2, type2, nullptr);
  GetMoveResolver()->EmitNativeCode(&parallel_move);
}

void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
                                          HInstruction* instruction,
                                          SlowPathCode* slow_path) {
  // Ensure that the call kind indication given to the register allocator is
  // coherent with the runtime call generated.
  if (slow_path == nullptr) {
    DCHECK(instruction->GetLocations()->WillCall())
        << "instruction->DebugName()=" << instruction->DebugName();
  } else {
    DCHECK(instruction->GetLocations()->CallsOnSlowPath() || slow_path->IsFatal())
        << "instruction->DebugName()=" << instruction->DebugName()
        << " slow_path->GetDescription()=" << slow_path->GetDescription();
  }

  // Check that the GC side effect is set when required.
  // TODO: Reverse EntrypointCanTriggerGC
  if (EntrypointCanTriggerGC(entrypoint)) {
    if (slow_path == nullptr) {
      DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
          << "instruction->DebugName()=" << instruction->DebugName()
          << " instruction->GetSideEffects().ToString()="
          << instruction->GetSideEffects().ToString();
    } else {
      // The 'CanTriggerGC' side effect is used to restrict optimization of instructions
      // that depend on GC (e.g. IntermediateAddress), ensuring they are not live across GC
      // points. However, if execution never returns to the compiled code from a GC point,
      // this restriction is unnecessary; in particular, it does not apply to fatal slow
      // paths, even those that might trigger GC.
      DCHECK((slow_path->IsFatal() && !instruction->GetLocations()->WillCall()) ||
             instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
             // When (non-Baker) read barriers are enabled, some instructions
             // use a slow path to emit a read barrier, which does not trigger
             // GC.
             (kEmitCompilerReadBarrier &&
              !kUseBakerReadBarrier &&
              (instruction->IsInstanceFieldGet() ||
               instruction->IsStaticFieldGet() ||
               instruction->IsArrayGet() ||
               instruction->IsLoadClass() ||
               instruction->IsLoadString() ||
               instruction->IsInstanceOf() ||
               instruction->IsCheckCast() ||
               (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))))
          << "instruction->DebugName()=" << instruction->DebugName()
          << " instruction->GetSideEffects().ToString()="
          << instruction->GetSideEffects().ToString()
          << " slow_path->GetDescription()=" << slow_path->GetDescription();
    }
  } else {
    // The GC side effect is not required for the instruction. But the instruction might still have
    // it, for example if it calls other entrypoints requiring it.
  }

  // Check the coherency of leaf information.
  DCHECK(instruction->IsSuspendCheck()
         || ((slow_path != nullptr) && slow_path->IsFatal())
         || instruction->GetLocations()->CanCall()
         || !IsLeafMethod())
      << instruction->DebugName() << ((slow_path != nullptr) ? slow_path->GetDescription() : "");
}

void CodeGenerator::ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction,
                                                                SlowPathCode* slow_path) {
  DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath())
      << "instruction->DebugName()=" << instruction->DebugName()
      << " slow_path->GetDescription()=" << slow_path->GetDescription();
  // Only the Baker read barrier marking slow path used by certain
  // instructions is expected to invoke the runtime without recording
  // PC-related information.
  DCHECK(kUseBakerReadBarrier);
  DCHECK(instruction->IsInstanceFieldGet() ||
         instruction->IsStaticFieldGet() ||
         instruction->IsArrayGet() ||
         instruction->IsArraySet() ||
         instruction->IsLoadClass() ||
         instruction->IsLoadString() ||
         instruction->IsInstanceOf() ||
         instruction->IsCheckCast() ||
         (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()) ||
         (instruction->IsInvokeStaticOrDirect() && instruction->GetLocations()->Intrinsified()))
      << "instruction->DebugName()=" << instruction->DebugName()
      << " slow_path->GetDescription()=" << slow_path->GetDescription();
}

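// Spills the live core and floating-point registers recorded in 'locations' into the
// slow-path spill area, marking stack slots that receive an object reference in the
// stack mask.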
void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();

  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
  for (uint32_t i : LowToHighBits(core_spills)) {
    // If the register holds an object, update the stack mask.
    if (locations->RegisterContainsObject(i)) {
      locations->SetStackBit(stack_offset / kVRegSize);
    }
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    saved_core_stack_offsets_[i] = stack_offset;
    stack_offset += codegen->SaveCoreRegister(stack_offset, i);
  }

  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
  for (uint32_t i : LowToHighBits(fp_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    saved_fpu_stack_offsets_[i] = stack_offset;
    stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
  }
}

void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();

  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
  for (uint32_t i : LowToHighBits(core_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
  }

  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
  for (uint32_t i : LowToHighBits(fp_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
  }
}

void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
  // Check for known failures that would force us to bail out to the runtime;
  // in that case, just generate the runtime call directly.
  HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
  HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();

  // The positions must be non-negative.
  if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
      (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
    // We will have to fail anyway.
    return;
  }

  // The length must be >= 0.
  HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
  if (length != nullptr) {
    int32_t len = length->GetValue();
    if (len < 0) {
      // Just call as normal.
      return;
    }
  }

  SystemArrayCopyOptimizations optimizations(invoke);

  if (optimizations.GetDestinationIsSource()) {
    if (src_pos != nullptr && dest_pos != nullptr && src_pos->GetValue() < dest_pos->GetValue()) {
      // We only support backward copying if source and destination are the same.
      return;
    }
  }

  if (optimizations.GetDestinationIsPrimitiveArray() || optimizations.GetSourceIsPrimitiveArray()) {
    // We currently don't intrinsify primitive copying.
    return;
  }

  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
  LocationSummary* locations = new (allocator) LocationSummary(invoke,
                                                               LocationSummary::kCallOnSlowPath,
                                                               kIntrinsified);
  // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
  locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));

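  // Three temporary registers; their exact use is up to the per-architecture intrinsic
  // implementation.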
  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
}

void CodeGenerator::EmitJitRoots(uint8_t* code,
                                 const uint8_t* roots_data,
                                 /*out*/std::vector<Handle<mirror::Object>>* roots) {
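  // Gather the JIT GC roots recorded during code generation, then let the backend patch
  // the code locations that reference them.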
  code_generation_data_->EmitJitRoots(roots);
  EmitJitRootPatches(code, roots_data);
}

QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(HNewArray* new_array) {
  switch (new_array->GetComponentSizeShift()) {
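    // The component size shift encodes the element size: 0 -> 1 byte, 1 -> 2 bytes,
    // 2 -> 4 bytes, 3 -> 8 bytes.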
    case 0: return kQuickAllocArrayResolved8;
    case 1: return kQuickAllocArrayResolved16;
    case 2: return kQuickAllocArrayResolved32;
    case 3: return kQuickAllocArrayResolved64;
  }
  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}

}  // namespace art