/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_

#include "arch/arm64/quick_method_frame_info_arm64.h"
#include "code_generator.h"
#include "common_arm64.h"
#include "dex/dex_file_types.h"
#include "dex/string_reference.h"
#include "dex/type_reference.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "utils/arm64/assembler_arm64.h"

// TODO(VIXL): Make VIXL compile with -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "aarch64/disasm-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#pragma GCC diagnostic pop

namespace art {
namespace arm64 {

class CodeGeneratorARM64;

// Use a local definition to prevent copying mistakes.
static constexpr size_t kArm64WordSize = static_cast<size_t>(kArm64PointerSize);

// These constants are used as an approximate margin when emission of veneer and literal pools
// must be blocked.
static constexpr int kMaxMacroInstructionSizeInBytes = 15 * vixl::aarch64::kInstructionSize;
static constexpr int kInvokeCodeMarginSizeInBytes = 6 * kMaxMacroInstructionSizeInBytes;
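
// A sketch of how these margins are meant to be used (assuming VIXL's
// EmissionCheckScope; this illustration is not part of the header's API): open
// a scope sized by the margin so that pending veneer/literal pools are flushed
// before, never inside, a region whose PC is recorded in a stack map.
//
//   vixl::EmissionCheckScope guard(masm, kInvokeCodeMarginSizeInBytes);
//   // ... emit the invoke and record its PC; pool emission is blocked here ...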

static const vixl::aarch64::Register kParameterCoreRegisters[] = {
  vixl::aarch64::x1,
  vixl::aarch64::x2,
  vixl::aarch64::x3,
  vixl::aarch64::x4,
  vixl::aarch64::x5,
  vixl::aarch64::x6,
  vixl::aarch64::x7
};
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
static const vixl::aarch64::FPRegister kParameterFPRegisters[] = {
  vixl::aarch64::d0,
  vixl::aarch64::d1,
  vixl::aarch64::d2,
  vixl::aarch64::d3,
  vixl::aarch64::d4,
  vixl::aarch64::d5,
  vixl::aarch64::d6,
  vixl::aarch64::d7
};
static constexpr size_t kParameterFPRegistersLength = arraysize(kParameterFPRegisters);

// Thread Register.
const vixl::aarch64::Register tr = vixl::aarch64::x19;
// Marking Register.
const vixl::aarch64::Register mr = vixl::aarch64::x20;
// Method register on invoke.
static const vixl::aarch64::Register kArtMethodRegister = vixl::aarch64::x0;
const vixl::aarch64::CPURegList vixl_reserved_core_registers(vixl::aarch64::ip0,
                                                             vixl::aarch64::ip1);
const vixl::aarch64::CPURegList vixl_reserved_fp_registers(vixl::aarch64::d31);

const vixl::aarch64::CPURegList runtime_reserved_core_registers =
    vixl::aarch64::CPURegList(
        tr,
        // Reserve X20 as Marking Register when emitting Baker read barriers.
        ((kEmitCompilerReadBarrier && kUseBakerReadBarrier) ? mr : vixl::aarch64::NoCPUReg),
        vixl::aarch64::lr);

// AAPCS64 callee-saved registers, excluding x19 (the Thread Register) and,
// when emitting Baker read barriers, x20 (the Marking Register).
const vixl::aarch64::CPURegList callee_saved_core_registers(
    vixl::aarch64::CPURegister::kRegister,
    vixl::aarch64::kXRegSize,
    ((kEmitCompilerReadBarrier && kUseBakerReadBarrier)
         ? vixl::aarch64::x21.GetCode()
         : vixl::aarch64::x20.GetCode()),
    vixl::aarch64::x30.GetCode());
const vixl::aarch64::CPURegList callee_saved_fp_registers(vixl::aarch64::CPURegister::kFPRegister,
                                                          vixl::aarch64::kDRegSize,
                                                          vixl::aarch64::d8.GetCode(),
                                                          vixl::aarch64::d15.GetCode());

Location ARM64ReturnLocation(DataType::Type return_type);

class SlowPathCodeARM64 : public SlowPathCode {
 public:
  explicit SlowPathCodeARM64(HInstruction* instruction)
      : SlowPathCode(instruction), entry_label_(), exit_label_() {}

  vixl::aarch64::Label* GetEntryLabel() { return &entry_label_; }
  vixl::aarch64::Label* GetExitLabel() { return &exit_label_; }

  void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
  void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;

 private:
  vixl::aarch64::Label entry_label_;
  vixl::aarch64::Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64);
};

class JumpTableARM64 : public DeletableArenaObject<kArenaAllocSwitchTable> {
 public:
  explicit JumpTableARM64(HPackedSwitch* switch_instr)
    : switch_instr_(switch_instr), table_start_() {}

  vixl::aarch64::Label* GetTableStartLabel() { return &table_start_; }

  void EmitTable(CodeGeneratorARM64* codegen);

 private:
  HPackedSwitch* const switch_instr_;
  vixl::aarch64::Label table_start_;

  DISALLOW_COPY_AND_ASSIGN(JumpTableARM64);
};
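
// A plausible consumption of the table emitted by EmitTable (a sketch assuming
// the table holds 32-bit offsets relative to its start label; register names
// are illustrative):
//
//   __ Adr(table_base, jump_table->GetTableStartLabel());
//   __ Ldr(offset_w, MemOperand(table_base, index_w, UXTW, 2));  // 4-byte entries
//   __ Add(target, table_base, Operand(offset_w, SXTW));
//   __ Br(target);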

static const vixl::aarch64::Register kRuntimeParameterCoreRegisters[] =
    { vixl::aarch64::x0,
      vixl::aarch64::x1,
      vixl::aarch64::x2,
      vixl::aarch64::x3,
      vixl::aarch64::x4,
      vixl::aarch64::x5,
      vixl::aarch64::x6,
      vixl::aarch64::x7 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);
static const vixl::aarch64::FPRegister kRuntimeParameterFpuRegisters[] =
    { vixl::aarch64::d0,
      vixl::aarch64::d1,
      vixl::aarch64::d2,
      vixl::aarch64::d3,
      vixl::aarch64::d4,
      vixl::aarch64::d5,
      vixl::aarch64::d6,
      vixl::aarch64::d7 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);

class InvokeRuntimeCallingConvention : public CallingConvention<vixl::aarch64::Register,
                                                                vixl::aarch64::FPRegister> {
 public:
  static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);

  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength,
                          kArm64PointerSize) {}

  Location GetReturnLocation(DataType::Type return_type);

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

class InvokeDexCallingConvention : public CallingConvention<vixl::aarch64::Register,
                                                            vixl::aarch64::FPRegister> {
 public:
  InvokeDexCallingConvention()
      : CallingConvention(kParameterCoreRegisters,
                          kParameterCoreRegistersLength,
                          kParameterFPRegisters,
                          kParameterFPRegistersLength,
                          kArm64PointerSize) {}

  Location GetReturnLocation(DataType::Type return_type) const {
    return ARM64ReturnLocation(return_type);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};

class InvokeDexCallingConventionVisitorARM64 : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorARM64() {}
  virtual ~InvokeDexCallingConventionVisitorARM64() {}

  Location GetNextLocation(DataType::Type type) OVERRIDE;
  Location GetReturnLocation(DataType::Type return_type) const OVERRIDE {
    return calling_convention.GetReturnLocation(return_type);
  }
  Location GetMethodLocation() const OVERRIDE;

 private:
  InvokeDexCallingConvention calling_convention;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARM64);
};

class FieldAccessCallingConventionARM64 : public FieldAccessCallingConvention {
 public:
  FieldAccessCallingConventionARM64() {}

  Location GetObjectLocation() const OVERRIDE {
    return helpers::LocationFrom(vixl::aarch64::x1);
  }
  Location GetFieldIndexLocation() const OVERRIDE {
    return helpers::LocationFrom(vixl::aarch64::x0);
  }
  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    return helpers::LocationFrom(vixl::aarch64::x0);
  }
  Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED,
                               bool is_instance) const OVERRIDE {
    return is_instance
        ? helpers::LocationFrom(vixl::aarch64::x2)
        : helpers::LocationFrom(vixl::aarch64::x1);
  }
  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    return helpers::LocationFrom(vixl::aarch64::d0);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM64);
};

class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
 public:
  InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

  Arm64Assembler* GetAssembler() const { return assembler_; }
  vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }

 private:
  void GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
                                        vixl::aarch64::Register class_reg);
  void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
  void HandleBinaryOp(HBinaryOperation* instr);

  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      bool value_can_be_null);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleCondition(HCondition* instruction);

  // Generate a heap reference load using one register `out`:
  //
  //   out <- *(out + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a read barrier and
  // shall be a register in that case; it may be an invalid location
  // otherwise.
  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                        Location out,
                                        uint32_t offset,
                                        Location maybe_temp,
                                        ReadBarrierOption read_barrier_option);
  // Generate a heap reference load using two different registers
  // `out` and `obj`:
  //
  //   out <- *(obj + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a Baker's (fast
  // path) read barrier and shall be a register in that case; it may
  // be an invalid location otherwise.
  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
                                         Location maybe_temp,
                                         ReadBarrierOption read_barrier_option);
  // Generate a GC root reference load:
  //
  //   root <- *(obj + offset)
  //
  // while honoring read barriers based on read_barrier_option.
  void GenerateGcRootFieldLoad(HInstruction* instruction,
                               Location root,
                               vixl::aarch64::Register obj,
                               uint32_t offset,
                               vixl::aarch64::Label* fixup_label,
                               ReadBarrierOption read_barrier_option);

  // Generate a floating-point comparison.
  void GenerateFcmp(HInstruction* instruction);

  void HandleShift(HBinaryOperation* instr);
  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             vixl::aarch64::Label* true_target,
                             vixl::aarch64::Label* false_target);
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivRemByPowerOfTwo(HBinaryOperation* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateDivRemIntegral(HBinaryOperation* instruction);
  void HandleGoto(HInstruction* got, HBasicBlock* successor);

  vixl::aarch64::MemOperand VecAddress(
      HVecMemoryOperation* instruction,
      // This function may acquire a scratch register.
      vixl::aarch64::UseScratchRegisterScope* temps_scope,
      size_t size,
      bool is_string_char_at,
      /*out*/ vixl::aarch64::Register* scratch);

  Arm64Assembler* const assembler_;
  CodeGeneratorARM64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorARM64);
};

class LocationsBuilderARM64 : public HGraphVisitor {
 public:
  LocationsBuilderARM64(HGraph* graph, CodeGeneratorARM64* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super) \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

 private:
  void HandleBinaryOp(HBinaryOperation* instr);
  void HandleFieldSet(HInstruction* instruction);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleInvoke(HInvoke* instr);
  void HandleCondition(HCondition* instruction);
  void HandleShift(HBinaryOperation* instr);

  CodeGeneratorARM64* const codegen_;
  InvokeDexCallingConventionVisitorARM64 parameter_visitor_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM64);
};

class ParallelMoveResolverARM64 : public ParallelMoveResolverNoSwap {
 public:
  ParallelMoveResolverARM64(ArenaAllocator* allocator, CodeGeneratorARM64* codegen)
      : ParallelMoveResolverNoSwap(allocator), codegen_(codegen), vixl_temps_() {}

 protected:
  void PrepareForEmitNativeCode() OVERRIDE;
  void FinishEmitNativeCode() OVERRIDE;
  Location AllocateScratchLocationFor(Location::Kind kind) OVERRIDE;
  void FreeScratchLocation(Location loc) OVERRIDE;
  void EmitMove(size_t index) OVERRIDE;

 private:
  Arm64Assembler* GetAssembler() const;
  vixl::aarch64::MacroAssembler* GetVIXLAssembler() const {
    return GetAssembler()->GetVIXLAssembler();
  }

  CodeGeneratorARM64* const codegen_;
  vixl::aarch64::UseScratchRegisterScope vixl_temps_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverARM64);
};

class CodeGeneratorARM64 : public CodeGenerator {
 public:
  CodeGeneratorARM64(HGraph* graph,
                     const Arm64InstructionSetFeatures& isa_features,
                     const CompilerOptions& compiler_options,
                     OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorARM64() {}

  void GenerateFrameEntry() OVERRIDE;
  void GenerateFrameExit() OVERRIDE;

  vixl::aarch64::CPURegList GetFramePreservedCoreRegisters() const;
  vixl::aarch64::CPURegList GetFramePreservedFPRegisters() const;

  void Bind(HBasicBlock* block) OVERRIDE;

  vixl::aarch64::Label* GetLabelOf(HBasicBlock* block) {
    block = FirstNonEmptyBlock(block);
    return &(block_labels_[block->GetBlockId()]);
  }

  size_t GetWordSize() const OVERRIDE {
    return kArm64WordSize;
  }

  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
    return GetGraph()->HasSIMD()
        ? 2 * kArm64WordSize   // 16 bytes == 2 arm64 words for each spill
        : 1 * kArm64WordSize;  //  8 bytes == 1 arm64 word for each spill
  }

  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
    vixl::aarch64::Label* block_entry_label = GetLabelOf(block);
    DCHECK(block_entry_label->IsBound());
    return block_entry_label->GetLocation();
  }

  HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
  HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
  Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
  const Arm64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
  vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }

  // Emit a write barrier.
  void MarkGCCard(vixl::aarch64::Register object,
                  vixl::aarch64::Register value,
                  bool value_can_be_null);

  void GenerateMemoryBarrier(MemBarrierKind kind);

  // Register allocation.

  void SetupBlockedRegisters() const OVERRIDE;

  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;

  // The number of registers that can be allocated. The register allocator may
  // decide to reserve and not use a few of them.
  // We do not consider registers sp, xzr, wzr. They are either not allocatable
  // (xzr, wzr), or make for poor allocatable registers (sp alignment
  // requirements, etc.). This also facilitates our task as all other registers
  // can easily be mapped to or from their type and index or code.
  static const int kNumberOfAllocatableRegisters = vixl::aarch64::kNumberOfRegisters - 1;
  static const int kNumberOfAllocatableFPRegisters = vixl::aarch64::kNumberOfFPRegisters;
  static constexpr int kNumberOfAllocatableRegisterPairs = 0;

  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;

  InstructionSet GetInstructionSet() const OVERRIDE {
    return InstructionSet::kArm64;
  }

  const Arm64InstructionSetFeatures& GetInstructionSetFeatures() const {
    return isa_features_;
  }

  void Initialize() OVERRIDE {
    block_labels_.resize(GetGraph()->GetBlocks().size());
  }

  // We want to use the STP and LDP instructions to spill and restore registers for slow paths.
  // These instructions can only encode offsets that are multiples of the register size accessed.
  uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return vixl::aarch64::kXRegSizeInBytes; }
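
  // For example, with 8-byte slot alignment a slow path can save a register
  // pair with a single STP (a sketch; registers and offset are illustrative):
  //
  //   stp x20, x21, [sp, #16]   // the immediate must be a multiple of 8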

  JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) {
    jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARM64(switch_instr));
    return jump_tables_.back().get();
  }

  void Finalize(CodeAllocator* allocator) OVERRIDE;

  // Code generation helpers.
  void MoveConstant(vixl::aarch64::CPURegister destination, HConstant* constant);
  void MoveConstant(Location destination, int32_t value) OVERRIDE;
  void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;
  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;

  void Load(DataType::Type type,
            vixl::aarch64::CPURegister dst,
            const vixl::aarch64::MemOperand& src);
  void Store(DataType::Type type,
             vixl::aarch64::CPURegister src,
             const vixl::aarch64::MemOperand& dst);
  void LoadAcquire(HInstruction* instruction,
                   vixl::aarch64::CPURegister dst,
                   const vixl::aarch64::MemOperand& src,
                   bool needs_null_check);
  void StoreRelease(HInstruction* instruction,
                    DataType::Type type,
                    vixl::aarch64::CPURegister src,
                    const vixl::aarch64::MemOperand& dst,
                    bool needs_null_check);

  // Generate code to invoke a runtime entry point.
  void InvokeRuntime(QuickEntrypointEnum entrypoint,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path = nullptr) OVERRIDE;
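
  // For example, a slow path may call (a sketch; the entry point and
  // instruction are illustrative):
  //
  //   codegen->InvokeRuntime(kQuickThrowNullPointer, instruction, instruction->GetDexPc());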

  // Generate code to invoke a runtime entry point, but do not record
  // PC-related information in a stack map.
  void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
                                           HInstruction* instruction,
                                           SlowPathCode* slow_path);

  ParallelMoveResolverARM64* GetMoveResolver() OVERRIDE { return &move_resolver_; }

  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    return false;
  }

  // Check if the desired_string_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadString::LoadKind GetSupportedLoadStringKind(
      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;

  // Check if the desired_class_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadClass::LoadKind GetSupportedLoadClassKind(
      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;

  // Check if the desired_dispatch_info is supported. If it is, return it,
  // otherwise return a fall-back info that should be used instead.
  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      HInvokeStaticOrDirect* invoke) OVERRIDE;

  void GenerateStaticOrDirectCall(
      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
  void GenerateVirtualCall(
      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;

  void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
                              DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE {
    UNIMPLEMENTED(FATAL);
  }

  // Add a new PC-relative method patch for an instruction and return the label
  // to be bound before the instruction. The instruction will be either the
  // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
  // to the associated ADRP patch label).
  vixl::aarch64::Label* NewBootImageMethodPatch(MethodReference target_method,
                                                vixl::aarch64::Label* adrp_label = nullptr);
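
  // After linking, the patched instructions are expected to form the usual
  // AArch64 PC-relative addressing pair (a sketch; xN is illustrative):
  //
  //   adrp xN, <target page>        // bound at the ADRP patch label
  //   add  xN, xN, #<page offset>   // bound at the label returned for the ADD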

  // Add a new .bss entry method patch for an instruction and return
  // the label to be bound before the instruction. The instruction will be
  // either the ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label`
  // pointing to the associated ADRP patch label).
  vixl::aarch64::Label* NewMethodBssEntryPatch(MethodReference target_method,
                                               vixl::aarch64::Label* adrp_label = nullptr);

  // Add a new PC-relative type patch for an instruction and return the label
  // to be bound before the instruction. The instruction will be either the
  // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
  // to the associated ADRP patch label).
  vixl::aarch64::Label* NewBootImageTypePatch(const DexFile& dex_file,
                                              dex::TypeIndex type_index,
                                              vixl::aarch64::Label* adrp_label = nullptr);

  // Add a new .bss entry type patch for an instruction and return the label
  // to be bound before the instruction. The instruction will be either the
  // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
  // to the associated ADRP patch label).
  vixl::aarch64::Label* NewBssEntryTypePatch(const DexFile& dex_file,
                                             dex::TypeIndex type_index,
                                             vixl::aarch64::Label* adrp_label = nullptr);

  // Add a new PC-relative string patch for an instruction and return the label
  // to be bound before the instruction. The instruction will be either the
  // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
  // to the associated ADRP patch label).
  vixl::aarch64::Label* NewBootImageStringPatch(const DexFile& dex_file,
                                                dex::StringIndex string_index,
                                                vixl::aarch64::Label* adrp_label = nullptr);

  // Add a new .bss entry string patch for an instruction and return the label
  // to be bound before the instruction. The instruction will be either the
  // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
  // to the associated ADRP patch label).
  vixl::aarch64::Label* NewStringBssEntryPatch(const DexFile& dex_file,
                                               dex::StringIndex string_index,
                                               vixl::aarch64::Label* adrp_label = nullptr);

  // Add a new Baker read barrier patch and return the label to be bound
  // before the CBNZ instruction.
  vixl::aarch64::Label* NewBakerReadBarrierPatch(uint32_t custom_data);

  vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address);
  vixl::aarch64::Literal<uint32_t>* DeduplicateJitStringLiteral(const DexFile& dex_file,
                                                                dex::StringIndex string_index,
                                                                Handle<mirror::String> handle);
  vixl::aarch64::Literal<uint32_t>* DeduplicateJitClassLiteral(const DexFile& dex_file,
                                                               dex::TypeIndex type_index,
                                                               Handle<mirror::Class> handle);

  void EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label, vixl::aarch64::Register reg);
  void EmitAddPlaceholder(vixl::aarch64::Label* fixup_label,
                          vixl::aarch64::Register out,
                          vixl::aarch64::Register base);
  void EmitLdrOffsetPlaceholder(vixl::aarch64::Label* fixup_label,
                                vixl::aarch64::Register out,
                                vixl::aarch64::Register base);
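
  // A minimal usage sketch tying the patch factories above to these
  // placeholders, e.g. materializing a boot-image string address (names and
  // registers are illustrative):
  //
  //   vixl::aarch64::Label* adrp_label =
  //       codegen->NewBootImageStringPatch(dex_file, string_index);
  //   codegen->EmitAdrpPlaceholder(adrp_label, out.X());
  //   vixl::aarch64::Label* add_label =
  //       codegen->NewBootImageStringPatch(dex_file, string_index, adrp_label);
  //   codegen->EmitAddPlaceholder(add_label, out.X(), out.X());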

  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;

  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;

  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             vixl::aarch64::Register obj,
                                             uint32_t offset,
                                             Location maybe_temp,
                                             bool needs_null_check,
                                             bool use_load_acquire);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference array load when Baker's read barriers are used.
  void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             vixl::aarch64::Register obj,
                                             uint32_t data_offset,
                                             Location index,
                                             vixl::aarch64::Register temp,
                                             bool needs_null_check);
  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
  // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
  //
  // Load the object reference located at the address
  // `obj + offset + (index << scale_factor)`, held by object `obj`, into
  // `ref`, and mark it if needed.
  void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                 Location ref,
                                                 vixl::aarch64::Register obj,
                                                 uint32_t offset,
                                                 Location index,
                                                 size_t scale_factor,
                                                 vixl::aarch64::Register temp,
                                                 bool needs_null_check,
                                                 bool use_load_acquire);

  // Generate code checking whether the reference field at the
  // address `obj + field_offset`, held by object `obj`, needs to be
  // marked, and if so, marking it and updating the field within `obj`
  // with the marked value.
  //
  // This routine is used for the implementation of the
  // UnsafeCASObject intrinsic with Baker read barriers.
  //
  // This method has a structure similar to
  // GenerateReferenceLoadWithBakerReadBarrier, but note that argument
  // `ref` is only used as a temporary here, and thus its value should
  // not be used afterwards.
  void UpdateReferenceFieldWithBakerReadBarrier(HInstruction* instruction,
                                                Location ref,
                                                vixl::aarch64::Register obj,
                                                Location field_offset,
                                                vixl::aarch64::Register temp,
                                                bool needs_null_check,
                                                bool use_load_acquire);

  // Generate a heap reference load (with no read barrier).
  void GenerateRawReferenceLoad(HInstruction* instruction,
                                Location ref,
                                vixl::aarch64::Register obj,
                                uint32_t offset,
                                Location index,
                                size_t scale_factor,
                                bool needs_null_check,
                                bool use_load_acquire);

  // Emit code checking the status of the Marking Register, and
  // aborting the program if MR does not match the value stored in the
  // art::Thread object. Code is only emitted in debug mode and if
  // CompilerOptions::EmitRunTimeChecksInDebugMode returns true.
  //
  // Argument `code` is used to identify the different occurrences of
  // MaybeGenerateMarkingRegisterCheck in the code generator, and is
  // passed to the BRK instruction.
  //
  // If `temp_loc` is a valid location, it is expected to be a
  // register and will be used as a temporary to generate code;
  // otherwise, a temporary will be fetched from the core register
  // scratch pool.
  virtual void MaybeGenerateMarkingRegisterCheck(int code,
                                                 Location temp_loc = Location::NoLocation());
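
  // The emitted check is expected to be of the form (a sketch; the exact field
  // offset comes from art::Thread):
  //
  //   ldr <temp>, [tr, #<is_gc_marking offset>]
  //   cmp mr, <temp>
  //   b.eq done
  //   brk #code   // The Marking Register and Thread::is_gc_marking disagree.
  // done: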

  // Generate a read barrier for a heap reference within `instruction`
  // using a slow path.
  //
  // A read barrier for an object reference read from the heap is
  // implemented as a call to the artReadBarrierSlow runtime entry
  // point, which is passed the values in locations `ref`, `obj`, and
  // `offset`:
  //
  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
  //                                      mirror::Object* obj,
  //                                      uint32_t offset);
  //
  // The `out` location contains the value returned by
  // artReadBarrierSlow.
  //
  // When `index` is provided (i.e. for array accesses), the offset
  // value passed to artReadBarrierSlow is adjusted to take `index`
  // into account.
  void GenerateReadBarrierSlow(HInstruction* instruction,
                               Location out,
                               Location ref,
                               Location obj,
                               uint32_t offset,
                               Location index = Location::NoLocation());

  // If read barriers are enabled, generate a read barrier for a heap
  // reference using a slow path. If heap poisoning is enabled, also
  // unpoison the reference in `out`.
  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
                                    Location out,
                                    Location ref,
                                    Location obj,
                                    uint32_t offset,
                                    Location index = Location::NoLocation());

  // Generate a read barrier for a GC root within `instruction` using
  // a slow path.
  //
  // A read barrier for an object reference GC root is implemented as
  // a call to the artReadBarrierForRootSlow runtime entry point,
  // which is passed the value in location `root`:
  //
  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
  //
  // The `out` location contains the value returned by
  // artReadBarrierForRootSlow.
  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

  void GenerateNop() OVERRIDE;

  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;

 private:
  using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, vixl::aarch64::Literal<uint64_t>*>;
  using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, vixl::aarch64::Literal<uint32_t>*>;
  using StringToLiteralMap = ArenaSafeMap<StringReference,
                                          vixl::aarch64::Literal<uint32_t>*,
                                          StringReferenceValueComparator>;
  using TypeToLiteralMap = ArenaSafeMap<TypeReference,
                                        vixl::aarch64::Literal<uint32_t>*,
                                        TypeReferenceValueComparator>;

  vixl::aarch64::Literal<uint32_t>* DeduplicateUint32Literal(uint32_t value);
  vixl::aarch64::Literal<uint64_t>* DeduplicateUint64Literal(uint64_t value);

  // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
  // whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
  struct PcRelativePatchInfo : PatchInfo<vixl::aarch64::Label> {
    PcRelativePatchInfo(const DexFile* dex_file, uint32_t off_or_idx)
        : PatchInfo<vixl::aarch64::Label>(dex_file, off_or_idx), pc_insn_label() { }

    vixl::aarch64::Label* pc_insn_label;
  };

  struct BakerReadBarrierPatchInfo {
    explicit BakerReadBarrierPatchInfo(uint32_t data) : label(), custom_data(data) { }

    vixl::aarch64::Label label;
    uint32_t custom_data;
  };

  vixl::aarch64::Label* NewPcRelativePatch(const DexFile* dex_file,
                                           uint32_t offset_or_index,
                                           vixl::aarch64::Label* adrp_label,
                                           ArenaDeque<PcRelativePatchInfo>* patches);

  void EmitJumpTables();

  template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
  static void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
                                          ArenaVector<linker::LinkerPatch>* linker_patches);

  // Labels for each block that will be compiled.
  // We use a deque so that the `vixl::aarch64::Label` objects do not move in memory.
  ArenaDeque<vixl::aarch64::Label> block_labels_;  // Indexed by block id.
  vixl::aarch64::Label frame_entry_label_;
  ArenaVector<std::unique_ptr<JumpTableARM64>> jump_tables_;

  LocationsBuilderARM64 location_builder_;
  InstructionCodeGeneratorARM64 instruction_visitor_;
  ParallelMoveResolverARM64 move_resolver_;
  Arm64Assembler assembler_;
  const Arm64InstructionSetFeatures& isa_features_;

  // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
  Uint32ToLiteralMap uint32_literals_;
  // Deduplication map for 64-bit literals, used for non-patchable method address or method code.
  Uint64ToLiteralMap uint64_literals_;
  // PC-relative method patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_method_patches_;
  // PC-relative method patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
  // PC-relative type patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_type_patches_;
  // PC-relative type patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
  // PC-relative String patch info; type depends on configuration (intern table or boot image PIC).
  ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
  // PC-relative String patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
  // Baker read barrier patch info.
  ArenaDeque<BakerReadBarrierPatchInfo> baker_read_barrier_patches_;

  // Patches for string literals in JIT compiled code.
  StringToLiteralMap jit_string_patches_;
  // Patches for class literals in JIT compiled code.
  TypeToLiteralMap jit_class_patches_;

  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
};

inline Arm64Assembler* ParallelMoveResolverARM64::GetAssembler() const {
  return codegen_->GetAssembler();
}

}  // namespace arm64
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_