/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_

#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "code_generator.h"
#include "dex/compiler_enums.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "utils/x86_64/assembler_x86_64.h"

namespace art {
namespace x86_64 {

// Use a local definition to prevent copying mistakes.
static constexpr size_t kX86_64WordSize = kX86_64PointerSize;

// Some x86_64 instructions require a register to be available as temp.
static constexpr Register TMP = R11;

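// Managed-ABI argument registers. Core arguments start at RSI rather than RDI
// because RDI carries the ArtMethod* being invoked (see
// InvokeDexCallingConventionVisitorX86_64::GetMethodLocation() below).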
static constexpr Register kParameterCoreRegisters[] = { RSI, RDX, RCX, R8, R9 };
static constexpr FloatRegister kParameterFloatRegisters[] =
    { XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7 };

static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
static constexpr size_t kParameterFloatRegistersLength = arraysize(kParameterFloatRegisters);

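// Registers used when calling into the runtime; these match the first integer
// and floating-point argument registers of the native System V AMD64 ABI
// (RDI, RSI, RDX, RCX and XMM0, XMM1).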
static constexpr Register kRuntimeParameterCoreRegisters[] = { RDI, RSI, RDX, RCX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);
static constexpr FloatRegister kRuntimeParameterFpuRegisters[] = { XMM0, XMM1 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);

// These XMM registers are non-volatile in the ART ABI, but volatile in the native ABI.
// If the ART ABI changes, this list must be updated.  It is used to ensure that
// these are not clobbered by any direct call to native code (such as math intrinsics).
static constexpr FloatRegister non_volatile_xmm_regs[] = { XMM12, XMM13, XMM14, XMM15 };


class InvokeRuntimeCallingConvention : public CallingConvention<Register, FloatRegister> {
 public:
  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength,
                          kX86_64PointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

class InvokeDexCallingConvention : public CallingConvention<Register, FloatRegister> {
 public:
  InvokeDexCallingConvention() : CallingConvention(
      kParameterCoreRegisters,
      kParameterCoreRegistersLength,
      kParameterFloatRegisters,
      kParameterFloatRegistersLength,
      kX86_64PointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};

class FieldAccessCallingConventionX86_64 : public FieldAccessCallingConvention {
 public:
  FieldAccessCallingConventionX86_64() {}

  Location GetObjectLocation() const OVERRIDE {
    return Location::RegisterLocation(RSI);
  }
  Location GetFieldIndexLocation() const OVERRIDE {
    return Location::RegisterLocation(RDI);
  }
  Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    return Location::RegisterLocation(RAX);
  }
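  // Where the value to be stored is passed: RDX for 64-bit values and for
  // instance field sets, RSI otherwise (i.e. for 32-bit static sets).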
  Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
    return Primitive::Is64BitType(type)
        ? Location::RegisterLocation(RDX)
        : (is_instance
            ? Location::RegisterLocation(RDX)
            : Location::RegisterLocation(RSI));
  }
  Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    return Location::FpuRegisterLocation(XMM0);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionX86_64);
};


class InvokeDexCallingConventionVisitorX86_64 : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorX86_64() {}
  virtual ~InvokeDexCallingConventionVisitorX86_64() {}

  Location GetNextLocation(Primitive::Type type) OVERRIDE;
  Location GetReturnLocation(Primitive::Type type) const OVERRIDE;
  Location GetMethodLocation() const OVERRIDE;

 private:
  InvokeDexCallingConvention calling_convention;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorX86_64);
};

class CodeGeneratorX86_64;

class ParallelMoveResolverX86_64 : public ParallelMoveResolverWithSwap {
 public:
  ParallelMoveResolverX86_64(ArenaAllocator* allocator, CodeGeneratorX86_64* codegen)
      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}

  void EmitMove(size_t index) OVERRIDE;
  void EmitSwap(size_t index) OVERRIDE;
  void SpillScratch(int reg) OVERRIDE;
  void RestoreScratch(int reg) OVERRIDE;

  X86_64Assembler* GetAssembler() const;

 private:
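  // Helpers for EmitSwap(): exchange two registers, a register and a stack
  // slot, or two stack slots, for 32-bit and 64-bit values respectively.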
  void Exchange32(CpuRegister reg, int mem);
  void Exchange32(XmmRegister reg, int mem);
  void Exchange32(int mem1, int mem2);
  void Exchange64(CpuRegister reg1, CpuRegister reg2);
  void Exchange64(CpuRegister reg, int mem);
  void Exchange64(XmmRegister reg, int mem);
  void Exchange64(int mem1, int mem2);

  CodeGeneratorX86_64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverX86_64);
};

class LocationsBuilderX86_64 : public HGraphVisitor {
 public:
  LocationsBuilderX86_64(HGraph* graph, CodeGeneratorX86_64* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

 private:
  void HandleInvoke(HInvoke* invoke);
  void HandleBitwiseOperation(HBinaryOperation* operation);
  void HandleCondition(HCondition* condition);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleFieldGet(HInstruction* instruction);

  CodeGeneratorX86_64* const codegen_;
  InvokeDexCallingConventionVisitorX86_64 parameter_visitor_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderX86_64);
};

class InstructionCodeGeneratorX86_64 : public InstructionCodeGenerator {
 public:
  InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen);

#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

  X86_64Assembler* GetAssembler() const { return assembler_; }

 private:
  // Generate code for the given suspend check. If `successor` is not null, it
  // is the block to branch to when the suspend check is not needed, and also
  // the block to jump to after the suspend call.
  void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
  void GenerateClassInitializationCheck(SlowPathCode* slow_path, CpuRegister class_reg);
  void HandleBitwiseOperation(HBinaryOperation* operation);
  void GenerateRemFP(HRem* rem);
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivByPowerOfTwo(HDiv* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateDivRemIntegral(HBinaryOperation* instruction);
  void HandleCondition(HCondition* condition);
  void HandleShift(HBinaryOperation* operation);

  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      bool value_can_be_null);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);

  // Generate a heap reference load using one register `out`:
  //
  //   out <- *(out + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a read barrier and
  // shall be a register in that case; it may be an invalid location
  // otherwise.
  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                        Location out,
                                        uint32_t offset,
                                        Location maybe_temp);
  // Generate a heap reference load using two different registers
  // `out` and `obj`:
  //
  //   out <- *(obj + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a Baker's (fast
  // path) read barrier and shall be a register in that case; it may
  // be an invalid location otherwise.
  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
                                         Location maybe_temp);
  // Generate a GC root reference load:
  //
  //   root <- *address
  //
  // while honoring read barriers (if any).
  void GenerateGcRootFieldLoad(HInstruction* instruction,
                               Location root,
                               const Address& address,
                               Label* fixup_label = nullptr);

  void PushOntoFPStack(Location source, uint32_t temp_offset,
                       uint32_t stack_adjustment, bool is_float);
  void GenerateCompareTest(HCondition* condition);
  template<class LabelType>
  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             LabelType* true_target,
                             LabelType* false_target);
  template<class LabelType>
  void GenerateCompareTestAndBranch(HCondition* condition,
                                    LabelType* true_target,
                                    LabelType* false_target);
  template<class LabelType>
  void GenerateFPJumps(HCondition* cond, LabelType* true_label, LabelType* false_label);

  void HandleGoto(HInstruction* got, HBasicBlock* successor);

  X86_64Assembler* const assembler_;
  CodeGeneratorX86_64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorX86_64);
};

// Class for fixups to jump tables.
class JumpTableRIPFixup;

class CodeGeneratorX86_64 : public CodeGenerator {
 public:
  CodeGeneratorX86_64(HGraph* graph,
                      const X86_64InstructionSetFeatures& isa_features,
                      const CompilerOptions& compiler_options,
                      OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorX86_64() {}

  void GenerateFrameEntry() OVERRIDE;
  void GenerateFrameExit() OVERRIDE;
  void Bind(HBasicBlock* block) OVERRIDE;
  void MoveConstant(Location destination, int32_t value) OVERRIDE;
  void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;

  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;

  // Generate code to invoke a runtime entry point.
  void InvokeRuntime(QuickEntrypointEnum entrypoint,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path) OVERRIDE;

  void InvokeRuntime(int32_t entry_point_offset,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path);

  size_t GetWordSize() const OVERRIDE {
    return kX86_64WordSize;
  }

  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
    return kX86_64WordSize;
  }

  HGraphVisitor* GetLocationBuilder() OVERRIDE {
    return &location_builder_;
  }

  HGraphVisitor* GetInstructionVisitor() OVERRIDE {
    return &instruction_visitor_;
  }

  X86_64Assembler* GetAssembler() OVERRIDE {
    return &assembler_;
  }

  const X86_64Assembler& GetAssembler() const OVERRIDE {
    return assembler_;
  }

  ParallelMoveResolverX86_64* GetMoveResolver() OVERRIDE {
    return &move_resolver_;
  }

  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
    return GetLabelOf(block)->Position();
  }

  void SetupBlockedRegisters() const OVERRIDE;
  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
  void Finalize(CodeAllocator* allocator) OVERRIDE;

  InstructionSet GetInstructionSet() const OVERRIDE {
    return InstructionSet::kX86_64;
  }

  // Emit a write barrier.
  void MarkGCCard(CpuRegister temp,
                  CpuRegister card,
                  CpuRegister object,
                  CpuRegister value,
                  bool value_can_be_null);

  void GenerateMemoryBarrier(MemBarrierKind kind);

  // Helper method to move a value between two locations.
  void Move(Location destination, Location source);

  Label* GetLabelOf(HBasicBlock* block) const {
    return CommonGetLabelOf<Label>(block_labels_, block);
  }

  void Initialize() OVERRIDE {
    block_labels_ = CommonInitializeLabels<Label>();
  }

  bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    return false;
  }

  // Check if the desired_string_load_kind is supported. If it is, return it;
  // otherwise return a fall-back kind that should be used instead.
  HLoadString::LoadKind GetSupportedLoadStringKind(
      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;

  // Check if the desired_dispatch_info is supported. If it is, return it;
  // otherwise return a fall-back info that should be used instead.
  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      MethodReference target_method) OVERRIDE;

  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
  void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;

  void RecordSimplePatch();
  void RecordStringPatch(HLoadString* load_string);
  Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);

  void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;

  void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;

  const X86_64InstructionSetFeatures& GetInstructionSetFeatures() const {
    return isa_features_;
  }

  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             CpuRegister obj,
                                             uint32_t offset,
                                             Location temp,
                                             bool needs_null_check);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference array load when Baker's read barriers are used.
  void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             CpuRegister obj,
                                             uint32_t data_offset,
                                             Location index,
                                             Location temp,
                                             bool needs_null_check);

  // Generate a read barrier for a heap reference within `instruction`
  // using a slow path.
  //
  // A read barrier for an object reference read from the heap is
  // implemented as a call to the artReadBarrierSlow runtime entry
  // point, which is passed the values in locations `ref`, `obj`, and
  // `offset`:
  //
  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
  //                                      mirror::Object* obj,
  //                                      uint32_t offset);
  //
  // The `out` location contains the value returned by
  // artReadBarrierSlow.
  //
  // When `index` is provided (i.e., when it is different from
  // Location::NoLocation()), the offset value passed to
  // artReadBarrierSlow is adjusted to take `index` into account.
  void GenerateReadBarrierSlow(HInstruction* instruction,
                               Location out,
                               Location ref,
                               Location obj,
                               uint32_t offset,
                               Location index = Location::NoLocation());

  // If read barriers are enabled, generate a read barrier for a heap
  // reference using a slow path. If heap poisoning is enabled, also
  // unpoison the reference in `out`.
  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
                                    Location out,
                                    Location ref,
                                    Location obj,
                                    uint32_t offset,
                                    Location index = Location::NoLocation());

  // Generate a read barrier for a GC root within `instruction` using
  // a slow path.
  //
  // A read barrier for an object reference GC root is implemented as
  // a call to the artReadBarrierForRootSlow runtime entry point,
  // which is passed the value in location `root`:
  //
  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
  //
  // The `out` location contains the value returned by
  // artReadBarrierForRootSlow.
  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

  int ConstantAreaStart() const {
    return constant_area_start_;
  }

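  // Addresses of floating-point and integer literals placed in the constant
  // area of the generated code (see ConstantAreaStart() above).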
  Address LiteralDoubleAddress(double v);
  Address LiteralFloatAddress(float v);
  Address LiteralInt32Address(int32_t v);
  Address LiteralInt64Address(int64_t v);

  // Load a 32/64-bit value into a register in the most efficient manner.
  void Load32BitValue(CpuRegister dest, int32_t value);
  void Load64BitValue(CpuRegister dest, int64_t value);
  void Load32BitValue(XmmRegister dest, int32_t value);
  void Load64BitValue(XmmRegister dest, int64_t value);
  void Load32BitValue(XmmRegister dest, float value);
  void Load64BitValue(XmmRegister dest, double value);

  // Compare a register with a 32/64-bit value in the most efficient manner.
  void Compare32BitValue(CpuRegister dest, int32_t value);
  void Compare64BitValue(CpuRegister dest, int64_t value);

  Address LiteralCaseTable(HPackedSwitch* switch_instr);

  // Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
  void Store64BitValueToStack(Location dest, int64_t value);

  // Assign a 64 bit constant to an address.
  void MoveInt64ToAddress(const Address& addr_low,
                          const Address& addr_high,
                          int64_t v,
                          HInstruction* instruction);

  // Ensure that prior stores complete to memory before subsequent loads.
  // The locked add implementation will avoid serializing device memory, but will
  // touch (but not change) the top of the stack. The locked add should not be used for
  // ordering non-temporal stores.
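  //
  // For reference, when locked-add synchronization is preferred this emits
  //   lock addl $0, 0(%rsp)
  // and otherwise (or when `force_mfence` is true) it emits
  //   mfence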
  void MemoryFence(bool force_mfence = false) {
    if (!force_mfence && isa_features_.PrefersLockedAddSynchronization()) {
      assembler_.lock()->addl(Address(CpuRegister(RSP), 0), Immediate(0));
    } else {
      assembler_.mfence();
    }
  }

  void GenerateNop();
  void GenerateImplicitNullCheck(HNullCheck* instruction);
  void GenerateExplicitNullCheck(HNullCheck* instruction);

  // When we don't know the proper offset for the value, we use kDummy32BitOffset.
  // We will fix this up in the linker later to have the right value.
  static constexpr int32_t kDummy32BitOffset = 256;

 private:
  // Factored implementation of GenerateFieldLoadWithBakerReadBarrier
  // and GenerateArrayLoadWithBakerReadBarrier.
  void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                 Location ref,
                                                 CpuRegister obj,
                                                 const Address& src,
                                                 Location temp,
                                                 bool needs_null_check);

  struct PcRelativeDexCacheAccessInfo {
    PcRelativeDexCacheAccessInfo(const DexFile& dex_file, uint32_t element_off)
        : target_dex_file(dex_file), element_offset(element_off), label() { }

    const DexFile& target_dex_file;
    uint32_t element_offset;
    Label label;
  };

  // Labels for each block that will be compiled.
  Label* block_labels_;  // Indexed by block id.
  Label frame_entry_label_;
  LocationsBuilderX86_64 location_builder_;
  InstructionCodeGeneratorX86_64 instruction_visitor_;
  ParallelMoveResolverX86_64 move_resolver_;
  X86_64Assembler assembler_;
  const X86_64InstructionSetFeatures& isa_features_;

  // Offset to the start of the constant area in the assembled code.
  // Used for fixups to the constant area.
  int constant_area_start_;

  // Method patch info. Using ArenaDeque<> which retains element addresses on push/emplace_back().
  ArenaDeque<MethodPatchInfo<Label>> method_patches_;
  ArenaDeque<MethodPatchInfo<Label>> relative_call_patches_;
  // PC-relative DexCache access info.
  ArenaDeque<PcRelativeDexCacheAccessInfo> pc_relative_dex_cache_patches_;
  // Patch locations for patchoat where the linker doesn't do any other work.
  ArenaDeque<Label> simple_patches_;
  // String patch locations.
  ArenaDeque<StringPatchInfo<Label>> string_patches_;

  // Fixups for jump tables need to be handled specially.
  ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;

  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86_64);
};

}  // namespace x86_64
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_