// Home | History | Annotate | Download | only in optimizing
      1 /*
      2  * Copyright (C) 2014 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_H_
     18 #define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_H_
     19 
     20 #include "code_generator.h"
     21 #include "dex/compiler_enums.h"
     22 #include "driver/compiler_options.h"
     23 #include "nodes.h"
     24 #include "parallel_move_resolver.h"
     25 #include "utils/arm/assembler_thumb2.h"
     26 #include "utils/string_reference.h"
     27 
     28 namespace art {
     29 namespace arm {
     30 
     31 class CodeGeneratorARM;
     32 
// Use a local definition to prevent copying mistakes.
static constexpr size_t kArmWordSize = kArmPointerSize;
static constexpr size_t kArmBitsPerWord = kArmWordSize * kBitsPerByte;

// Core registers used to pass managed-code (dex) method arguments.
// R0 is excluded: it carries the ArtMethod* (see kArtMethodRegister below).
static constexpr Register kParameterCoreRegisters[] = { R1, R2, R3 };
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
// Single-precision FP registers used to pass float/double managed-code arguments.
static constexpr SRegister kParameterFpuRegisters[] =
    { S0, S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, S12, S13, S14, S15 };
static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);

// Register holding the current ArtMethod* on entry to managed code.
static constexpr Register kArtMethodRegister = R0;

// Registers used to pass arguments when calling runtime entry points.
static constexpr Register kRuntimeParameterCoreRegisters[] = { R0, R1, R2, R3 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);
static constexpr SRegister kRuntimeParameterFpuRegisters[] = { S0, S1, S2, S3 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);
     51 
// Calling convention used when calling into runtime entry points:
// arguments go in R0-R3 and S0-S3 (the kRuntimeParameter* arrays above).
class InvokeRuntimeCallingConvention : public CallingConvention<Register, SRegister> {
 public:
  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength,
                          kArmPointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};
     64 
// Returns the D register overlapping the given even-numbered S register
// (on VFP, D<n> aliases the pair S<2n>/S<2n+1>, hence the division by 2).
// DCHECK_CONSTEXPR is used so the evenness check stays compatible with the
// C++11 single-return-statement constexpr restriction.
static constexpr DRegister FromLowSToD(SRegister reg) {
  return DCHECK_CONSTEXPR(reg % 2 == 0, , D0)
      static_cast<DRegister>(reg / 2);
}
     69 
     70 
// Calling convention used when invoking managed (dex) methods:
// arguments go in R1-R3 and S0-S15 (the kParameter* arrays above).
class InvokeDexCallingConvention : public CallingConvention<Register, SRegister> {
 public:
  InvokeDexCallingConvention()
      : CallingConvention(kParameterCoreRegisters,
                          kParameterCoreRegistersLength,
                          kParameterFpuRegisters,
                          kParameterFpuRegistersLength,
                          kArmPointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};
     83 
// Computes the Location (register or stack slot) of each argument and of the
// return value for a managed-method invocation, following
// InvokeDexCallingConvention. Implementations live in the .cc file.
class InvokeDexCallingConventionVisitorARM : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorARM() {}
  virtual ~InvokeDexCallingConventionVisitorARM() {}

  Location GetNextLocation(Primitive::Type type) OVERRIDE;
  Location GetReturnLocation(Primitive::Type type) const OVERRIDE;
  Location GetMethodLocation() const OVERRIDE;

 private:
  InvokeDexCallingConvention calling_convention;
  // NOTE(review): presumably the next D-register index to hand out for a
  // double argument — confirm against GetNextLocation in the .cc file.
  uint32_t double_index_ = 0;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARM);
};
     99 
    100 class FieldAccessCallingConventionARM : public FieldAccessCallingConvention {
    101  public:
    102   FieldAccessCallingConventionARM() {}
    103 
    104   Location GetObjectLocation() const OVERRIDE {
    105     return Location::RegisterLocation(R1);
    106   }
    107   Location GetFieldIndexLocation() const OVERRIDE {
    108     return Location::RegisterLocation(R0);
    109   }
    110   Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
    111     return Primitive::Is64BitType(type)
    112         ? Location::RegisterPairLocation(R0, R1)
    113         : Location::RegisterLocation(R0);
    114   }
    115   Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
    116     return Primitive::Is64BitType(type)
    117         ? Location::RegisterPairLocation(R2, R3)
    118         : (is_instance
    119             ? Location::RegisterLocation(R2)
    120             : Location::RegisterLocation(R1));
    121   }
    122   Location GetFpuLocation(Primitive::Type type) const OVERRIDE {
    123     return Primitive::Is64BitType(type)
    124         ? Location::FpuRegisterPairLocation(S0, S1)
    125         : Location::FpuRegisterLocation(S0);
    126   }
    127 
    128  private:
    129   DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM);
    130 };
    131 
// Resolves parallel-move operations for the ARM backend using the
// swap-based strategy of ParallelMoveResolverWithSwap.
class ParallelMoveResolverARM : public ParallelMoveResolverWithSwap {
 public:
  ParallelMoveResolverARM(ArenaAllocator* allocator, CodeGeneratorARM* codegen)
      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}

  void EmitMove(size_t index) OVERRIDE;
  void EmitSwap(size_t index) OVERRIDE;
  void SpillScratch(int reg) OVERRIDE;
  void RestoreScratch(int reg) OVERRIDE;

  ArmAssembler* GetAssembler() const;

 private:
  // Swap a core register with a stack slot.
  void Exchange(Register reg, int mem);
  // Swap two stack slots with each other.
  void Exchange(int mem1, int mem2);

  CodeGeneratorARM* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverARM);
};
    152 
// First code-generation pass: visits every HInstruction and builds its
// LocationSummary (register/stack constraints for inputs, output and temps)
// that the instruction code generator later consumes.
class LocationsBuilderARM : public HGraphVisitor {
 public:
  LocationsBuilderARM(HGraph* graph, CodeGeneratorARM* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

// Declares one Visit method per concrete instruction kind.
#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  // Catch-all: reaching this means an instruction kind has no dedicated
  // visitor above, which is a compiler bug — abort loudly.
  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

 private:
  // Shared helpers for families of instructions that need identical
  // location constraints.
  void HandleInvoke(HInvoke* invoke);
  void HandleBitwiseOperation(HBinaryOperation* operation, Opcode opcode);
  void HandleCondition(HCondition* condition);
  void HandleIntegerRotate(LocationSummary* locations);
  void HandleLongRotate(LocationSummary* locations);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);

  // Helpers deciding whether a constant operand can be encoded as an
  // immediate in the given ARM opcode (otherwise it needs a register).
  Location ArmEncodableConstantOrRegister(HInstruction* constant, Opcode opcode);
  bool CanEncodeConstantAsImmediate(HConstant* input_cst, Opcode opcode);
  bool CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode);

  CodeGeneratorARM* const codegen_;
  InvokeDexCallingConventionVisitorARM parameter_visitor_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM);
};
    191 
// Second code-generation pass: visits every HInstruction and emits the
// actual Thumb-2 code for it, honoring the LocationSummary produced by
// LocationsBuilderARM.
class InstructionCodeGeneratorARM : public InstructionCodeGenerator {
 public:
  InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen);

// Declares one Visit method per concrete instruction kind.
#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_SHARED(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  // Catch-all: reaching this means an instruction kind has no dedicated
  // visitor above, which is a compiler bug — abort loudly.
  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

  ArmAssembler* GetAssembler() const { return assembler_; }

 private:
  // Generate code for the given suspend check. If not null, `successor`
  // is the block to branch to if the suspend check is not needed, and after
  // the suspend call.
  void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
  void GenerateClassInitializationCheck(SlowPathCode* slow_path, Register class_reg);
  // Emit `out = first <op> value` for AND/ORR/EOR with an arbitrary 32-bit
  // constant (which may or may not be encodable as an immediate).
  void GenerateAndConst(Register out, Register first, uint32_t value);
  void GenerateOrrConst(Register out, Register first, uint32_t value);
  void GenerateEorConst(Register out, Register first, uint32_t value);
  // Shared emitters for families of instructions; each mirrors the
  // same-named Handle* method in LocationsBuilderARM.
  void HandleBitwiseOperation(HBinaryOperation* operation);
  void HandleCondition(HCondition* condition);
  void HandleIntegerRotate(LocationSummary* locations);
  void HandleLongRotate(LocationSummary* locations);
  void HandleShift(HBinaryOperation* operation);

  // Atomic 64-bit store/load of value_lo/value_hi at addr+offset.
  void GenerateWideAtomicStore(Register addr, uint32_t offset,
                               Register value_lo, Register value_hi,
                               Register temp1, Register temp2,
                               HInstruction* instruction);
  void GenerateWideAtomicLoad(Register addr, uint32_t offset,
                              Register out_lo, Register out_hi);

  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      bool value_can_be_null);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);

  // Generate a heap reference load using one register `out`:
  //
  //   out <- *(out + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a read barrier and
  // shall be a register in that case; it may be an invalid location
  // otherwise.
  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                        Location out,
                                        uint32_t offset,
                                        Location maybe_temp);
  // Generate a heap reference load using two different registers
  // `out` and `obj`:
  //
  //   out <- *(obj + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a Baker's (fast
  // path) read barrier and shall be a register in that case; it may
  // be an invalid location otherwise.
  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
                                         Location maybe_temp);
  // Generate a GC root reference load:
  //
  //   root <- *(obj + offset)
  //
  // while honoring read barriers (if any).
  void GenerateGcRootFieldLoad(HInstruction* instruction,
                               Location root,
                               Register obj,
                               uint32_t offset);
  // Branch to true_target/false_target depending on the condition input
  // of `instruction` (either label may be null if it falls through).
  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             Label* true_target,
                             Label* false_target);
  void GenerateCompareTestAndBranch(HCondition* condition,
                                    Label* true_target,
                                    Label* false_target);
  void GenerateFPJumps(HCondition* cond, Label* true_label, Label* false_label);
  void GenerateLongComparesAndJumps(HCondition* cond, Label* true_label, Label* false_label);
  // Specialized division/remainder emitters for constant divisors.
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivRemByPowerOfTwo(HBinaryOperation* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateDivRemConstantIntegral(HBinaryOperation* instruction);
  void HandleGoto(HInstruction* got, HBasicBlock* successor);

  ArmAssembler* const assembler_;
  CodeGeneratorARM* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorARM);
};
    296 
// ARM (Thumb-2) backend of the optimizing compiler's code generator.
// Owns the assembler, the two visitor passes (locations builder and
// instruction code generator), the parallel move resolver, and all
// linker-patch / literal-deduplication bookkeeping.
class CodeGeneratorARM : public CodeGenerator {
 public:
  CodeGeneratorARM(HGraph* graph,
                   const ArmInstructionSetFeatures& isa_features,
                   const CompilerOptions& compiler_options,
                   OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorARM() {}

  void GenerateFrameEntry() OVERRIDE;
  void GenerateFrameExit() OVERRIDE;
  void Bind(HBasicBlock* block) OVERRIDE;
  void MoveConstant(Location destination, int32_t value) OVERRIDE;
  void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;

  // Register save/restore used around slow paths; each returns the size
  // consumed at `stack_index` (see the base-class contract in
  // code_generator.h).
  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;

  size_t GetWordSize() const OVERRIDE {
    return kArmWordSize;
  }

  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
    // Allocated in S registers, which are word sized.
    return kArmWordSize;
  }

  HGraphVisitor* GetLocationBuilder() OVERRIDE {
    return &location_builder_;
  }

  HGraphVisitor* GetInstructionVisitor() OVERRIDE {
    return &instruction_visitor_;
  }

  ArmAssembler* GetAssembler() OVERRIDE {
    return &assembler_;
  }

  const ArmAssembler& GetAssembler() const OVERRIDE {
    return assembler_;
  }

  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
    return GetLabelOf(block)->Position();
  }

  void SetupBlockedRegisters() const OVERRIDE;

  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;

  // Blocks all register pairs made out of blocked core registers.
  void UpdateBlockedPairRegisters() const;

  ParallelMoveResolverARM* GetMoveResolver() OVERRIDE {
    return &move_resolver_;
  }

  InstructionSet GetInstructionSet() const OVERRIDE {
    return InstructionSet::kThumb2;
  }

  // Helper method to move a 32bits value between two locations.
  void Move32(Location destination, Location source);
  // Helper method to move a 64bits value between two locations.
  void Move64(Location destination, Location source);

  // Generate code to invoke a runtime entry point.
  void InvokeRuntime(QuickEntrypointEnum entrypoint,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path) OVERRIDE;

  // Overload taking a raw entry-point offset instead of an enum value.
  void InvokeRuntime(int32_t offset,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path);

  // Emit a write barrier.
  void MarkGCCard(Register temp, Register card, Register object, Register value, bool can_be_null);

  void GenerateMemoryBarrier(MemBarrierKind kind);

  Label* GetLabelOf(HBasicBlock* block) const {
    return CommonGetLabelOf<Label>(block_labels_, block);
  }

  void Initialize() OVERRIDE {
    block_labels_ = CommonInitializeLabels<Label>();
  }

  void Finalize(CodeAllocator* allocator) OVERRIDE;

  const ArmInstructionSetFeatures& GetInstructionSetFeatures() const {
    return isa_features_;
  }

  // Longs and doubles need a register pair on ARM.
  bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE {
    return type == Primitive::kPrimDouble || type == Primitive::kPrimLong;
  }

  void ComputeSpillMask() OVERRIDE;

  Label* GetFrameEntryLabel() { return &frame_entry_label_; }

  // Check if the desired_string_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadString::LoadKind GetSupportedLoadStringKind(
      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;

  // Check if the desired_dispatch_info is supported. If it is, return it,
  // otherwise return a fall-back info that should be used instead.
  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      MethodReference target_method) OVERRIDE;

  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
  void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;

  void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;

  // The PcRelativePatchInfo is used for PC-relative addressing of dex cache arrays
  // and boot image strings. The only difference is the interpretation of the offset_or_index.
  // The PC-relative address is loaded with three instructions, MOVW+MOVT
  // to load the offset to base_reg and then ADD base_reg, PC. The offset is
  // calculated from the ADD's effective PC, i.e. PC+4 on Thumb2. Though we
  // currently emit these 3 instructions together, instruction scheduling could
  // split this sequence apart, so we keep separate labels for each of them.
  struct PcRelativePatchInfo {
    PcRelativePatchInfo(const DexFile& dex_file, uint32_t off_or_idx)
        : target_dex_file(dex_file), offset_or_index(off_or_idx) { }
    PcRelativePatchInfo(PcRelativePatchInfo&& other) = default;

    const DexFile& target_dex_file;
    // Either the dex cache array element offset or the string index.
    uint32_t offset_or_index;
    Label movw_label;
    Label movt_label;
    Label add_pc_label;
  };

  PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file, uint32_t string_index);
  PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
                                                       uint32_t element_offset);
  // Literal-pool entries deduplicated per value; see the maps below.
  Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file, uint32_t string_index);
  Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
  Literal* DeduplicateDexCacheAddressLiteral(uint32_t address);

  void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;

  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             Register obj,
                                             uint32_t offset,
                                             Location temp,
                                             bool needs_null_check);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference array load when Baker's read barriers are used.
  void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             Register obj,
                                             uint32_t data_offset,
                                             Location index,
                                             Location temp,
                                             bool needs_null_check);

  // Generate a read barrier for a heap reference within `instruction`
  // using a slow path.
  //
  // A read barrier for an object reference read from the heap is
  // implemented as a call to the artReadBarrierSlow runtime entry
  // point, which is passed the values in locations `ref`, `obj`, and
  // `offset`:
  //
  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
  //                                      mirror::Object* obj,
  //                                      uint32_t offset);
  //
  // The `out` location contains the value returned by
  // artReadBarrierSlow.
  //
  // When `index` is provided (i.e. for array accesses), the offset
  // value passed to artReadBarrierSlow is adjusted to take `index`
  // into account.
  void GenerateReadBarrierSlow(HInstruction* instruction,
                               Location out,
                               Location ref,
                               Location obj,
                               uint32_t offset,
                               Location index = Location::NoLocation());

  // If read barriers are enabled, generate a read barrier for a heap
  // reference using a slow path. If heap poisoning is enabled, also
  // unpoison the reference in `out`.
  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
                                    Location out,
                                    Location ref,
                                    Location obj,
                                    uint32_t offset,
                                    Location index = Location::NoLocation());

  // Generate a read barrier for a GC root within `instruction` using
  // a slow path.
  //
  // A read barrier for an object reference GC root is implemented as
  // a call to the artReadBarrierForRootSlow runtime entry point,
  // which is passed the value in location `root`:
  //
  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
  //
  // The `out` location contains the value returned by
  // artReadBarrierForRootSlow.
  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

  void GenerateNop();

  void GenerateImplicitNullCheck(HNullCheck* instruction);
  void GenerateExplicitNullCheck(HNullCheck* instruction);

 private:
  // Factored implementation of GenerateFieldLoadWithBakerReadBarrier
  // and GenerateArrayLoadWithBakerReadBarrier.
  void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                 Location ref,
                                                 Register obj,
                                                 uint32_t offset,
                                                 Location index,
                                                 Location temp,
                                                 bool needs_null_check);

  Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);

  using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, Literal*>;
  using MethodToLiteralMap = ArenaSafeMap<MethodReference, Literal*, MethodReferenceComparator>;
  using BootStringToLiteralMap = ArenaSafeMap<StringReference,
                                              Literal*,
                                              StringReferenceValueComparator>;

  Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
  Literal* DeduplicateMethodLiteral(MethodReference target_method, MethodToLiteralMap* map);
  Literal* DeduplicateMethodAddressLiteral(MethodReference target_method);
  Literal* DeduplicateMethodCodeLiteral(MethodReference target_method);
  PcRelativePatchInfo* NewPcRelativePatch(const DexFile& dex_file,
                                          uint32_t offset_or_index,
                                          ArenaDeque<PcRelativePatchInfo>* patches);

  // Labels for each block that will be compiled.
  Label* block_labels_;  // Indexed by block id.
  Label frame_entry_label_;
  LocationsBuilderARM location_builder_;
  InstructionCodeGeneratorARM instruction_visitor_;
  ParallelMoveResolverARM move_resolver_;
  Thumb2Assembler assembler_;
  const ArmInstructionSetFeatures& isa_features_;

  // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
  Uint32ToLiteralMap uint32_literals_;
  // Method patch info, map MethodReference to a literal for method address and method code.
  MethodToLiteralMap method_patches_;
  MethodToLiteralMap call_patches_;
  // Relative call patch info.
  // Using ArenaDeque<> which retains element addresses on push/emplace_back().
  ArenaDeque<MethodPatchInfo<Label>> relative_call_patches_;
  // PC-relative patch info for each HArmDexCacheArraysBase.
  ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
  // Deduplication map for boot string literals for kBootImageLinkTimeAddress.
  BootStringToLiteralMap boot_image_string_patches_;
  // PC-relative String patch info.
  ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
  // Deduplication map for patchable boot image addresses.
  Uint32ToLiteralMap boot_image_address_patches_;

  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM);
};
    576 
    577 }  // namespace arm
    578 }  // namespace art
    579 
    580 #endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_H_
    581