// ART optimizing compiler: code generator declarations for 32-bit MIPS.
/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
     16 
     17 #ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
     18 #define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
     19 
     20 #include "code_generator.h"
     21 #include "dex_file_types.h"
     22 #include "driver/compiler_options.h"
     23 #include "nodes.h"
     24 #include "parallel_move_resolver.h"
     25 #include "string_reference.h"
     26 #include "utils/mips/assembler_mips.h"
     27 #include "utils/type_reference.h"
     28 
     29 namespace art {
     30 namespace mips {
     31 
// InvokeDexCallingConvention registers

// Core (GPR) argument registers used when invoking Java (dex) methods.
static constexpr Register kParameterCoreRegisters[] =
    { A1, A2, A3, T0, T1 };
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);

// FPU argument registers used when invoking Java (dex) methods.
static constexpr FRegister kParameterFpuRegisters[] =
    { F8, F10, F12, F14, F16, F18 };
static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);


// InvokeRuntimeCallingConvention registers

// Core (GPR) argument registers for calls into ART runtime entry points (A0-A3).
static constexpr Register kRuntimeParameterCoreRegisters[] =
    { A0, A1, A2, A3 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);

// FPU argument registers for calls into ART runtime entry points.
static constexpr FRegister kRuntimeParameterFpuRegisters[] =
    { F12, F14 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);


// Callee-saved registers; generated code must preserve any of these it uses
// (see CodeGeneratorMIPS::ComputeSpillMask / GenerateFrameEntry).
static constexpr Register kCoreCalleeSaves[] =
    { S0, S1, S2, S3, S4, S5, S6, S7, FP, RA };
static constexpr FRegister kFpuCalleeSaves[] =
    { F20, F22, F24, F26, F28, F30 };


class CodeGeneratorMIPS;
     63 
// Calling convention for invoking Java (dex) methods: binds the kParameter*
// register arrays above to the generic CallingConvention template.
class InvokeDexCallingConvention : public CallingConvention<Register, FRegister> {
 public:
  InvokeDexCallingConvention()
      : CallingConvention(kParameterCoreRegisters,
                          kParameterCoreRegistersLength,
                          kParameterFpuRegisters,
                          kParameterFpuRegistersLength,
                          kMipsPointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};
     76 
// Assigns argument and return-value locations for Java (dex) method
// invocations on MIPS32 (implementations live in the .cc file).
class InvokeDexCallingConventionVisitorMIPS : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorMIPS() {}
  virtual ~InvokeDexCallingConventionVisitorMIPS() {}

  // Returns the location (register(s) or stack slot) of the next argument of `type`.
  Location GetNextLocation(Primitive::Type type) OVERRIDE;
  // Returns the location in which a value of `type` is returned.
  Location GetReturnLocation(Primitive::Type type) const OVERRIDE;
  // Returns the location holding the current ArtMethod*.
  Location GetMethodLocation() const OVERRIDE;

 private:
  // NOTE(review): missing the trailing '_' of the member-naming convention used
  // elsewhere in this file; renaming requires a matching .cc change.
  InvokeDexCallingConvention calling_convention;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorMIPS);
};
     91 
// Calling convention for calls into ART runtime entry points: binds the
// kRuntimeParameter* register arrays above to the CallingConvention template.
class InvokeRuntimeCallingConvention : public CallingConvention<Register, FRegister> {
 public:
  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength,
                          kMipsPointerSize) {}

  // Returns the location in which a runtime call returns a value of `return_type`.
  Location GetReturnLocation(Primitive::Type return_type);

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};
    106 
    107 class FieldAccessCallingConventionMIPS : public FieldAccessCallingConvention {
    108  public:
    109   FieldAccessCallingConventionMIPS() {}
    110 
    111   Location GetObjectLocation() const OVERRIDE {
    112     return Location::RegisterLocation(A1);
    113   }
    114   Location GetFieldIndexLocation() const OVERRIDE {
    115     return Location::RegisterLocation(A0);
    116   }
    117   Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
    118     return Primitive::Is64BitType(type)
    119         ? Location::RegisterPairLocation(V0, V1)
    120         : Location::RegisterLocation(V0);
    121   }
    122   Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
    123     return Primitive::Is64BitType(type)
    124         ? Location::RegisterPairLocation(A2, A3)
    125         : (is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1));
    126   }
    127   Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    128     return Location::FpuRegisterLocation(F0);
    129   }
    130 
    131  private:
    132   DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionMIPS);
    133 };
    134 
// Resolves parallel moves (simultaneous location-to-location transfers emitted
// by the register allocator) using swaps; emission is implemented in the .cc.
class ParallelMoveResolverMIPS : public ParallelMoveResolverWithSwap {
 public:
  ParallelMoveResolverMIPS(ArenaAllocator* allocator, CodeGeneratorMIPS* codegen)
      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}

  void EmitMove(size_t index) OVERRIDE;
  void EmitSwap(size_t index) OVERRIDE;
  void SpillScratch(int reg) OVERRIDE;
  void RestoreScratch(int reg) OVERRIDE;

  // Exchanges two stack slots; `double_slot` selects 64-bit (two-word) width.
  void Exchange(int index1, int index2, bool double_slot);

  MipsAssembler* GetAssembler() const;

 private:
  CodeGeneratorMIPS* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverMIPS);
};
    154 
// Base class for MIPS slow paths: out-of-line code reached from the fast path
// via `entry_label_` and returning to it via `exit_label_`.
class SlowPathCodeMIPS : public SlowPathCode {
 public:
  explicit SlowPathCodeMIPS(HInstruction* instruction)
      : SlowPathCode(instruction), entry_label_(), exit_label_() {}

  MipsLabel* GetEntryLabel() { return &entry_label_; }
  MipsLabel* GetExitLabel() { return &exit_label_; }

 private:
  MipsLabel entry_label_;
  MipsLabel exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeMIPS);
};
    169 
// First code-generation pass: visits every HInstruction and records its
// register/stack constraints (LocationSummary) for the allocator and for
// InstructionCodeGeneratorMIPS.
class LocationsBuilderMIPS : public HGraphVisitor {
 public:
  LocationsBuilderMIPS(HGraph* graph, CodeGeneratorMIPS* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

// One Visit declaration per concrete instruction, expanded from the
// FOR_EACH_CONCRETE_INSTRUCTION_* lists in nodes.h.
#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  // Reaching this catch-all means an instruction has no concrete visitor —
  // a compiler bug, hence the fatal log.
  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

 private:
  // Shared helpers for instruction families handled uniformly (defined in the .cc).
  void HandleInvoke(HInvoke* invoke);
  void HandleBinaryOp(HBinaryOperation* operation);
  void HandleCondition(HCondition* instruction);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
  // Location pickers used when an operand may be encoded as a constant
  // instead of occupying a register (see .cc for the exact rules).
  Location RegisterOrZeroConstant(HInstruction* instruction);
  Location FpuRegisterOrConstantForStore(HInstruction* instruction);

  InvokeDexCallingConventionVisitorMIPS parameter_visitor_;

  CodeGeneratorMIPS* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS);
};
    204 
// Second code-generation pass: visits every HInstruction and emits MIPS32
// machine code through `assembler_`, using the locations computed by
// LocationsBuilderMIPS.
class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator {
 public:
  InstructionCodeGeneratorMIPS(HGraph* graph, CodeGeneratorMIPS* codegen);

// One Visit declaration per concrete instruction, expanded from the
// FOR_EACH_CONCRETE_INSTRUCTION_* lists in nodes.h.
#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  // Reaching this catch-all means an instruction has no concrete visitor —
  // a compiler bug, hence the fatal log.
  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

  MipsAssembler* GetAssembler() const { return assembler_; }

  // Compare-and-jump packed switch generates approx. 3 + 2.5 * N 32-bit
  // instructions for N cases.
  // Table-based packed switch generates approx. 11 32-bit instructions
  // and N 32-bit data words for N cases.
  // At N = 6 they come out as 18 and 17 32-bit words respectively.
  // We switch to the table-based method starting with 7 cases.
  static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6;

 private:
  void GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path, Register class_reg);
  void GenerateMemoryBarrier(MemBarrierKind kind);
  void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
  // Shared emitters for instruction families handled uniformly (defined in the .cc).
  void HandleBinaryOp(HBinaryOperation* operation);
  void HandleCondition(HCondition* instruction);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      uint32_t dex_pc,
                      bool value_can_be_null);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);

  // Generate a heap reference load using one register `out`:
  //
  //   out <- *(out + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a read barrier and
  // shall be a register in that case; it may be an invalid location
  // otherwise.
  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                        Location out,
                                        uint32_t offset,
                                        Location maybe_temp,
                                        ReadBarrierOption read_barrier_option);
  // Generate a heap reference load using two different registers
  // `out` and `obj`:
  //
  //   out <- *(obj + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a Baker's (fast
  // path) read barrier and shall be a register in that case; it may
  // be an invalid location otherwise.
  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
                                         Location maybe_temp,
                                         ReadBarrierOption read_barrier_option);

  // Generate a GC root reference load:
  //
  //   root <- *(obj + offset)
  //
  // while honoring read barriers (if any).
  void GenerateGcRootFieldLoad(HInstruction* instruction,
                               Location root,
                               Register obj,
                               uint32_t offset,
                               ReadBarrierOption read_barrier_option);

  void GenerateIntCompare(IfCondition cond, LocationSummary* locations);
  // When the function returns `false` it means that the condition holds if `dst` is non-zero
  // and doesn't hold if `dst` is zero. If it returns `true`, the roles of zero and non-zero
  // `dst` are exchanged.
  bool MaterializeIntCompare(IfCondition cond,
                             LocationSummary* input_locations,
                             Register dst);
  void GenerateIntCompareAndBranch(IfCondition cond,
                                   LocationSummary* locations,
                                   MipsLabel* label);
  void GenerateLongCompareAndBranch(IfCondition cond,
                                    LocationSummary* locations,
                                    MipsLabel* label);
  void GenerateFpCompare(IfCondition cond,
                         bool gt_bias,
                         Primitive::Type type,
                         LocationSummary* locations);
  // NOTE(review): the R2/R6 suffixes below presumably select MIPS32r2 vs.
  // MIPS32r6 instruction sequences — confirm against the .cc implementations.
  // When the function returns `false` it means that the condition holds if the condition
  // code flag `cc` is non-zero and doesn't hold if `cc` is zero. If it returns `true`,
  // the roles of zero and non-zero values of the `cc` flag are exchanged.
  bool MaterializeFpCompareR2(IfCondition cond,
                              bool gt_bias,
                              Primitive::Type type,
                              LocationSummary* input_locations,
                              int cc);
  // When the function returns `false` it means that the condition holds if `dst` is non-zero
  // and doesn't hold if `dst` is zero. If it returns `true`, the roles of zero and non-zero
  // `dst` are exchanged.
  bool MaterializeFpCompareR6(IfCondition cond,
                              bool gt_bias,
                              Primitive::Type type,
                              LocationSummary* input_locations,
                              FRegister dst);
  void GenerateFpCompareAndBranch(IfCondition cond,
                                  bool gt_bias,
                                  Primitive::Type type,
                                  LocationSummary* locations,
                                  MipsLabel* label);
  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             MipsLabel* true_target,
                             MipsLabel* false_target);
  // Division/remainder strength-reduction helpers for constant divisors.
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivRemByPowerOfTwo(HBinaryOperation* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateDivRemIntegral(HBinaryOperation* instruction);
  void HandleGoto(HInstruction* got, HBasicBlock* successor);
  // Packed-switch emitters; choice between them is governed by
  // kPackedSwitchJumpTableThreshold above.
  void GenPackedSwitchWithCompares(Register value_reg,
                                   int32_t lower_bound,
                                   uint32_t num_entries,
                                   HBasicBlock* switch_block,
                                   HBasicBlock* default_block);
  void GenTableBasedPackedSwitch(Register value_reg,
                                 Register constant_area,
                                 int32_t lower_bound,
                                 uint32_t num_entries,
                                 HBasicBlock* switch_block,
                                 HBasicBlock* default_block);
  void GenConditionalMoveR2(HSelect* select);
  void GenConditionalMoveR6(HSelect* select);

  MipsAssembler* const assembler_;
  CodeGeneratorMIPS* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorMIPS);
};
    353 
// Top-level code generator for 32-bit MIPS: owns the assembler, the two
// visitor passes, the parallel-move resolver, and the tables of linker/JIT
// patches and deduplicated literals produced during compilation.
class CodeGeneratorMIPS : public CodeGenerator {
 public:
  CodeGeneratorMIPS(HGraph* graph,
                    const MipsInstructionSetFeatures& isa_features,
                    const CompilerOptions& compiler_options,
                    OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorMIPS() {}

  void ComputeSpillMask() OVERRIDE;
  bool HasAllocatedCalleeSaveRegisters() const OVERRIDE;
  void GenerateFrameEntry() OVERRIDE;
  void GenerateFrameExit() OVERRIDE;

  // Binds the label of `block` to the current assembler position.
  void Bind(HBasicBlock* block) OVERRIDE;

  // Moves between arbitrary locations, 32-bit and 64-bit wide respectively.
  void Move32(Location destination, Location source);
  void Move64(Location destination, Location source);
  void MoveConstant(Location location, HConstant* c);

  size_t GetWordSize() const OVERRIDE { return kMipsWordSize; }

  // FP registers are spilled as doublewords (see kFpuCalleeSaves: even-numbered
  // double-capable registers).
  size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return kMipsDoublewordSize; }

  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
    return assembler_.GetLabelLocation(GetLabelOf(block));
  }

  HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
  HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
  MipsAssembler* GetAssembler() OVERRIDE { return &assembler_; }
  const MipsAssembler& GetAssembler() const OVERRIDE { return assembler_; }

  // Emit linker patches.
  void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;

  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             Register obj,
                                             uint32_t offset,
                                             Location temp,
                                             bool needs_null_check);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference array load when Baker's read barriers are used.
  void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             Register obj,
                                             uint32_t data_offset,
                                             Location index,
                                             Location temp,
                                             bool needs_null_check);

  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
  // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
  //
  // Load the object reference located at the address
  // `obj + offset + (index << scale_factor)`, held by object `obj`, into
  // `ref`, and mark it if needed.
  //
  // If `always_update_field` is true, the value of the reference is
  // atomically updated in the holder (`obj`).
  void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                 Location ref,
                                                 Register obj,
                                                 uint32_t offset,
                                                 Location index,
                                                 ScaleFactor scale_factor,
                                                 Location temp,
                                                 bool needs_null_check,
                                                 bool always_update_field = false);

  // Generate a read barrier for a heap reference within `instruction`
  // using a slow path.
  //
  // A read barrier for an object reference read from the heap is
  // implemented as a call to the artReadBarrierSlow runtime entry
  // point, which is passed the values in locations `ref`, `obj`, and
  // `offset`:
  //
  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
  //                                      mirror::Object* obj,
  //                                      uint32_t offset);
  //
  // The `out` location contains the value returned by
  // artReadBarrierSlow.
  //
  // When `index` is provided (i.e. for array accesses), the offset
  // value passed to artReadBarrierSlow is adjusted to take `index`
  // into account.
  void GenerateReadBarrierSlow(HInstruction* instruction,
                               Location out,
                               Location ref,
                               Location obj,
                               uint32_t offset,
                               Location index = Location::NoLocation());

  // If read barriers are enabled, generate a read barrier for a heap
  // reference using a slow path. If heap poisoning is enabled, also
  // unpoison the reference in `out`.
  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
                                    Location out,
                                    Location ref,
                                    Location obj,
                                    uint32_t offset,
                                    Location index = Location::NoLocation());

  // Generate a read barrier for a GC root within `instruction` using
  // a slow path.
  //
  // A read barrier for an object reference GC root is implemented as
  // a call to the artReadBarrierForRootSlow runtime entry point,
  // which is passed the value in location `root`:
  //
  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
  //
  // The `out` location contains the value returned by
  // artReadBarrierForRootSlow.
  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

  // Marks the GC card corresponding to `object` after `value` was stored into it.
  void MarkGCCard(Register object, Register value, bool value_can_be_null);

  // Register allocation.

  void SetupBlockedRegisters() const OVERRIDE;

  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  // Records that RA is clobbered and must be preserved (see clobbered_ra_ below).
  void ClobberRA() {
    clobbered_ra_ = true;
  }

  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;

  InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips; }

  const MipsInstructionSetFeatures& GetInstructionSetFeatures() const {
    return isa_features_;
  }

  MipsLabel* GetLabelOf(HBasicBlock* block) const {
    return CommonGetLabelOf<MipsLabel>(block_labels_, block);
  }

  void Initialize() OVERRIDE {
    block_labels_ = CommonInitializeLabels<MipsLabel>();
  }

  void Finalize(CodeAllocator* allocator) OVERRIDE;

  // Code generation helpers.

  void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;

  void MoveConstant(Location destination, int32_t value) OVERRIDE;

  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;

  // Generate code to invoke a runtime entry point.
  void InvokeRuntime(QuickEntrypointEnum entrypoint,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path = nullptr) OVERRIDE;

  // Generate code to invoke a runtime entry point, but do not record
  // PC-related information in a stack map.
  void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
                                           HInstruction* instruction,
                                           SlowPathCode* slow_path,
                                           bool direct);

  void GenerateInvokeRuntime(int32_t entry_point_offset, bool direct);

  ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }

  // 64-bit values occupy a register pair on MIPS32.
  bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE {
    return type == Primitive::kPrimLong;
  }

  // Check if the desired_string_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadString::LoadKind GetSupportedLoadStringKind(
      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;

  // Check if the desired_class_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadClass::LoadKind GetSupportedLoadClassKind(
      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;

  // Check if the desired_dispatch_info is supported. If it is, return it,
  // otherwise return a fall-back info that should be used instead.
  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      HInvokeStaticOrDirect* invoke) OVERRIDE;

  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
  void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;

  void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
                              Primitive::Type type ATTRIBUTE_UNUSED) OVERRIDE {
    UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
  }

  void GenerateNop() OVERRIDE;
  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;

  // The PcRelativePatchInfo is used for PC-relative addressing of dex cache arrays
  // and boot image strings. The only difference is the interpretation of the offset_or_index.
  struct PcRelativePatchInfo {
    PcRelativePatchInfo(const DexFile& dex_file, uint32_t off_or_idx)
        : target_dex_file(dex_file), offset_or_index(off_or_idx) { }
    PcRelativePatchInfo(PcRelativePatchInfo&& other) = default;

    const DexFile& target_dex_file;
    // Either the dex cache array element offset or the string/type index.
    uint32_t offset_or_index;
    // Label for the instruction loading the most significant half of the offset that's added to PC
    // to form the base address (the least significant half is loaded with the instruction that
    // follows).
    MipsLabel high_label;
    // Label for the instruction corresponding to PC+0.
    MipsLabel pc_rel_label;
  };

  // Factories recording a new PC-relative patch of each flavor; the returned
  // info is owned by the corresponding ArenaDeque below.
  PcRelativePatchInfo* NewPcRelativeStringPatch(const DexFile& dex_file,
                                                dex::StringIndex string_index);
  PcRelativePatchInfo* NewPcRelativeTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
  PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file, dex::TypeIndex type_index);
  PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
                                                       uint32_t element_offset);
  Literal* DeduplicateBootImageStringLiteral(const DexFile& dex_file,
                                             dex::StringIndex string_index);
  Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, dex::TypeIndex type_index);
  Literal* DeduplicateBootImageAddressLiteral(uint32_t address);

  void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info, Register out, Register base);

  // The JitPatchInfo is used for JIT string and class loads.
  struct JitPatchInfo {
    JitPatchInfo(const DexFile& dex_file, uint64_t idx)
        : target_dex_file(dex_file), index(idx) { }
    JitPatchInfo(JitPatchInfo&& other) = default;

    const DexFile& target_dex_file;
    // String/type index.
    uint64_t index;
    // Label for the instruction loading the most significant half of the address.
    // The least significant half is loaded with the instruction that follows immediately.
    MipsLabel high_label;
  };

  void PatchJitRootUse(uint8_t* code,
                       const uint8_t* roots_data,
                       const JitPatchInfo& info,
                       uint64_t index_in_table) const;
  JitPatchInfo* NewJitRootStringPatch(const DexFile& dex_file,
                                      dex::StringIndex dex_index,
                                      Handle<mirror::String> handle);
  JitPatchInfo* NewJitRootClassPatch(const DexFile& dex_file,
                                     dex::TypeIndex dex_index,
                                     Handle<mirror::Class> handle);

 private:
  Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);

  using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, Literal*>;
  using MethodToLiteralMap = ArenaSafeMap<MethodReference, Literal*, MethodReferenceComparator>;
  using BootStringToLiteralMap = ArenaSafeMap<StringReference,
                                              Literal*,
                                              StringReferenceValueComparator>;
  using BootTypeToLiteralMap = ArenaSafeMap<TypeReference,
                                            Literal*,
                                            TypeReferenceValueComparator>;

  Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
  Literal* DeduplicateMethodLiteral(MethodReference target_method, MethodToLiteralMap* map);
  PcRelativePatchInfo* NewPcRelativePatch(const DexFile& dex_file,
                                          uint32_t offset_or_index,
                                          ArenaDeque<PcRelativePatchInfo>* patches);

  template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
  void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
                                   ArenaVector<LinkerPatch>* linker_patches);

  // Labels for each block that will be compiled.
  MipsLabel* block_labels_;
  MipsLabel frame_entry_label_;
  LocationsBuilderMIPS location_builder_;
  InstructionCodeGeneratorMIPS instruction_visitor_;
  ParallelMoveResolverMIPS move_resolver_;
  MipsAssembler assembler_;
  const MipsInstructionSetFeatures& isa_features_;

  // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
  Uint32ToLiteralMap uint32_literals_;
  // PC-relative patch info for each HMipsDexCacheArraysBase.
  ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
  // Deduplication map for boot string literals for kBootImageLinkTimeAddress.
  BootStringToLiteralMap boot_image_string_patches_;
  // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
  ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
  // Deduplication map for boot type literals for kBootImageLinkTimeAddress.
  BootTypeToLiteralMap boot_image_type_patches_;
  // PC-relative type patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
  // PC-relative type patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
  // Patches for string root accesses in JIT compiled code.
  ArenaDeque<JitPatchInfo> jit_string_patches_;
  // Patches for class root accesses in JIT compiled code.
  ArenaDeque<JitPatchInfo> jit_class_patches_;

  // PC-relative loads on R2 clobber RA, which may need to be preserved explicitly in leaf methods.
  // This is a flag set by pc_relative_fixups_mips and dex_cache_array_fixups_mips optimizations.
  bool clobbered_ra_;

  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS);
};
    677 
    678 }  // namespace mips
    679 }  // namespace art
    680 
    681 #endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
    682