/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_

#include "code_generator.h"
#include "dex/type_reference.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "utils/mips64/assembler_mips64.h"

namespace art {
namespace mips64 {

// InvokeDexCallingConvention registers

static constexpr GpuRegister kParameterCoreRegisters[] =
    { A1, A2, A3, A4, A5, A6, A7 };
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);

static constexpr FpuRegister kParameterFpuRegisters[] =
    { F13, F14, F15, F16, F17, F18, F19 };
static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);
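// Note that A0 does not appear above: in the managed-code calling convention it holds the
// ArtMethod* of the callee (see InvokeDexCallingConventionVisitorMIPS64::GetMethodLocation()),
// so explicit arguments start at A1 (and at F13 for floating-point values). As an illustrative
// sketch (the exact assignment for mixed signatures is made by GetNextLocation()), a method
// taking (int, long) would receive its arguments in A1 and A2.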


// InvokeRuntimeCallingConvention registers

static constexpr GpuRegister kRuntimeParameterCoreRegisters[] =
    { A0, A1, A2, A3, A4, A5, A6, A7 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);

static constexpr FpuRegister kRuntimeParameterFpuRegisters[] =
    { F12, F13, F14, F15, F16, F17, F18, F19 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);


static constexpr GpuRegister kCoreCalleeSaves[] =
    { S0, S1, S2, S3, S4, S5, S6, S7, GP, S8, RA };
static constexpr FpuRegister kFpuCalleeSaves[] =
    { F24, F25, F26, F27, F28, F29, F30, F31 };


class CodeGeneratorMIPS64;

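// Returns the MSA vector register overlapping the FPU register held by `location`
// (on MIPS64 with MSA, vector register Wn aliases FPU register Fn).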
VectorRegister VectorRegisterFrom(Location location);

class InvokeDexCallingConvention : public CallingConvention<GpuRegister, FpuRegister> {
 public:
  InvokeDexCallingConvention()
      : CallingConvention(kParameterCoreRegisters,
                          kParameterCoreRegistersLength,
                          kParameterFpuRegisters,
                          kParameterFpuRegistersLength,
                          kMips64PointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};

class InvokeDexCallingConventionVisitorMIPS64 : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorMIPS64() {}
  virtual ~InvokeDexCallingConventionVisitorMIPS64() {}

  Location GetNextLocation(DataType::Type type) OVERRIDE;
  Location GetReturnLocation(DataType::Type type) const OVERRIDE;
  Location GetMethodLocation() const OVERRIDE;

 private:
  InvokeDexCallingConvention calling_convention;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorMIPS64);
};

class InvokeRuntimeCallingConvention : public CallingConvention<GpuRegister, FpuRegister> {
 public:
  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength,
                          kMips64PointerSize) {}

  Location GetReturnLocation(DataType::Type return_type);

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

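// Calling convention used when an unresolved field access is compiled as a call into a
// runtime entry point instead of a direct load/store.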
class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention {
 public:
  FieldAccessCallingConventionMIPS64() {}

  Location GetObjectLocation() const OVERRIDE {
    return Location::RegisterLocation(A1);
  }
  Location GetFieldIndexLocation() const OVERRIDE {
    return Location::RegisterLocation(A0);
  }
  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    return Location::RegisterLocation(V0);
  }
  Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED,
                               bool is_instance) const OVERRIDE {
    return is_instance
        ? Location::RegisterLocation(A2)
        : Location::RegisterLocation(A1);
  }
  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
    return Location::FpuRegisterLocation(F0);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionMIPS64);
};

class ParallelMoveResolverMIPS64 : public ParallelMoveResolverWithSwap {
 public:
  ParallelMoveResolverMIPS64(ArenaAllocator* allocator, CodeGeneratorMIPS64* codegen)
      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}

  void EmitMove(size_t index) OVERRIDE;
  void EmitSwap(size_t index) OVERRIDE;
  void SpillScratch(int reg) OVERRIDE;
  void RestoreScratch(int reg) OVERRIDE;

  void Exchange(int index1, int index2, bool double_slot);
  void ExchangeQuadSlots(int index1, int index2);

  Mips64Assembler* GetAssembler() const;

 private:
  CodeGeneratorMIPS64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverMIPS64);
};

class SlowPathCodeMIPS64 : public SlowPathCode {
 public:
  explicit SlowPathCodeMIPS64(HInstruction* instruction)
      : SlowPathCode(instruction), entry_label_(), exit_label_() {}

  Mips64Label* GetEntryLabel() { return &entry_label_; }
  Mips64Label* GetExitLabel() { return &exit_label_; }

 private:
  Mips64Label entry_label_;
  Mips64Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeMIPS64);
};

class LocationsBuilderMIPS64 : public HGraphVisitor {
 public:
  LocationsBuilderMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

 private:
  void HandleInvoke(HInvoke* invoke);
  void HandleBinaryOp(HBinaryOperation* operation);
  void HandleCondition(HCondition* instruction);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
  Location RegisterOrZeroConstant(HInstruction* instruction);
  Location FpuRegisterOrConstantForStore(HInstruction* instruction);

  InvokeDexCallingConventionVisitorMIPS64 parameter_visitor_;

  CodeGeneratorMIPS64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS64);
};

class InstructionCodeGeneratorMIPS64 : public InstructionCodeGenerator {
 public:
  InstructionCodeGeneratorMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen);

#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) OVERRIDE;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) OVERRIDE {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

  Mips64Assembler* GetAssembler() const { return assembler_; }

  // Compare-and-jump packed switch generates approx. 3 + 2.5 * N 32-bit
  // instructions for N cases.
  // Table-based packed switch generates approx. 11 32-bit instructions
  // and N 32-bit data words for N cases.
  // At N = 6 they come out as 18 and 17 32-bit words respectively.
  // We switch to the table-based method starting with 7 cases.
  static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6;
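  // For example, just above the threshold at N = 7 the estimates are
  // 3 + 2.5 * 7 = 20.5 (~21 words) for compare-and-jump versus 11 + 7 = 18 words
  // for the table-based method, and the gap keeps growing with N.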

  void GenerateMemoryBarrier(MemBarrierKind kind);

 private:
  void GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path, GpuRegister class_reg);
  void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
  void HandleBinaryOp(HBinaryOperation* operation);
  void HandleCondition(HCondition* instruction);
  void HandleShift(HBinaryOperation* operation);
  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      bool value_can_be_null);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);

  // Generate a heap reference load using one register `out`:
  //
  //   out <- *(out + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a read barrier and
  // shall be a register in that case; it may be an invalid location
  // otherwise.
  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                        Location out,
                                        uint32_t offset,
                                        Location maybe_temp,
                                        ReadBarrierOption read_barrier_option);
  // Generate a heap reference load using two different registers
  // `out` and `obj`:
  //
  //   out <- *(obj + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a Baker's (fast
  // path) read barrier and shall be a register in that case; it may
  // be an invalid location otherwise.
  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
                                         Location maybe_temp,
                                         ReadBarrierOption read_barrier_option);

  // Generate a GC root reference load:
  //
  //   root <- *(obj + offset)
  //
  // while honoring read barriers (if any).
  void GenerateGcRootFieldLoad(HInstruction* instruction,
                               Location root,
                               GpuRegister obj,
                               uint32_t offset,
                               ReadBarrierOption read_barrier_option,
                               Mips64Label* label_low = nullptr);

  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             Mips64Label* true_target,
                             Mips64Label* false_target);
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivRemByPowerOfTwo(HBinaryOperation* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateDivRemIntegral(HBinaryOperation* instruction);
  void GenerateIntLongCompare(IfCondition cond, bool is64bit, LocationSummary* locations);
  // When the function returns `false` it means that the condition holds if `dst` is non-zero
  // and doesn't hold if `dst` is zero. If it returns `true`, the roles of zero and non-zero
  // `dst` are exchanged.
  bool MaterializeIntLongCompare(IfCondition cond,
                                 bool is64bit,
                                 LocationSummary* input_locations,
                                 GpuRegister dst);
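  // As an illustration of the convention above (a sketch, not necessarily the exact code
  // emitted in every case): kCondEQ can be materialized as
  //     xor dst, lhs, rhs
  // which leaves `dst` zero exactly when the condition holds, so the function would return
  // `true` to signal the exchanged roles, saving a trailing
  //     sltiu dst, dst, 1
  // that would otherwise be needed to normalize `dst` to 0/1.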
  void GenerateIntLongCompareAndBranch(IfCondition cond,
                                       bool is64bit,
                                       LocationSummary* locations,
                                       Mips64Label* label);
  void GenerateFpCompare(IfCondition cond,
                         bool gt_bias,
                         DataType::Type type,
                         LocationSummary* locations);
  // When the function returns `false` it means that the condition holds if `dst` is non-zero
  // and doesn't hold if `dst` is zero. If it returns `true`, the roles of zero and non-zero
  // `dst` are exchanged.
  bool MaterializeFpCompare(IfCondition cond,
                            bool gt_bias,
                            DataType::Type type,
                            LocationSummary* input_locations,
                            FpuRegister dst);
  void GenerateFpCompareAndBranch(IfCondition cond,
                                  bool gt_bias,
                                  DataType::Type type,
                                  LocationSummary* locations,
                                  Mips64Label* label);
  void HandleGoto(HInstruction* got, HBasicBlock* successor);
  void GenPackedSwitchWithCompares(GpuRegister value_reg,
                                   int32_t lower_bound,
                                   uint32_t num_entries,
                                   HBasicBlock* switch_block,
                                   HBasicBlock* default_block);
  void GenTableBasedPackedSwitch(GpuRegister value_reg,
                                 int32_t lower_bound,
                                 uint32_t num_entries,
                                 HBasicBlock* switch_block,
                                 HBasicBlock* default_block);
  int32_t VecAddress(LocationSummary* locations,
                     size_t size,
                     /* out */ GpuRegister* adjusted_base);
  void GenConditionalMove(HSelect* select);

  Mips64Assembler* const assembler_;
  CodeGeneratorMIPS64* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorMIPS64);
};

class CodeGeneratorMIPS64 : public CodeGenerator {
 public:
  CodeGeneratorMIPS64(HGraph* graph,
                      const Mips64InstructionSetFeatures& isa_features,
                      const CompilerOptions& compiler_options,
                      OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorMIPS64() {}

  void GenerateFrameEntry() OVERRIDE;
  void GenerateFrameExit() OVERRIDE;

  void Bind(HBasicBlock* block) OVERRIDE;

  size_t GetWordSize() const OVERRIDE { return kMips64DoublewordSize; }

  size_t GetFloatingPointSpillSlotSize() const OVERRIDE {
    return GetGraph()->HasSIMD()
        ? 2 * kMips64DoublewordSize   // 16 bytes for each spill.
        : 1 * kMips64DoublewordSize;  //  8 bytes for each spill.
  }
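  // The 16-byte SIMD spill size matches the 128-bit width of the MSA vector registers.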

  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
    return assembler_.GetLabelLocation(GetLabelOf(block));
  }

  HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
  HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
  Mips64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
  const Mips64Assembler& GetAssembler() const OVERRIDE { return assembler_; }

  // Emit linker patches.
  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;

  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             GpuRegister obj,
                                             uint32_t offset,
                                             Location temp,
                                             bool needs_null_check);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference array load when Baker's read barriers are used.
  void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             GpuRegister obj,
                                             uint32_t data_offset,
                                             Location index,
                                             Location temp,
                                             bool needs_null_check);

  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
  // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
  //
  // Load the object reference located at the address
  // `obj + offset + (index << scale_factor)`, held by object `obj`, into
  // `ref`, and mark it if needed.
  //
  // If `always_update_field` is true, the value of the reference is
  // atomically updated in the holder (`obj`).
  void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                 Location ref,
                                                 GpuRegister obj,
                                                 uint32_t offset,
                                                 Location index,
                                                 ScaleFactor scale_factor,
                                                 Location temp,
                                                 bool needs_null_check,
                                                 bool always_update_field = false);

  // Generate a read barrier for a heap reference within `instruction`
  // using a slow path.
  //
  // A read barrier for an object reference read from the heap is
  // implemented as a call to the artReadBarrierSlow runtime entry
  // point, which is passed the values in locations `ref`, `obj`, and
  // `offset`:
  //
  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
  //                                      mirror::Object* obj,
  //                                      uint32_t offset);
  //
  // The `out` location contains the value returned by
  // artReadBarrierSlow.
  //
  // When `index` is provided (i.e. for array accesses), the offset
  // value passed to artReadBarrierSlow is adjusted to take `index`
  // into account.
  void GenerateReadBarrierSlow(HInstruction* instruction,
                               Location out,
                               Location ref,
                               Location obj,
                               uint32_t offset,
                               Location index = Location::NoLocation());

  // If read barriers are enabled, generate a read barrier for a heap
  // reference using a slow path. If heap poisoning is enabled, also
  // unpoison the reference in `out`.
  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
                                    Location out,
                                    Location ref,
                                    Location obj,
                                    uint32_t offset,
                                    Location index = Location::NoLocation());

  // Generate a read barrier for a GC root within `instruction` using
  // a slow path.
  //
  // A read barrier for an object reference GC root is implemented as
  // a call to the artReadBarrierForRootSlow runtime entry point,
  // which is passed the value in location `root`:
  //
  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
  //
  // The `out` location contains the value returned by
  // artReadBarrierForRootSlow.
  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

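  // Emit the generational GC write barrier: mark the card table entry covering `object` after a
  // reference `value` has been stored into one of its fields. If `value_can_be_null` is true,
  // the generated code is expected to test `value` and skip the marking for null stores.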
  void MarkGCCard(GpuRegister object, GpuRegister value, bool value_can_be_null);

  // Register allocation.

  void SetupBlockedRegisters() const OVERRIDE;

  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;

  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;

  InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips64; }

  const Mips64InstructionSetFeatures& GetInstructionSetFeatures() const {
    return isa_features_;
  }

  Mips64Label* GetLabelOf(HBasicBlock* block) const {
    return CommonGetLabelOf<Mips64Label>(block_labels_, block);
  }

  void Initialize() OVERRIDE {
    block_labels_ = CommonInitializeLabels<Mips64Label>();
  }

  // We prefer aligned loads and stores (less code), so spill and restore registers in slow paths
  // at aligned locations.
  uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return kMips64DoublewordSize; }

  void Finalize(CodeAllocator* allocator) OVERRIDE;

  // Code generation helpers.
  void MoveLocation(Location dst, Location src, DataType::Type dst_type) OVERRIDE;

  void MoveConstant(Location destination, int32_t value) OVERRIDE;

  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;

  void SwapLocations(Location loc1, Location loc2, DataType::Type type);

  // Generate code to invoke a runtime entry point.
  void InvokeRuntime(QuickEntrypointEnum entrypoint,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path = nullptr) OVERRIDE;

  // Generate code to invoke a runtime entry point, but do not record
  // PC-related information in a stack map.
  void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
                                           HInstruction* instruction,
                                           SlowPathCode* slow_path);

  void GenerateInvokeRuntime(int32_t entry_point_offset);

  ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }

  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return false; }

  // Check if the desired_string_load_kind is supported. If it is, return it;
  // otherwise return a fall-back kind that should be used instead.
  HLoadString::LoadKind GetSupportedLoadStringKind(
      HLoadString::LoadKind desired_string_load_kind) OVERRIDE;

  // Check if the desired_class_load_kind is supported. If it is, return it;
  // otherwise return a fall-back kind that should be used instead.
  HLoadClass::LoadKind GetSupportedLoadClassKind(
      HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;

  // Check if the desired_dispatch_info is supported. If it is, return it;
  // otherwise return a fall-back info that should be used instead.
  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      HInvokeStaticOrDirect* invoke) OVERRIDE;

  void GenerateStaticOrDirectCall(
      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
  void GenerateVirtualCall(
      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;

  void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
                              DataType::Type type ATTRIBUTE_UNUSED) OVERRIDE {
    UNIMPLEMENTED(FATAL) << "Not implemented on MIPS64";
  }

  void GenerateNop() OVERRIDE;
  void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
  void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;

  // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
  // whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
  //
  // The 16-bit halves of the 32-bit PC-relative offset are patched separately, necessitating
  // two patches/infos. There can be more than two patches/infos if the instruction supplying
  // the high half is shared with e.g. a slow path, while the low half is supplied by separate
  // instructions, e.g.:
  //     auipc r1, high       // patch
  //     lwu   r2, low(r1)    // patch
  //     beqzc r2, slow_path
  //   back:
  //     ...
  //   slow_path:
  //     ...
  //     sw    r2, low(r1)    // patch
  //     bc    back
  struct PcRelativePatchInfo : PatchInfo<Mips64Label> {
    PcRelativePatchInfo(const DexFile* dex_file,
                        uint32_t off_or_idx,
                        const PcRelativePatchInfo* info_high)
        : PatchInfo<Mips64Label>(dex_file, off_or_idx),
          patch_info_high(info_high) { }

    // Pointer to the info for the high half patch or nullptr if this is the high half patch info.
    const PcRelativePatchInfo* patch_info_high;

   private:
    PcRelativePatchInfo(PcRelativePatchInfo&& other) = delete;
    DISALLOW_COPY_AND_ASSIGN(PcRelativePatchInfo);
  };

  PcRelativePatchInfo* NewBootImageMethodPatch(MethodReference target_method,
                                               const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method,
                                              const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewBootImageTypePatch(const DexFile& dex_file,
                                             dex::TypeIndex type_index,
                                             const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file,
                                            dex::TypeIndex type_index,
                                            const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewBootImageStringPatch(const DexFile& dex_file,
                                               dex::StringIndex string_index,
                                               const PcRelativePatchInfo* info_high = nullptr);
  PcRelativePatchInfo* NewStringBssEntryPatch(const DexFile& dex_file,
                                              dex::StringIndex string_index,
                                              const PcRelativePatchInfo* info_high = nullptr);
  Literal* DeduplicateBootImageAddressLiteral(uint64_t address);

  void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info_high,
                                            GpuRegister out,
                                            PcRelativePatchInfo* info_low = nullptr);
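  // A typical paired use of the patch factories and the emitter above (a sketch; the
  // instruction consuming the low half depends on the load kind being generated):
  //
  //   PcRelativePatchInfo* info_high = NewBootImageMethodPatch(target_method);
  //   PcRelativePatchInfo* info_low = NewBootImageMethodPatch(target_method, info_high);
  //   EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, info_low);
  //   // ... then an instruction such as `daddiu` or `lwu` bound to the label of `info_low`.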

  void PatchJitRootUse(uint8_t* code,
                       const uint8_t* roots_data,
                       const Literal* literal,
                       uint64_t index_in_table) const;
  Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
                                       dex::StringIndex string_index,
                                       Handle<mirror::String> handle);
  Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
                                      dex::TypeIndex type_index,
                                      Handle<mirror::Class> handle);

 private:
  using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, Literal*>;
  using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, Literal*>;
  using StringToLiteralMap = ArenaSafeMap<StringReference,
                                          Literal*,
                                          StringReferenceValueComparator>;
  using TypeToLiteralMap = ArenaSafeMap<TypeReference,
                                        Literal*,
                                        TypeReferenceValueComparator>;

  Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
  Literal* DeduplicateUint64Literal(uint64_t value);

  PcRelativePatchInfo* NewPcRelativePatch(const DexFile* dex_file,
                                          uint32_t offset_or_index,
                                          const PcRelativePatchInfo* info_high,
                                          ArenaDeque<PcRelativePatchInfo>* patches);

  template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
  void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
                                   ArenaVector<linker::LinkerPatch>* linker_patches);

  // Labels for each block that will be compiled.
  Mips64Label* block_labels_;  // Indexed by block id.
  Mips64Label frame_entry_label_;
  LocationsBuilderMIPS64 location_builder_;
  InstructionCodeGeneratorMIPS64 instruction_visitor_;
  ParallelMoveResolverMIPS64 move_resolver_;
  Mips64Assembler assembler_;
  const Mips64InstructionSetFeatures& isa_features_;

  // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
  Uint32ToLiteralMap uint32_literals_;
  // Deduplication map for 64-bit literals, used for non-patchable method address or method code
  // address.
  Uint64ToLiteralMap uint64_literals_;
  // PC-relative method patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_method_patches_;
  // PC-relative method patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
  // PC-relative type patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<PcRelativePatchInfo> boot_image_type_patches_;
  // PC-relative type patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
  // PC-relative String patch info; type depends on configuration (intern table or boot image PIC).
  ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
  // PC-relative String patch info for kBssEntry.
  ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;

  // Patches for string root accesses in JIT compiled code.
  StringToLiteralMap jit_string_patches_;
  // Patches for class root accesses in JIT compiled code.
  TypeToLiteralMap jit_class_patches_;

  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS64);
};

}  // namespace mips64
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_