/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_mips64.h"

#include "art_method.h"
#include "code_generator_utils.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_mips64.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/mips64/assembler_mips64.h"
#include "utils/stack_checks.h"

namespace art {
namespace mips64 {

static constexpr int kCurrentMethodStackOffset = 0;
static constexpr GpuRegister kMethodRegisterArgument = A0;

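// Note: under the MIPS64 N64 ABI used here, integral and reference results
// come back in V0 and floating-point results in F0; void yields no location.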
Location Mips64ReturnLocation(Primitive::Type return_type) {
  switch (return_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      return Location::RegisterLocation(V0);

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      return Location::FpuRegisterLocation(F0);

    case Primitive::kPrimVoid:
      return Location();
  }
  UNREACHABLE();
}

Location InvokeDexCallingConventionVisitorMIPS64::GetReturnLocation(Primitive::Type type) const {
  return Mips64ReturnLocation(type);
}

Location InvokeDexCallingConventionVisitorMIPS64::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}

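// Note: the N64 calling convention assigns one argument slot per parameter
// regardless of its type, so a floating-point argument also consumes a GPR
// slot and vice versa; that is why gp_index_ and float_index_ advance
// together below.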
Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unexpected parameter type " << type;
  }

  if (Primitive::IsFloatingPointType(type) &&
      (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = Location::FpuRegisterLocation(
        calling_convention.GetFpuRegisterAt(float_index_++));
    gp_index_++;
  } else if (!Primitive::IsFloatingPointType(type) &&
             (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index_++));
    float_index_++;
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                 : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;

  // TODO: review

  // TODO: shouldn't we use a whole machine word per argument on the stack?
  // Implicit 4-byte method pointer (and such) will cause misalignment.

  return next_location;
}

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
  return Mips64ReturnLocation(type);
}

#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, x).Int32Value()

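// Slow paths: out-of-line code reached from fast-path code via a branch to
// GetEntryLabel(). They save live registers when needed, call into the
// runtime, and (when not fatal) branch back through GetExitLabel().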
class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : SlowPathCodeMIPS64(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(0),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimInt,
                               locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimInt);
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
};

class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction)
      : SlowPathCodeMIPS64(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
};

class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  LoadClassSlowPathMIPS64(HLoadClass* cls,
                          HInstruction* at,
                          uint32_t dex_pc,
                          bool do_clinit)
      : SlowPathCodeMIPS64(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    mips64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      mips64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    RestoreLiveRegisters(codegen, locations);
    __ Bc(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS64"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
};

class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : SlowPathCodeMIPS64(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
    __ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
    Primitive::Type type = instruction_->GetType();
    mips64_codegen->MoveLocation(locations->Out(),
                                 calling_convention.GetReturnLocation(type),
                                 type);

    RestoreLiveRegisters(codegen, locations);
    __ Bc(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
};

class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : SlowPathCodeMIPS64(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
};

class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor)
      : SlowPathCodeMIPS64(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ Bc(GetReturnLabel());
    } else {
      __ Bc(mips64_codegen->GetLabelOf(successor_));
    }
  }

  Mips64Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }

 private:
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  Mips64Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS64);
};

class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit TypeCheckSlowPathMIPS64(HInstruction* instruction) : SlowPathCodeMIPS64(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out();
    uint32_t dex_pc = instruction_->GetDexPc();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimNot,
                               object_class,
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                                    instruction_,
                                    dex_pc,
                                    this);
      CheckEntrypointTypes<
          kQuickInstanceofNonTrivial, uint32_t, const mirror::Class*, const mirror::Class*>();
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
    } else {
      DCHECK(instruction_->IsCheckCast());
      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    RestoreLiveRegisters(codegen, locations);
    __ Bc(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
};

class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit DeoptimizationSlowPathMIPS64(HDeoptimize* instruction)
      : SlowPathCodeMIPS64(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickDeoptimize, void, void>();
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
};

CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
                                         const Mips64InstructionSetFeatures& isa_features,
                                         const CompilerOptions& compiler_options,
                                         OptimizingCompilerStats* stats)
    : CodeGenerator(graph,
                    kNumberOfGpuRegisters,
                    kNumberOfFpuRegisters,
                    /* number_of_register_pairs */ 0,
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options,
                    stats),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      assembler_(graph->GetArena()),
      isa_features_(isa_features) {
  // Save RA (containing the return address) to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(RA));
}

#undef __
#define __ down_cast<Mips64Assembler*>(GetAssembler())->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, x).Int32Value()

void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
  // Ensure that we fix up branches.
  __ FinalizeCode();

  // Adjust native pc offsets in stack maps.
  for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
    uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
    uint32_t new_position = __ GetAdjustedPosition(old_position);
    DCHECK_GE(new_position, old_position);
    stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
  }

  // Adjust pc offsets for the disassembly information.
  if (disasm_info_ != nullptr) {
    GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
    frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
    frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
    for (auto& it : *disasm_info_->GetInstructionIntervals()) {
      it.second.start = __ GetAdjustedPosition(it.second.start);
      it.second.end = __ GetAdjustedPosition(it.second.end);
    }
    for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
      it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
      it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
    }
  }

  CodeGenerator::Finalize(allocator);
}

Mips64Assembler* ParallelMoveResolverMIPS64::GetAssembler() const {
  return codegen_->GetAssembler();
}

void ParallelMoveResolverMIPS64::EmitMove(size_t index) {
  MoveOperands* move = moves_[index];
  codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS64::EmitSwap(size_t index) {
  MoveOperands* move = moves_[index];
  codegen_->SwapLocations(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS64::RestoreScratch(int reg) {
  // Pop reg.
  __ Ld(GpuRegister(reg), SP, 0);
  __ DecreaseFrameSize(kMips64DoublewordSize);
}

void ParallelMoveResolverMIPS64::SpillScratch(int reg) {
  // Push reg.
  __ IncreaseFrameSize(kMips64DoublewordSize);
  __ Sd(GpuRegister(reg), SP, 0);
}

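// Exchange swaps two stack slots. Besides TMP, a second scratch register is
// needed to hold one of the slots; if none is free, V0 is spilled around the
// swap, which shifts all SP-relative offsets by one doubleword.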
void ParallelMoveResolverMIPS64::Exchange(int index1, int index2, bool double_slot) {
  LoadOperandType load_type = double_slot ? kLoadDoubleword : kLoadWord;
  StoreOperandType store_type = double_slot ? kStoreDoubleword : kStoreWord;
  // Allocate a scratch register other than TMP, if available.
  // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
  // automatically unspilled when the scratch scope object is destroyed).
  ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
  // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
  int stack_offset = ensure_scratch.IsSpilled() ? kMips64DoublewordSize : 0;
  __ LoadFromOffset(load_type,
                    GpuRegister(ensure_scratch.GetRegister()),
                    SP,
                    index1 + stack_offset);
  __ LoadFromOffset(load_type,
                    TMP,
                    SP,
                    index2 + stack_offset);
  __ StoreToOffset(store_type,
                   GpuRegister(ensure_scratch.GetRegister()),
                   SP,
                   index2 + stack_offset);
  __ StoreToOffset(store_type, TMP, SP, index1 + stack_offset);
}

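// Map MIPS64 core and FP registers to their DWARF numbers for CFI emission.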
static dwarf::Reg DWARFReg(GpuRegister reg) {
  return dwarf::Reg::Mips64Core(static_cast<int>(reg));
}

static dwarf::Reg DWARFReg(FpuRegister reg) {
  return dwarf::Reg::Mips64Fp(static_cast<int>(reg));
}

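// Frame layout sketch (highest addresses first): spilled core callee-saves,
// then spilled FP callee-saves, then the remainder of the frame, with the
// current ArtMethod* stored at SP + kCurrentMethodStackOffset (== 0).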
void CodeGeneratorMIPS64::GenerateFrameEntry() {
  __ Bind(&frame_entry_label_);

  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();

  if (do_overflow_check) {
    __ LoadFromOffset(kLoadWord,
                      ZERO,
                      SP,
                      -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
    RecordPcInfo(nullptr, 0);
  }

  // TODO: anything related to T9/GP/GOT/PIC/.so's?

  if (HasEmptyFrame()) {
    return;
  }

  // Make sure the frame size isn't unreasonably large. Per the various APIs
  // it looks like it should always be less than 2GB in size, which allows
  // us to use 32-bit signed offsets from the stack pointer.
  if (GetFrameSize() > 0x7FFFFFFF) {
    LOG(FATAL) << "Stack frame larger than 2GB";
  }

  // Spill callee-saved registers.
  // Note that their cumulative size is small and they can be indexed using
  // 16-bit offsets.

  // TODO: increment/decrement SP in one step instead of two or remove this comment.

  uint32_t ofs = FrameEntrySpillSize();
  __ IncreaseFrameSize(ofs);

  for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
    GpuRegister reg = kCoreCalleeSaves[i];
    if (allocated_registers_.ContainsCoreRegister(reg)) {
      ofs -= kMips64DoublewordSize;
      __ Sd(reg, SP, ofs);
      __ cfi().RelOffset(DWARFReg(reg), ofs);
    }
  }

  for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
    FpuRegister reg = kFpuCalleeSaves[i];
    if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
      ofs -= kMips64DoublewordSize;
      __ Sdc1(reg, SP, ofs);
      __ cfi().RelOffset(DWARFReg(reg), ofs);
    }
  }

  // Allocate the rest of the frame and store the current method pointer
  // at its end.

  __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());

  static_assert(IsInt<16>(kCurrentMethodStackOffset),
                "kCurrentMethodStackOffset must fit into int16_t");
  __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
}

void CodeGeneratorMIPS64::GenerateFrameExit() {
  __ cfi().RememberState();

  // TODO: anything related to T9/GP/GOT/PIC/.so's?

  if (!HasEmptyFrame()) {
    // Deallocate the rest of the frame.

    __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());

    // Restore callee-saved registers.
    // Note that their cumulative size is small and they can be indexed using
    // 16-bit offsets.

    // TODO: increment/decrement SP in one step instead of two or remove this comment.

    uint32_t ofs = 0;

    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
      FpuRegister reg = kFpuCalleeSaves[i];
      if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
        __ Ldc1(reg, SP, ofs);
        ofs += kMips64DoublewordSize;
        __ cfi().Restore(DWARFReg(reg));
      }
    }

    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
      GpuRegister reg = kCoreCalleeSaves[i];
      if (allocated_registers_.ContainsCoreRegister(reg)) {
        __ Ld(reg, SP, ofs);
        ofs += kMips64DoublewordSize;
        __ cfi().Restore(DWARFReg(reg));
      }
    }

    DCHECK_EQ(ofs, FrameEntrySpillSize());
    __ DecreaseFrameSize(ofs);
  }

  __ Jr(RA);
  __ Nop();

  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}

void CodeGeneratorMIPS64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

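// Emits a move between any two locations (GPR, FPR, stack slot or constant).
// dst_type chooses between 32-bit and 64-bit transfers when the location
// kinds alone do not determine the width.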
void CodeGeneratorMIPS64::MoveLocation(Location destination,
                                       Location source,
                                       Primitive::Type dst_type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves.
  bool unspecified_type = (dst_type == Primitive::kPrimVoid);
  DCHECK_EQ(unspecified_type, false);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant()
                                  || src_cst->IsFloatConstant()
                                  || src_cst->IsNullConstant()))) {
        // For stack slots and 32bit constants, a 32bit type is appropriate.
        dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Else the source is a register, and since the
        // type has not been specified, we choose a 64bit type to force a 64bit
        // move.
        dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) ||
           (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type)));
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      // Move to GPR/FPR from stack.
      LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
      if (Primitive::IsFloatingPointType(dst_type)) {
        __ LoadFpuFromOffset(load_type,
                             destination.AsFpuRegister<FpuRegister>(),
                             SP,
                             source.GetStackIndex());
      } else {
        // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
        __ LoadFromOffset(load_type,
                          destination.AsRegister<GpuRegister>(),
                          SP,
                          source.GetStackIndex());
      }
    } else if (source.IsConstant()) {
      // Move to GPR/FPR from constant.
      GpuRegister gpr = AT;
      if (!Primitive::IsFloatingPointType(dst_type)) {
        gpr = destination.AsRegister<GpuRegister>();
      }
      if (dst_type == Primitive::kPrimInt || dst_type == Primitive::kPrimFloat) {
        int32_t value = GetInt32ValueOf(source.GetConstant()->AsConstant());
        if (Primitive::IsFloatingPointType(dst_type) && value == 0) {
          gpr = ZERO;
        } else {
          __ LoadConst32(gpr, value);
        }
      } else {
        int64_t value = GetInt64ValueOf(source.GetConstant()->AsConstant());
        if (Primitive::IsFloatingPointType(dst_type) && value == 0) {
          gpr = ZERO;
        } else {
          __ LoadConst64(gpr, value);
        }
      }
      if (dst_type == Primitive::kPrimFloat) {
        __ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
      } else if (dst_type == Primitive::kPrimDouble) {
        __ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
      }
    } else if (source.IsRegister()) {
      if (destination.IsRegister()) {
        // Move to GPR from GPR.
        __ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
      } else {
        DCHECK(destination.IsFpuRegister());
        if (Primitive::Is64BitType(dst_type)) {
          __ Dmtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
        } else {
          __ Mtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
        }
      }
    } else if (source.IsFpuRegister()) {
      if (destination.IsFpuRegister()) {
        // Move to FPR from FPR.
        if (dst_type == Primitive::kPrimFloat) {
          __ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        } else {
          DCHECK_EQ(dst_type, Primitive::kPrimDouble);
          __ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        }
      } else {
        DCHECK(destination.IsRegister());
        if (Primitive::Is64BitType(dst_type)) {
          __ Dmfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
        } else {
          __ Mfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
        }
      }
    }
  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) &&
             (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type)));
      // Move to stack from GPR/FPR.
      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
      if (source.IsRegister()) {
        __ StoreToOffset(store_type,
                         source.AsRegister<GpuRegister>(),
                         SP,
                         destination.GetStackIndex());
      } else {
        __ StoreFpuToOffset(store_type,
                            source.AsFpuRegister<FpuRegister>(),
                            SP,
                            destination.GetStackIndex());
      }
    } else if (source.IsConstant()) {
      // Move to stack from constant.
      HConstant* src_cst = source.GetConstant();
      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
      GpuRegister gpr = ZERO;
      if (destination.IsStackSlot()) {
        int32_t value = GetInt32ValueOf(src_cst->AsConstant());
        if (value != 0) {
          gpr = TMP;
          __ LoadConst32(gpr, value);
        }
      } else {
        DCHECK(destination.IsDoubleStackSlot());
        int64_t value = GetInt64ValueOf(src_cst->AsConstant());
        if (value != 0) {
          gpr = TMP;
          __ LoadConst64(gpr, value);
        }
      }
      __ StoreToOffset(store_type, gpr, SP, destination.GetStackIndex());
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK_EQ(source.IsDoubleStackSlot(), destination.IsDoubleStackSlot());
      // Move to stack from stack.
      if (destination.IsStackSlot()) {
        __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
        __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
      } else {
        __ LoadFromOffset(kLoadDoubleword, TMP, SP, source.GetStackIndex());
        __ StoreToOffset(kStoreDoubleword, TMP, SP, destination.GetStackIndex());
      }
    }
  }
}

void CodeGeneratorMIPS64::SwapLocations(Location loc1, Location loc2, Primitive::Type type) {
  DCHECK(!loc1.IsConstant());
  DCHECK(!loc2.IsConstant());

  if (loc1.Equals(loc2)) {
    return;
  }

  bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
  bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
  bool is_fp_reg1 = loc1.IsFpuRegister();
  bool is_fp_reg2 = loc2.IsFpuRegister();

  if (loc2.IsRegister() && loc1.IsRegister()) {
    // Swap 2 GPRs.
    GpuRegister r1 = loc1.AsRegister<GpuRegister>();
    GpuRegister r2 = loc2.AsRegister<GpuRegister>();
    __ Move(TMP, r2);
    __ Move(r2, r1);
    __ Move(r1, TMP);
  } else if (is_fp_reg2 && is_fp_reg1) {
    // Swap 2 FPRs.
    FpuRegister r1 = loc1.AsFpuRegister<FpuRegister>();
    FpuRegister r2 = loc2.AsFpuRegister<FpuRegister>();
    if (type == Primitive::kPrimFloat) {
      __ MovS(FTMP, r1);
      __ MovS(r1, r2);
      __ MovS(r2, FTMP);
    } else {
      DCHECK_EQ(type, Primitive::kPrimDouble);
      __ MovD(FTMP, r1);
      __ MovD(r1, r2);
      __ MovD(r2, FTMP);
    }
  } else if (is_slot1 != is_slot2) {
    // Swap GPR/FPR and stack slot.
    Location reg_loc = is_slot1 ? loc2 : loc1;
    Location mem_loc = is_slot1 ? loc1 : loc2;
    LoadOperandType load_type = mem_loc.IsStackSlot() ? kLoadWord : kLoadDoubleword;
    StoreOperandType store_type = mem_loc.IsStackSlot() ? kStoreWord : kStoreDoubleword;
    // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
    __ LoadFromOffset(load_type, TMP, SP, mem_loc.GetStackIndex());
    if (reg_loc.IsFpuRegister()) {
      __ StoreFpuToOffset(store_type,
                          reg_loc.AsFpuRegister<FpuRegister>(),
                          SP,
                          mem_loc.GetStackIndex());
      if (mem_loc.IsStackSlot()) {
        __ Mtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
      } else {
        DCHECK(mem_loc.IsDoubleStackSlot());
        __ Dmtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
      }
    } else {
      __ StoreToOffset(store_type, reg_loc.AsRegister<GpuRegister>(), SP, mem_loc.GetStackIndex());
      __ Move(reg_loc.AsRegister<GpuRegister>(), TMP);
    }
  } else if (is_slot1 && is_slot2) {
    move_resolver_.Exchange(loc1.GetStackIndex(),
                            loc2.GetStackIndex(),
                            loc1.IsDoubleStackSlot());
  } else {
    LOG(FATAL) << "Unimplemented swap between locations " << loc1 << " and " << loc2;
  }
}

void CodeGeneratorMIPS64::MoveConstant(Location location, int32_t value) {
  DCHECK(location.IsRegister());
  __ LoadConst32(location.AsRegister<GpuRegister>(), value);
}

void CodeGeneratorMIPS64::AddLocationAsTemp(Location location, LocationSummary* locations) {
  if (location.IsRegister()) {
    locations->AddTemp(location);
  } else {
    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
  }
}

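// Card-marking write barrier: after `value` is stored into `object`, dirty
// the card covering `object` so the GC rescans it. The byte stored is the low
// byte of the card table base, which ART biases to equal the dirty value.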
void CodeGeneratorMIPS64::MarkGCCard(GpuRegister object,
                                     GpuRegister value,
                                     bool value_can_be_null) {
  Mips64Label done;
  GpuRegister card = AT;
  GpuRegister temp = TMP;
  if (value_can_be_null) {
    __ Beqzc(value, &done);
  }
  __ LoadFromOffset(kLoadDoubleword,
                    card,
                    TR,
                    Thread::CardTableOffset<kMips64DoublewordSize>().Int32Value());
  __ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
  __ Daddu(temp, card, temp);
  __ Sb(card, temp, 0);
  if (value_can_be_null) {
    __ Bind(&done);
  }
}

void CodeGeneratorMIPS64::SetupBlockedRegisters() const {
  // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
  blocked_core_registers_[ZERO] = true;
  blocked_core_registers_[K0] = true;
  blocked_core_registers_[K1] = true;
  blocked_core_registers_[GP] = true;
  blocked_core_registers_[SP] = true;
  blocked_core_registers_[RA] = true;

  // AT, TMP(T8) and TMP2(T3) are used as temporary/scratch
  // registers (similar to how AT is used by MIPS assemblers).
  blocked_core_registers_[AT] = true;
  blocked_core_registers_[TMP] = true;
  blocked_core_registers_[TMP2] = true;
  blocked_fpu_registers_[FTMP] = true;

  // Reserve suspend and thread registers.
  blocked_core_registers_[S0] = true;
  blocked_core_registers_[TR] = true;

  // Reserve T9 for function calls.
  blocked_core_registers_[T9] = true;

  // TODO: review; anything else?

  // TODO: remove once all the issues with register saving/restoring are sorted out.
  for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
    blocked_core_registers_[kCoreCalleeSaves[i]] = true;
  }

  for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
    blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
  }
}

size_t CodeGeneratorMIPS64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreToOffset(kStoreDoubleword, GpuRegister(reg_id), SP, stack_index);
  return kMips64DoublewordSize;
}

size_t CodeGeneratorMIPS64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFromOffset(kLoadDoubleword, GpuRegister(reg_id), SP, stack_index);
  return kMips64DoublewordSize;
}

size_t CodeGeneratorMIPS64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreFpuToOffset(kStoreDoubleword, FpuRegister(reg_id), SP, stack_index);
  return kMips64DoublewordSize;
}

size_t CodeGeneratorMIPS64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFpuFromOffset(kLoadDoubleword, FpuRegister(reg_id), SP, stack_index);
  return kMips64DoublewordSize;
}

void CodeGeneratorMIPS64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << GpuRegister(reg);
}

void CodeGeneratorMIPS64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << FpuRegister(reg);
}

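// Runtime calls load the entry point from the current Thread (TR) and jump
// through T9, the register the MIPS ABI expects to hold a callee's address.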
void CodeGeneratorMIPS64::InvokeRuntime(QuickEntrypointEnum entrypoint,
                                        HInstruction* instruction,
                                        uint32_t dex_pc,
                                        SlowPathCode* slow_path) {
  InvokeRuntime(GetThreadOffset<kMips64DoublewordSize>(entrypoint).Int32Value(),
                instruction,
                dex_pc,
                slow_path);
}

void CodeGeneratorMIPS64::InvokeRuntime(int32_t entry_point_offset,
                                        HInstruction* instruction,
                                        uint32_t dex_pc,
                                        SlowPathCode* slow_path) {
  ValidateInvokeRuntime(instruction, slow_path);
  // TODO: anything related to T9/GP/GOT/PIC/.so's?
  __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
  __ Jalr(T9);
  __ Nop();
  RecordPcInfo(instruction, dex_pc, slow_path);
}

void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
                                                                      GpuRegister class_reg) {
  __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
  __ LoadConst32(AT, mirror::Class::kStatusInitialized);
  __ Bltc(TMP, AT, slow_path->GetEntryLabel());
  // TODO: barrier needed?
  __ Bind(slow_path->GetExitLabel());
}

void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
  __ Sync(0);  // Only stype 0 is supported.
}

void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                          HBasicBlock* successor) {
  SuspendCheckSlowPathMIPS64* slow_path =
      new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS64(instruction, successor);
  codegen_->AddSlowPath(slow_path);

  __ LoadFromOffset(kLoadUnsignedHalfword,
                    TMP,
                    TR,
                    Thread::ThreadFlagsOffset<kMips64DoublewordSize>().Int32Value());
  if (successor == nullptr) {
    __ Bnezc(TMP, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ Beqzc(TMP, codegen_->GetLabelOf(successor));
    __ Bc(slow_path->GetEntryLabel());
    // slow_path will return to GetLabelOf(successor).
  }
}

InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
                                                               CodeGeneratorMIPS64* codegen)
    : InstructionCodeGenerator(graph, codegen),
      assembler_(codegen->GetAssembler()),
      codegen_(codegen) {}

void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
  DCHECK_EQ(instruction->InputCount(), 2U);
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Primitive::Type type = instruction->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      HInstruction* right = instruction->InputAt(1);
      bool can_use_imm = false;
      if (right->IsConstant()) {
        int64_t imm = CodeGenerator::GetInt64ValueOf(right->AsConstant());
        if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
          can_use_imm = IsUint<16>(imm);
        } else if (instruction->IsAdd()) {
          can_use_imm = IsInt<16>(imm);
        } else {
          DCHECK(instruction->IsSub());
          can_use_imm = IsInt<16>(-imm);
        }
      }
      if (can_use_imm)
        locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
      else
        locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      Location rhs_location = locations->InAt(1);

      GpuRegister rhs_reg = ZERO;
      int64_t rhs_imm = 0;
      bool use_imm = rhs_location.IsConstant();
      if (use_imm) {
        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
      } else {
        rhs_reg = rhs_location.AsRegister<GpuRegister>();
      }

      if (instruction->IsAnd()) {
        if (use_imm)
          __ Andi(dst, lhs, rhs_imm);
        else
          __ And(dst, lhs, rhs_reg);
      } else if (instruction->IsOr()) {
        if (use_imm)
          __ Ori(dst, lhs, rhs_imm);
        else
          __ Or(dst, lhs, rhs_reg);
      } else if (instruction->IsXor()) {
        if (use_imm)
          __ Xori(dst, lhs, rhs_imm);
        else
          __ Xor(dst, lhs, rhs_reg);
      } else if (instruction->IsAdd()) {
        if (type == Primitive::kPrimInt) {
          if (use_imm)
            __ Addiu(dst, lhs, rhs_imm);
          else
            __ Addu(dst, lhs, rhs_reg);
        } else {
          if (use_imm)
            __ Daddiu(dst, lhs, rhs_imm);
          else
            __ Daddu(dst, lhs, rhs_reg);
        }
      } else {
        DCHECK(instruction->IsSub());
        if (type == Primitive::kPrimInt) {
          if (use_imm)
            __ Addiu(dst, lhs, -rhs_imm);
          else
            __ Subu(dst, lhs, rhs_reg);
        } else {
          if (use_imm)
            __ Daddiu(dst, lhs, -rhs_imm);
          else
            __ Dsubu(dst, lhs, rhs_reg);
        }
      }
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (instruction->IsAdd()) {
        if (type == Primitive::kPrimFloat)
          __ AddS(dst, lhs, rhs);
        else
          __ AddD(dst, lhs, rhs);
      } else if (instruction->IsSub()) {
        if (type == Primitive::kPrimFloat)
          __ SubS(dst, lhs, rhs);
        else
          __ SubD(dst, lhs, rhs);
      } else {
        LOG(FATAL) << "Unexpected floating-point binary operation";
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected binary operation type " << type;
  }
}

void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift type " << type;
  }
}

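// Shift distances are masked to the operand width (31 for int, 63 for long).
// Immediate 64-bit shifts of 32 or more use the "...32" instruction forms
// (e.g. Dsll32), which shift by shift_value - 32.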
void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
  LocationSummary* locations = instr->GetLocations();
  Primitive::Type type = instr->GetType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      Location rhs_location = locations->InAt(1);

      GpuRegister rhs_reg = ZERO;
      int64_t rhs_imm = 0;
      bool use_imm = rhs_location.IsConstant();
      if (use_imm) {
        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
      } else {
        rhs_reg = rhs_location.AsRegister<GpuRegister>();
      }

      if (use_imm) {
        uint32_t shift_value = rhs_imm &
            (type == Primitive::kPrimInt ? kMaxIntShiftDistance : kMaxLongShiftDistance);

        if (shift_value == 0) {
          if (dst != lhs) {
            __ Move(dst, lhs);
          }
        } else if (type == Primitive::kPrimInt) {
          if (instr->IsShl()) {
            __ Sll(dst, lhs, shift_value);
          } else if (instr->IsShr()) {
            __ Sra(dst, lhs, shift_value);
          } else if (instr->IsUShr()) {
            __ Srl(dst, lhs, shift_value);
          } else {
            __ Rotr(dst, lhs, shift_value);
          }
        } else {
          if (shift_value < 32) {
            if (instr->IsShl()) {
              __ Dsll(dst, lhs, shift_value);
            } else if (instr->IsShr()) {
              __ Dsra(dst, lhs, shift_value);
            } else if (instr->IsUShr()) {
              __ Dsrl(dst, lhs, shift_value);
            } else {
              __ Drotr(dst, lhs, shift_value);
            }
          } else {
            shift_value -= 32;
            if (instr->IsShl()) {
              __ Dsll32(dst, lhs, shift_value);
            } else if (instr->IsShr()) {
              __ Dsra32(dst, lhs, shift_value);
            } else if (instr->IsUShr()) {
              __ Dsrl32(dst, lhs, shift_value);
            } else {
              __ Drotr32(dst, lhs, shift_value);
            }
          }
        }
      } else {
        if (type == Primitive::kPrimInt) {
          if (instr->IsShl()) {
            __ Sllv(dst, lhs, rhs_reg);
          } else if (instr->IsShr()) {
            __ Srav(dst, lhs, rhs_reg);
          } else if (instr->IsUShr()) {
            __ Srlv(dst, lhs, rhs_reg);
          } else {
            __ Rotrv(dst, lhs, rhs_reg);
          }
        } else {
          if (instr->IsShl()) {
            __ Dsllv(dst, lhs, rhs_reg);
          } else if (instr->IsShr()) {
            __ Dsrav(dst, lhs, rhs_reg);
          } else if (instr->IsUShr()) {
            __ Dsrlv(dst, lhs, rhs_reg);
          } else {
            __ Drotrv(dst, lhs, rhs_reg);
          }
        }
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift operation type " << type;
  }
}

void LocationsBuilderMIPS64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

   1274 void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
   1275   LocationSummary* locations =
   1276       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
   1277   locations->SetInAt(0, Location::RequiresRegister());
   1278   locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
   1279   if (Primitive::IsFloatingPointType(instruction->GetType())) {
   1280     locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
   1281   } else {
   1282     locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   1283   }
   1284 }
   1285 
   1286 void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
   1287   LocationSummary* locations = instruction->GetLocations();
   1288   GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
   1289   Location index = locations->InAt(1);
   1290   Primitive::Type type = instruction->GetType();
   1291 
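           // Note: TIMES_1/2/4/8 are log2 element sizes (shift amounts 0-3), used both
           // to scale a constant index and as the Dsll distance for a register index.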
   1292   switch (type) {
   1293     case Primitive::kPrimBoolean: {
   1294       uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
   1295       GpuRegister out = locations->Out().AsRegister<GpuRegister>();
   1296       if (index.IsConstant()) {
   1297         size_t offset =
   1298             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
   1299         __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
   1300       } else {
   1301         __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
   1302         __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
   1303       }
   1304       break;
   1305     }
   1306 
   1307     case Primitive::kPrimByte: {
   1308       uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
   1309       GpuRegister out = locations->Out().AsRegister<GpuRegister>();
   1310       if (index.IsConstant()) {
   1311         size_t offset =
   1312             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
   1313         __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
   1314       } else {
   1315         __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
   1316         __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
   1317       }
   1318       break;
   1319     }
   1320 
   1321     case Primitive::kPrimShort: {
   1322       uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
   1323       GpuRegister out = locations->Out().AsRegister<GpuRegister>();
   1324       if (index.IsConstant()) {
   1325         size_t offset =
   1326             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
   1327         __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
   1328       } else {
   1329         __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
   1330         __ Daddu(TMP, obj, TMP);
   1331         __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
   1332       }
   1333       break;
   1334     }
   1335 
   1336     case Primitive::kPrimChar: {
   1337       uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
   1338       GpuRegister out = locations->Out().AsRegister<GpuRegister>();
   1339       if (index.IsConstant()) {
   1340         size_t offset =
   1341             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
   1342         __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
   1343       } else {
   1344         __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
   1345         __ Daddu(TMP, obj, TMP);
   1346         __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
   1347       }
   1348       break;
   1349     }
   1350 
   1351     case Primitive::kPrimInt:
   1352     case Primitive::kPrimNot: {
   1353       DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
   1354       uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
   1355       GpuRegister out = locations->Out().AsRegister<GpuRegister>();
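               // Heap references are 32-bit values; load them zero-extended
               // (kLoadUnsignedWord) rather than with the sign-extending kLoadWord
               // used for int.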
   1356       LoadOperandType load_type = (type == Primitive::kPrimNot) ? kLoadUnsignedWord : kLoadWord;
   1357       if (index.IsConstant()) {
   1358         size_t offset =
   1359             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
   1360         __ LoadFromOffset(load_type, out, obj, offset);
   1361       } else {
   1362         __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
   1363         __ Daddu(TMP, obj, TMP);
   1364         __ LoadFromOffset(load_type, out, TMP, data_offset);
   1365       }
   1366       break;
   1367     }
   1368 
   1369     case Primitive::kPrimLong: {
   1370       uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
   1371       GpuRegister out = locations->Out().AsRegister<GpuRegister>();
   1372       if (index.IsConstant()) {
   1373         size_t offset =
   1374             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
   1375         __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
   1376       } else {
   1377         __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
   1378         __ Daddu(TMP, obj, TMP);
   1379         __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
   1380       }
   1381       break;
   1382     }
   1383 
   1384     case Primitive::kPrimFloat: {
   1385       uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
   1386       FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
   1387       if (index.IsConstant()) {
   1388         size_t offset =
   1389             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
   1390         __ LoadFpuFromOffset(kLoadWord, out, obj, offset);
   1391       } else {
   1392         __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
   1393         __ Daddu(TMP, obj, TMP);
   1394         __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset);
   1395       }
   1396       break;
   1397     }
   1398 
   1399     case Primitive::kPrimDouble: {
   1400       uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
   1401       FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
   1402       if (index.IsConstant()) {
   1403         size_t offset =
   1404             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
   1405         __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset);
   1406       } else {
   1407         __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
   1408         __ Daddu(TMP, obj, TMP);
   1409         __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset);
   1410       }
   1411       break;
   1412     }
   1413 
   1414     case Primitive::kPrimVoid:
   1415       LOG(FATAL) << "Unreachable type " << instruction->GetType();
   1416       UNREACHABLE();
   1417   }
   1418   codegen_->MaybeRecordImplicitNullCheck(instruction);
   1419 }
   1420 
   1421 void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
   1422   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
   1423   locations->SetInAt(0, Location::RequiresRegister());
   1424   locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   1425 }
   1426 
   1427 void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction) {
   1428   LocationSummary* locations = instruction->GetLocations();
   1429   uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
   1430   GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
   1431   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
   1432   __ LoadFromOffset(kLoadWord, out, obj, offset);
   1433   codegen_->MaybeRecordImplicitNullCheck(instruction);
   1434 }
   1435 
   1436 void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
   1437   bool needs_runtime_call = instruction->NeedsTypeCheck();
   1438   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
   1439       instruction,
   1440       needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
   1441   if (needs_runtime_call) {
   1442     InvokeRuntimeCallingConvention calling_convention;
   1443     locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   1444     locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
   1445     locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
   1446   } else {
   1447     locations->SetInAt(0, Location::RequiresRegister());
   1448     locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
   1449     if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
   1450       locations->SetInAt(2, Location::RequiresFpuRegister());
   1451     } else {
   1452       locations->SetInAt(2, Location::RequiresRegister());
   1453     }
   1454   }
   1455 }
   1456 
   1457 void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
   1458   LocationSummary* locations = instruction->GetLocations();
   1459   GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
   1460   Location index = locations->InAt(1);
   1461   Primitive::Type value_type = instruction->GetComponentType();
   1462   bool needs_runtime_call = locations->WillCall();
   1463   bool needs_write_barrier =
   1464       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
   1465 
   1466   switch (value_type) {
   1467     case Primitive::kPrimBoolean:
   1468     case Primitive::kPrimByte: {
   1469       uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
   1470       GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
   1471       if (index.IsConstant()) {
   1472         size_t offset =
   1473             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
   1474         __ StoreToOffset(kStoreByte, value, obj, offset);
   1475       } else {
   1476         __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
   1477         __ StoreToOffset(kStoreByte, value, TMP, data_offset);
   1478       }
   1479       break;
   1480     }
   1481 
   1482     case Primitive::kPrimShort:
   1483     case Primitive::kPrimChar: {
   1484       uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
   1485       GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
   1486       if (index.IsConstant()) {
   1487         size_t offset =
   1488             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
   1489         __ StoreToOffset(kStoreHalfword, value, obj, offset);
   1490       } else {
   1491         __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
   1492         __ Daddu(TMP, obj, TMP);
   1493         __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
   1494       }
   1495       break;
   1496     }
   1497 
   1498     case Primitive::kPrimInt:
   1499     case Primitive::kPrimNot: {
   1500       if (!needs_runtime_call) {
   1501         uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
   1502         GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
   1503         if (index.IsConstant()) {
   1504           size_t offset =
   1505               (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
   1506           __ StoreToOffset(kStoreWord, value, obj, offset);
   1507         } else {
   1508           DCHECK(index.IsRegister()) << index;
   1509           __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
   1510           __ Daddu(TMP, obj, TMP);
   1511           __ StoreToOffset(kStoreWord, value, TMP, data_offset);
   1512         }
   1513         codegen_->MaybeRecordImplicitNullCheck(instruction);
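                 // A reference store needs a GC write barrier: mark the card covering
                 // `obj` so the collector rescans this array. MarkGCCard skips the
                 // marking when the stored value is null.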
   1514         if (needs_write_barrier) {
   1515           DCHECK_EQ(value_type, Primitive::kPrimNot);
   1516           codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull());
   1517         }
   1518       } else {
   1519         DCHECK_EQ(value_type, Primitive::kPrimNot);
   1520         codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
   1521                                 instruction,
   1522                                 instruction->GetDexPc(),
   1523                                 nullptr);
   1524         CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
   1525       }
   1526       break;
   1527     }
   1528 
   1529     case Primitive::kPrimLong: {
   1530       uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
   1531       GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
   1532       if (index.IsConstant()) {
   1533         size_t offset =
   1534             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
   1535         __ StoreToOffset(kStoreDoubleword, value, obj, offset);
   1536       } else {
   1537         __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
   1538         __ Daddu(TMP, obj, TMP);
   1539         __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
   1540       }
   1541       break;
   1542     }
   1543 
   1544     case Primitive::kPrimFloat: {
   1545       uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
   1546       FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
   1547       DCHECK(locations->InAt(2).IsFpuRegister());
   1548       if (index.IsConstant()) {
   1549         size_t offset =
   1550             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
   1551         __ StoreFpuToOffset(kStoreWord, value, obj, offset);
   1552       } else {
   1553         __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
   1554         __ Daddu(TMP, obj, TMP);
   1555         __ StoreFpuToOffset(kStoreWord, value, TMP, data_offset);
   1556       }
   1557       break;
   1558     }
   1559 
   1560     case Primitive::kPrimDouble: {
   1561       uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
   1562       FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
   1563       DCHECK(locations->InAt(2).IsFpuRegister());
   1564       if (index.IsConstant()) {
   1565         size_t offset =
   1566             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
   1567         __ StoreFpuToOffset(kStoreDoubleword, value, obj, offset);
   1568       } else {
   1569         __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
   1570         __ Daddu(TMP, obj, TMP);
   1571         __ StoreFpuToOffset(kStoreDoubleword, value, TMP, data_offset);
   1572       }
   1573       break;
   1574     }
   1575 
   1576     case Primitive::kPrimVoid:
   1577       LOG(FATAL) << "Unreachable type " << instruction->GetType();
   1578       UNREACHABLE();
   1579   }
   1580 
    1581   // For int and object stores, the implicit null check was already recorded in the switch.
   1582   if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
   1583     codegen_->MaybeRecordImplicitNullCheck(instruction);
   1584   }
   1585 }
   1586 
   1587 void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
   1588   LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
   1589       ? LocationSummary::kCallOnSlowPath
   1590       : LocationSummary::kNoCall;
   1591   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
   1592   locations->SetInAt(0, Location::RequiresRegister());
   1593   locations->SetInAt(1, Location::RequiresRegister());
   1594   if (instruction->HasUses()) {
   1595     locations->SetOut(Location::SameAsFirstInput());
   1596   }
   1597 }
   1598 
   1599 void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
   1600   LocationSummary* locations = instruction->GetLocations();
   1601   BoundsCheckSlowPathMIPS64* slow_path =
   1602       new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(instruction);
   1603   codegen_->AddSlowPath(slow_path);
   1604 
   1605   GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
   1606   GpuRegister length = locations->InAt(1).AsRegister<GpuRegister>();
   1607 
   1608   // length is limited by the maximum positive signed 32-bit integer.
   1609   // Unsigned comparison of length and index checks for index < 0
   1610   // and for length <= index simultaneously.
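           // E.g. index = -1 becomes the largest unsigned value, which always compares
           // >= length, so the single Bgeuc covers both failure cases.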
   1611   __ Bgeuc(index, length, slow_path->GetEntryLabel());
   1612 }
   1613 
   1614 void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
   1615   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
   1616       instruction,
   1617       LocationSummary::kCallOnSlowPath);
   1618   locations->SetInAt(0, Location::RequiresRegister());
   1619   locations->SetInAt(1, Location::RequiresRegister());
   1620   // Note that TypeCheckSlowPathMIPS64 uses this register too.
   1621   locations->AddTemp(Location::RequiresRegister());
   1622 }
   1623 
   1624 void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
   1625   LocationSummary* locations = instruction->GetLocations();
   1626   GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
   1627   GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
   1628   GpuRegister obj_cls = locations->GetTemp(0).AsRegister<GpuRegister>();
   1629 
   1630   SlowPathCodeMIPS64* slow_path =
   1631       new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
   1632   codegen_->AddSlowPath(slow_path);
   1633 
   1634   // TODO: avoid this check if we know obj is not null.
   1635   __ Beqzc(obj, slow_path->GetExitLabel());
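           // The fast path below only proves exact class equality; casts to a
           // superclass or interface fall through to the slow path, which redoes the
           // full check and throws only on genuine failure.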
   1636   // Compare the class of `obj` with `cls`.
   1637   __ LoadFromOffset(kLoadUnsignedWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
   1638   __ Bnec(obj_cls, cls, slow_path->GetEntryLabel());
   1639   __ Bind(slow_path->GetExitLabel());
   1640 }
   1641 
   1642 void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
   1643   LocationSummary* locations =
   1644       new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
   1645   locations->SetInAt(0, Location::RequiresRegister());
   1646   if (check->HasUses()) {
   1647     locations->SetOut(Location::SameAsFirstInput());
   1648   }
   1649 }
   1650 
   1651 void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
   1652   // We assume the class is not null.
   1653   SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
   1654       check->GetLoadClass(),
   1655       check,
   1656       check->GetDexPc(),
   1657       true);
   1658   codegen_->AddSlowPath(slow_path);
   1659   GenerateClassInitializationCheck(slow_path,
   1660                                    check->GetLocations()->InAt(0).AsRegister<GpuRegister>());
   1661 }
   1662 
   1663 void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
   1664   Primitive::Type in_type = compare->InputAt(0)->GetType();
   1665 
   1666   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare);
   1667 
   1668   switch (in_type) {
   1669     case Primitive::kPrimBoolean:
   1670     case Primitive::kPrimByte:
   1671     case Primitive::kPrimShort:
   1672     case Primitive::kPrimChar:
   1673     case Primitive::kPrimInt:
   1674     case Primitive::kPrimLong:
   1675       locations->SetInAt(0, Location::RequiresRegister());
   1676       locations->SetInAt(1, Location::RegisterOrConstant(compare->InputAt(1)));
   1677       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   1678       break;
   1679 
   1680     case Primitive::kPrimFloat:
   1681     case Primitive::kPrimDouble:
   1682       locations->SetInAt(0, Location::RequiresFpuRegister());
   1683       locations->SetInAt(1, Location::RequiresFpuRegister());
   1684       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   1685       break;
   1686 
   1687     default:
   1688       LOG(FATAL) << "Unexpected type for compare operation " << in_type;
   1689   }
   1690 }
   1691 
   1692 void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) {
   1693   LocationSummary* locations = instruction->GetLocations();
   1694   GpuRegister res = locations->Out().AsRegister<GpuRegister>();
   1695   Primitive::Type in_type = instruction->InputAt(0)->GetType();
   1696 
   1697   //  0 if: left == right
   1698   //  1 if: left  > right
   1699   // -1 if: left  < right
   1700   switch (in_type) {
   1701     case Primitive::kPrimBoolean:
   1702     case Primitive::kPrimByte:
   1703     case Primitive::kPrimShort:
   1704     case Primitive::kPrimChar:
   1705     case Primitive::kPrimInt:
   1706     case Primitive::kPrimLong: {
   1707       GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
   1708       Location rhs_location = locations->InAt(1);
   1709       bool use_imm = rhs_location.IsConstant();
   1710       GpuRegister rhs = ZERO;
   1711       if (use_imm) {
   1712         if (in_type == Primitive::kPrimLong) {
   1713           int64_t value = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()->AsConstant());
   1714           if (value != 0) {
   1715             rhs = AT;
   1716             __ LoadConst64(rhs, value);
   1717           }
   1718         } else {
   1719           int32_t value = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant()->AsConstant());
   1720           if (value != 0) {
   1721             rhs = AT;
   1722             __ LoadConst32(rhs, value);
   1723           }
   1724         }
   1725       } else {
   1726         rhs = rhs_location.AsRegister<GpuRegister>();
   1727       }
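               // res = (lhs > rhs) - (lhs < rhs): each Slt yields 0 or 1, so the
               // difference is exactly -1, 0 or +1.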
   1728       __ Slt(TMP, lhs, rhs);
   1729       __ Slt(res, rhs, lhs);
   1730       __ Subu(res, res, TMP);
   1731       break;
   1732     }
   1733 
   1734     case Primitive::kPrimFloat: {
   1735       FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
   1736       FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
   1737       Mips64Label done;
   1738       __ CmpEqS(FTMP, lhs, rhs);
   1739       __ LoadConst32(res, 0);
   1740       __ Bc1nez(FTMP, &done);
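               // If either input is NaN, the ordered compares below all yield false, so
               // the result falls through to +1 under gt bias (cmpg) and -1 otherwise
               // (cmpl).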
   1741       if (instruction->IsGtBias()) {
   1742         __ CmpLtS(FTMP, lhs, rhs);
   1743         __ LoadConst32(res, -1);
   1744         __ Bc1nez(FTMP, &done);
   1745         __ LoadConst32(res, 1);
   1746       } else {
   1747         __ CmpLtS(FTMP, rhs, lhs);
   1748         __ LoadConst32(res, 1);
   1749         __ Bc1nez(FTMP, &done);
   1750         __ LoadConst32(res, -1);
   1751       }
   1752       __ Bind(&done);
   1753       break;
   1754     }
   1755 
   1756     case Primitive::kPrimDouble: {
   1757       FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
   1758       FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
   1759       Mips64Label done;
   1760       __ CmpEqD(FTMP, lhs, rhs);
   1761       __ LoadConst32(res, 0);
   1762       __ Bc1nez(FTMP, &done);
   1763       if (instruction->IsGtBias()) {
   1764         __ CmpLtD(FTMP, lhs, rhs);
   1765         __ LoadConst32(res, -1);
   1766         __ Bc1nez(FTMP, &done);
   1767         __ LoadConst32(res, 1);
   1768       } else {
   1769         __ CmpLtD(FTMP, rhs, lhs);
   1770         __ LoadConst32(res, 1);
   1771         __ Bc1nez(FTMP, &done);
   1772         __ LoadConst32(res, -1);
   1773       }
   1774       __ Bind(&done);
   1775       break;
   1776     }
   1777 
   1778     default:
   1779       LOG(FATAL) << "Unimplemented compare type " << in_type;
   1780   }
   1781 }
   1782 
   1783 void LocationsBuilderMIPS64::HandleCondition(HCondition* instruction) {
   1784   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
   1785   switch (instruction->InputAt(0)->GetType()) {
   1786     default:
   1787     case Primitive::kPrimLong:
   1788       locations->SetInAt(0, Location::RequiresRegister());
   1789       locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
   1790       break;
   1791 
   1792     case Primitive::kPrimFloat:
   1793     case Primitive::kPrimDouble:
   1794       locations->SetInAt(0, Location::RequiresFpuRegister());
   1795       locations->SetInAt(1, Location::RequiresFpuRegister());
   1796       break;
   1797   }
   1798   if (!instruction->IsEmittedAtUseSite()) {
   1799     locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   1800   }
   1801 }
   1802 
   1803 void InstructionCodeGeneratorMIPS64::HandleCondition(HCondition* instruction) {
   1804   if (instruction->IsEmittedAtUseSite()) {
   1805     return;
   1806   }
   1807 
   1808   Primitive::Type type = instruction->InputAt(0)->GetType();
   1809   LocationSummary* locations = instruction->GetLocations();
   1810   GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
   1811   Mips64Label true_label;
   1812 
   1813   switch (type) {
   1814     default:
   1815       // Integer case.
   1816       GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ false, locations);
   1817       return;
   1818     case Primitive::kPrimLong:
   1819       GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ true, locations);
   1820       return;
   1821 
   1822     case Primitive::kPrimFloat:
   1823     case Primitive::kPrimDouble:
   1824       // TODO: don't use branches.
   1825       GenerateFpCompareAndBranch(instruction->GetCondition(),
   1826                                  instruction->IsGtBias(),
   1827                                  type,
   1828                                  locations,
   1829                                  &true_label);
   1830       break;
   1831   }
   1832 
   1833   // Convert the branches into the result.
   1834   Mips64Label done;
   1835 
   1836   // False case: result = 0.
   1837   __ LoadConst32(dst, 0);
   1838   __ Bc(&done);
   1839 
   1840   // True case: result = 1.
   1841   __ Bind(&true_label);
   1842   __ LoadConst32(dst, 1);
   1843   __ Bind(&done);
   1844 }
   1845 
   1846 void InstructionCodeGeneratorMIPS64::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
   1847   DCHECK(instruction->IsDiv() || instruction->IsRem());
   1848   Primitive::Type type = instruction->GetResultType();
   1849 
   1850   LocationSummary* locations = instruction->GetLocations();
   1851   Location second = locations->InAt(1);
   1852   DCHECK(second.IsConstant());
   1853 
   1854   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
   1855   GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
   1856   int64_t imm = Int64FromConstant(second.GetConstant());
   1857   DCHECK(imm == 1 || imm == -1);
   1858 
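           // x % +/-1 is always 0; x / 1 is x and x / -1 is -x. The negation simply
           // wraps for the minimum value, matching Java's MIN_VALUE / -1 == MIN_VALUE.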
   1859   if (instruction->IsRem()) {
   1860     __ Move(out, ZERO);
   1861   } else {
   1862     if (imm == -1) {
   1863       if (type == Primitive::kPrimInt) {
   1864         __ Subu(out, ZERO, dividend);
   1865       } else {
   1866         DCHECK_EQ(type, Primitive::kPrimLong);
   1867         __ Dsubu(out, ZERO, dividend);
   1868       }
   1869     } else if (out != dividend) {
   1870       __ Move(out, dividend);
   1871     }
   1872   }
   1873 }
   1874 
   1875 void InstructionCodeGeneratorMIPS64::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
   1876   DCHECK(instruction->IsDiv() || instruction->IsRem());
   1877   Primitive::Type type = instruction->GetResultType();
   1878 
   1879   LocationSummary* locations = instruction->GetLocations();
   1880   Location second = locations->InAt(1);
   1881   DCHECK(second.IsConstant());
   1882 
   1883   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
   1884   GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
   1885   int64_t imm = Int64FromConstant(second.GetConstant());
   1886   uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm));
   1887   int ctz_imm = CTZ(abs_imm);
   1888 
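           // Truncated signed division by 2^k adds a bias of (2^k - 1) to negative
           // dividends before the arithmetic shift, e.g. -13 / 4: (-13 + 3) >> 2 == -3.
           // The Sra/Srl pairs below materialize that bias from the sign bit; for
           // k == 1 a single shift of the sign bit suffices.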
   1889   if (instruction->IsDiv()) {
   1890     if (type == Primitive::kPrimInt) {
   1891       if (ctz_imm == 1) {
   1892         // Fast path for division by +/-2, which is very common.
   1893         __ Srl(TMP, dividend, 31);
   1894       } else {
   1895         __ Sra(TMP, dividend, 31);
   1896         __ Srl(TMP, TMP, 32 - ctz_imm);
   1897       }
   1898       __ Addu(out, dividend, TMP);
   1899       __ Sra(out, out, ctz_imm);
   1900       if (imm < 0) {
   1901         __ Subu(out, ZERO, out);
   1902       }
   1903     } else {
   1904       DCHECK_EQ(type, Primitive::kPrimLong);
   1905       if (ctz_imm == 1) {
   1906         // Fast path for division by +/-2, which is very common.
   1907         __ Dsrl32(TMP, dividend, 31);
   1908       } else {
   1909         __ Dsra32(TMP, dividend, 31);
   1910         if (ctz_imm > 32) {
   1911           __ Dsrl(TMP, TMP, 64 - ctz_imm);
   1912         } else {
   1913           __ Dsrl32(TMP, TMP, 32 - ctz_imm);
   1914         }
   1915       }
   1916       __ Daddu(out, dividend, TMP);
   1917       if (ctz_imm < 32) {
   1918         __ Dsra(out, out, ctz_imm);
   1919       } else {
   1920         __ Dsra32(out, out, ctz_imm - 32);
   1921       }
   1922       if (imm < 0) {
   1923         __ Dsubu(out, ZERO, out);
   1924       }
   1925     }
   1926   } else {
   1927     if (type == Primitive::kPrimInt) {
   1928       if (ctz_imm == 1) {
   1929         // Fast path for modulo +/-2, which is very common.
   1930         __ Sra(TMP, dividend, 31);
   1931         __ Subu(out, dividend, TMP);
   1932         __ Andi(out, out, 1);
   1933         __ Addu(out, out, TMP);
   1934       } else {
   1935         __ Sra(TMP, dividend, 31);
   1936         __ Srl(TMP, TMP, 32 - ctz_imm);
   1937         __ Addu(out, dividend, TMP);
   1938         if (IsUint<16>(abs_imm - 1)) {
   1939           __ Andi(out, out, abs_imm - 1);
   1940         } else {
   1941           __ Sll(out, out, 32 - ctz_imm);
   1942           __ Srl(out, out, 32 - ctz_imm);
   1943         }
   1944         __ Subu(out, out, TMP);
   1945       }
   1946     } else {
   1947       DCHECK_EQ(type, Primitive::kPrimLong);
   1948       if (ctz_imm == 1) {
   1949         // Fast path for modulo +/-2, which is very common.
   1950         __ Dsra32(TMP, dividend, 31);
   1951         __ Dsubu(out, dividend, TMP);
   1952         __ Andi(out, out, 1);
   1953         __ Daddu(out, out, TMP);
   1954       } else {
   1955         __ Dsra32(TMP, dividend, 31);
   1956         if (ctz_imm > 32) {
   1957           __ Dsrl(TMP, TMP, 64 - ctz_imm);
   1958         } else {
   1959           __ Dsrl32(TMP, TMP, 32 - ctz_imm);
   1960         }
   1961         __ Daddu(out, dividend, TMP);
   1962         if (IsUint<16>(abs_imm - 1)) {
   1963           __ Andi(out, out, abs_imm - 1);
   1964         } else {
   1965           if (ctz_imm > 32) {
   1966             __ Dsll(out, out, 64 - ctz_imm);
   1967             __ Dsrl(out, out, 64 - ctz_imm);
   1968           } else {
   1969             __ Dsll32(out, out, 32 - ctz_imm);
   1970             __ Dsrl32(out, out, 32 - ctz_imm);
   1971           }
   1972         }
   1973         __ Dsubu(out, out, TMP);
   1974       }
   1975     }
   1976   }
   1977 }
   1978 
   1979 void InstructionCodeGeneratorMIPS64::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
   1980   DCHECK(instruction->IsDiv() || instruction->IsRem());
   1981 
   1982   LocationSummary* locations = instruction->GetLocations();
   1983   Location second = locations->InAt(1);
   1984   DCHECK(second.IsConstant());
   1985 
   1986   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
   1987   GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
   1988   int64_t imm = Int64FromConstant(second.GetConstant());
   1989 
   1990   Primitive::Type type = instruction->GetResultType();
   1991   DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong) << type;
   1992 
   1993   int64_t magic;
   1994   int shift;
   1995   CalculateMagicAndShiftForDivRem(imm,
   1996                                   (type == Primitive::kPrimLong),
   1997                                   &magic,
   1998                                   &shift);
   1999 
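           // Constant division via multiply-high (the Hacker's Delight "magic number"
           // technique): q ~= (dividend * magic) >> (word size + shift). When magic and
           // imm have opposite signs, the dividend is added or subtracted as a
           // correction, and subtracting the quotient's sign bit rounds toward zero.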
   2000   if (type == Primitive::kPrimInt) {
   2001     __ LoadConst32(TMP, magic);
   2002     __ MuhR6(TMP, dividend, TMP);
   2003 
   2004     if (imm > 0 && magic < 0) {
   2005       __ Addu(TMP, TMP, dividend);
   2006     } else if (imm < 0 && magic > 0) {
   2007       __ Subu(TMP, TMP, dividend);
   2008     }
   2009 
   2010     if (shift != 0) {
   2011       __ Sra(TMP, TMP, shift);
   2012     }
   2013 
   2014     if (instruction->IsDiv()) {
   2015       __ Sra(out, TMP, 31);
   2016       __ Subu(out, TMP, out);
   2017     } else {
   2018       __ Sra(AT, TMP, 31);
   2019       __ Subu(AT, TMP, AT);
   2020       __ LoadConst32(TMP, imm);
   2021       __ MulR6(TMP, AT, TMP);
   2022       __ Subu(out, dividend, TMP);
   2023     }
   2024   } else {
   2025     __ LoadConst64(TMP, magic);
   2026     __ Dmuh(TMP, dividend, TMP);
   2027 
   2028     if (imm > 0 && magic < 0) {
   2029       __ Daddu(TMP, TMP, dividend);
   2030     } else if (imm < 0 && magic > 0) {
   2031       __ Dsubu(TMP, TMP, dividend);
   2032     }
   2033 
   2034     if (shift >= 32) {
   2035       __ Dsra32(TMP, TMP, shift - 32);
   2036     } else if (shift > 0) {
   2037       __ Dsra(TMP, TMP, shift);
   2038     }
   2039 
   2040     if (instruction->IsDiv()) {
   2041       __ Dsra32(out, TMP, 31);
   2042       __ Dsubu(out, TMP, out);
   2043     } else {
   2044       __ Dsra32(AT, TMP, 31);
   2045       __ Dsubu(AT, TMP, AT);
   2046       __ LoadConst64(TMP, imm);
   2047       __ Dmul(TMP, AT, TMP);
   2048       __ Dsubu(out, dividend, TMP);
   2049     }
   2050   }
   2051 }
   2052 
   2053 void InstructionCodeGeneratorMIPS64::GenerateDivRemIntegral(HBinaryOperation* instruction) {
   2054   DCHECK(instruction->IsDiv() || instruction->IsRem());
   2055   Primitive::Type type = instruction->GetResultType();
   2056   DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong) << type;
   2057 
   2058   LocationSummary* locations = instruction->GetLocations();
   2059   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
   2060   Location second = locations->InAt(1);
   2061 
   2062   if (second.IsConstant()) {
   2063     int64_t imm = Int64FromConstant(second.GetConstant());
   2064     if (imm == 0) {
    2065       // Do not generate anything. DivZeroCheck would prevent any code from being executed.
   2066     } else if (imm == 1 || imm == -1) {
   2067       DivRemOneOrMinusOne(instruction);
   2068     } else if (IsPowerOfTwo(AbsOrMin(imm))) {
   2069       DivRemByPowerOfTwo(instruction);
   2070     } else {
   2071       DCHECK(imm <= -2 || imm >= 2);
   2072       GenerateDivRemWithAnyConstant(instruction);
   2073     }
   2074   } else {
   2075     GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
   2076     GpuRegister divisor = second.AsRegister<GpuRegister>();
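             // MIPS64R6 division instructions write the quotient/remainder directly to
             // a GPR; no HI/LO register moves are needed.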
   2077     if (instruction->IsDiv()) {
   2078       if (type == Primitive::kPrimInt)
   2079         __ DivR6(out, dividend, divisor);
   2080       else
   2081         __ Ddiv(out, dividend, divisor);
   2082     } else {
   2083       if (type == Primitive::kPrimInt)
   2084         __ ModR6(out, dividend, divisor);
   2085       else
   2086         __ Dmod(out, dividend, divisor);
   2087     }
   2088   }
   2089 }
   2090 
   2091 void LocationsBuilderMIPS64::VisitDiv(HDiv* div) {
   2092   LocationSummary* locations =
   2093       new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
   2094   switch (div->GetResultType()) {
   2095     case Primitive::kPrimInt:
   2096     case Primitive::kPrimLong:
   2097       locations->SetInAt(0, Location::RequiresRegister());
   2098       locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
   2099       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   2100       break;
   2101 
   2102     case Primitive::kPrimFloat:
   2103     case Primitive::kPrimDouble:
   2104       locations->SetInAt(0, Location::RequiresFpuRegister());
   2105       locations->SetInAt(1, Location::RequiresFpuRegister());
   2106       locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
   2107       break;
   2108 
   2109     default:
   2110       LOG(FATAL) << "Unexpected div type " << div->GetResultType();
   2111   }
   2112 }
   2113 
   2114 void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) {
   2115   Primitive::Type type = instruction->GetType();
   2116   LocationSummary* locations = instruction->GetLocations();
   2117 
   2118   switch (type) {
   2119     case Primitive::kPrimInt:
   2120     case Primitive::kPrimLong:
   2121       GenerateDivRemIntegral(instruction);
   2122       break;
   2123     case Primitive::kPrimFloat:
   2124     case Primitive::kPrimDouble: {
   2125       FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
   2126       FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
   2127       FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
   2128       if (type == Primitive::kPrimFloat)
   2129         __ DivS(dst, lhs, rhs);
   2130       else
   2131         __ DivD(dst, lhs, rhs);
   2132       break;
   2133     }
   2134     default:
   2135       LOG(FATAL) << "Unexpected div type " << type;
   2136   }
   2137 }
   2138 
   2139 void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
   2140   LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
   2141       ? LocationSummary::kCallOnSlowPath
   2142       : LocationSummary::kNoCall;
   2143   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
   2144   locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
   2145   if (instruction->HasUses()) {
   2146     locations->SetOut(Location::SameAsFirstInput());
   2147   }
   2148 }
   2149 
   2150 void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
   2151   SlowPathCodeMIPS64* slow_path =
   2152       new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS64(instruction);
   2153   codegen_->AddSlowPath(slow_path);
   2154   Location value = instruction->GetLocations()->InAt(0);
   2155 
   2156   Primitive::Type type = instruction->GetType();
   2157 
   2158   if (!Primitive::IsIntegralType(type)) {
   2159     LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
   2160     return;
   2161   }
   2162 
   2163   if (value.IsConstant()) {
   2164     int64_t divisor = codegen_->GetInt64ValueOf(value.GetConstant()->AsConstant());
   2165     if (divisor == 0) {
   2166       __ Bc(slow_path->GetEntryLabel());
   2167     } else {
    2168       // A division by a non-zero constant is valid. We don't need to perform
    2169       // any check, so simply fall through.
   2170     }
   2171   } else {
   2172     __ Beqzc(value.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
   2173   }
   2174 }
   2175 
   2176 void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) {
   2177   LocationSummary* locations =
   2178       new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
   2179   locations->SetOut(Location::ConstantLocation(constant));
   2180 }
   2181 
   2182 void InstructionCodeGeneratorMIPS64::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
   2183   // Will be generated at use site.
   2184 }
   2185 
   2186 void LocationsBuilderMIPS64::VisitExit(HExit* exit) {
   2187   exit->SetLocations(nullptr);
   2188 }
   2189 
   2190 void InstructionCodeGeneratorMIPS64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
   2191 }
   2192 
   2193 void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) {
   2194   LocationSummary* locations =
   2195       new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
   2196   locations->SetOut(Location::ConstantLocation(constant));
   2197 }
   2198 
   2199 void InstructionCodeGeneratorMIPS64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
   2200   // Will be generated at use site.
   2201 }
   2202 
   2203 void InstructionCodeGeneratorMIPS64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
   2204   DCHECK(!successor->IsExitBlock());
   2205   HBasicBlock* block = got->GetBlock();
   2206   HInstruction* previous = got->GetPrevious();
   2207   HLoopInformation* info = block->GetLoopInformation();
   2208 
   2209   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
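           // A loop back edge hosts the loop's SuspendCheck, so the thread polls for
           // suspension requests (e.g. for GC) once per iteration.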
   2210     codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
   2211     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
   2212     return;
   2213   }
   2214   if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
   2215     GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
   2216   }
   2217   if (!codegen_->GoesToNextBlock(block, successor)) {
   2218     __ Bc(codegen_->GetLabelOf(successor));
   2219   }
   2220 }
   2221 
   2222 void LocationsBuilderMIPS64::VisitGoto(HGoto* got) {
   2223   got->SetLocations(nullptr);
   2224 }
   2225 
   2226 void InstructionCodeGeneratorMIPS64::VisitGoto(HGoto* got) {
   2227   HandleGoto(got, got->GetSuccessor());
   2228 }
   2229 
   2230 void LocationsBuilderMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
   2231   try_boundary->SetLocations(nullptr);
   2232 }
   2233 
   2234 void InstructionCodeGeneratorMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
   2235   HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
   2236   if (!successor->IsExitBlock()) {
   2237     HandleGoto(try_boundary, successor);
   2238   }
   2239 }
   2240 
   2241 void InstructionCodeGeneratorMIPS64::GenerateIntLongCompare(IfCondition cond,
   2242                                                             bool is64bit,
   2243                                                             LocationSummary* locations) {
   2244   GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
   2245   GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
   2246   Location rhs_location = locations->InAt(1);
   2247   GpuRegister rhs_reg = ZERO;
   2248   int64_t rhs_imm = 0;
   2249   bool use_imm = rhs_location.IsConstant();
   2250   if (use_imm) {
   2251     if (is64bit) {
   2252       rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
   2253     } else {
   2254       rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
   2255     }
   2256   } else {
   2257     rhs_reg = rhs_location.AsRegister<GpuRegister>();
   2258   }
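           // Computed with an unsigned add so that rhs_imm == INT64_MAX cannot trigger
           // signed-overflow UB; the kCondBE/kCondA case below also checks that the
           // increment did not wrap around to 0.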
   2259   int64_t rhs_imm_plus_one = rhs_imm + UINT64_C(1);
   2260 
   2261   switch (cond) {
   2262     case kCondEQ:
   2263     case kCondNE:
   2264       if (use_imm && IsUint<16>(rhs_imm)) {
   2265         __ Xori(dst, lhs, rhs_imm);
   2266       } else {
   2267         if (use_imm) {
   2268           rhs_reg = TMP;
   2269           __ LoadConst64(rhs_reg, rhs_imm);
   2270         }
   2271         __ Xor(dst, lhs, rhs_reg);
   2272       }
   2273       if (cond == kCondEQ) {
   2274         __ Sltiu(dst, dst, 1);
   2275       } else {
   2276         __ Sltu(dst, ZERO, dst);
   2277       }
   2278       break;
   2279 
   2280     case kCondLT:
   2281     case kCondGE:
   2282       if (use_imm && IsInt<16>(rhs_imm)) {
   2283         __ Slti(dst, lhs, rhs_imm);
   2284       } else {
   2285         if (use_imm) {
   2286           rhs_reg = TMP;
   2287           __ LoadConst64(rhs_reg, rhs_imm);
   2288         }
   2289         __ Slt(dst, lhs, rhs_reg);
   2290       }
   2291       if (cond == kCondGE) {
   2292         // Simulate lhs >= rhs via !(lhs < rhs) since there's
   2293         // only the slt instruction but no sge.
   2294         __ Xori(dst, dst, 1);
   2295       }
   2296       break;
   2297 
   2298     case kCondLE:
   2299     case kCondGT:
   2300       if (use_imm && IsInt<16>(rhs_imm_plus_one)) {
   2301         // Simulate lhs <= rhs via lhs < rhs + 1.
   2302         __ Slti(dst, lhs, rhs_imm_plus_one);
   2303         if (cond == kCondGT) {
   2304           // Simulate lhs > rhs via !(lhs <= rhs) since there's
   2305           // only the slti instruction but no sgti.
   2306           __ Xori(dst, dst, 1);
   2307         }
   2308       } else {
   2309         if (use_imm) {
   2310           rhs_reg = TMP;
   2311           __ LoadConst64(rhs_reg, rhs_imm);
   2312         }
   2313         __ Slt(dst, rhs_reg, lhs);
   2314         if (cond == kCondLE) {
   2315           // Simulate lhs <= rhs via !(rhs < lhs) since there's
   2316           // only the slt instruction but no sle.
   2317           __ Xori(dst, dst, 1);
   2318         }
   2319       }
   2320       break;
   2321 
   2322     case kCondB:
   2323     case kCondAE:
   2324       if (use_imm && IsInt<16>(rhs_imm)) {
   2325         // Sltiu sign-extends its 16-bit immediate operand before
   2326         // the comparison and thus lets us compare directly with
   2327         // unsigned values in the ranges [0, 0x7fff] and
   2328         // [0x[ffffffff]ffff8000, 0x[ffffffff]ffffffff].
   2329         __ Sltiu(dst, lhs, rhs_imm);
   2330       } else {
   2331         if (use_imm) {
   2332           rhs_reg = TMP;
   2333           __ LoadConst64(rhs_reg, rhs_imm);
   2334         }
   2335         __ Sltu(dst, lhs, rhs_reg);
   2336       }
   2337       if (cond == kCondAE) {
   2338         // Simulate lhs >= rhs via !(lhs < rhs) since there's
   2339         // only the sltu instruction but no sgeu.
   2340         __ Xori(dst, dst, 1);
   2341       }
   2342       break;
   2343 
   2344     case kCondBE:
   2345     case kCondA:
   2346       if (use_imm && (rhs_imm_plus_one != 0) && IsInt<16>(rhs_imm_plus_one)) {
   2347         // Simulate lhs <= rhs via lhs < rhs + 1.
   2348         // Note that this only works if rhs + 1 does not overflow
   2349         // to 0, hence the check above.
   2350         // Sltiu sign-extends its 16-bit immediate operand before
   2351         // the comparison and thus lets us compare directly with
   2352         // unsigned values in the ranges [0, 0x7fff] and
   2353         // [0x[ffffffff]ffff8000, 0x[ffffffff]ffffffff].
   2354         __ Sltiu(dst, lhs, rhs_imm_plus_one);
   2355         if (cond == kCondA) {
   2356           // Simulate lhs > rhs via !(lhs <= rhs) since there's
   2357           // only the sltiu instruction but no sgtiu.
   2358           __ Xori(dst, dst, 1);
   2359         }
   2360       } else {
   2361         if (use_imm) {
   2362           rhs_reg = TMP;
   2363           __ LoadConst64(rhs_reg, rhs_imm);
   2364         }
   2365         __ Sltu(dst, rhs_reg, lhs);
   2366         if (cond == kCondBE) {
   2367           // Simulate lhs <= rhs via !(rhs < lhs) since there's
   2368           // only the sltu instruction but no sleu.
   2369           __ Xori(dst, dst, 1);
   2370         }
   2371       }
   2372       break;
   2373   }
   2374 }
   2375 
   2376 void InstructionCodeGeneratorMIPS64::GenerateIntLongCompareAndBranch(IfCondition cond,
   2377                                                                      bool is64bit,
   2378                                                                      LocationSummary* locations,
   2379                                                                      Mips64Label* label) {
   2380   GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
   2381   Location rhs_location = locations->InAt(1);
   2382   GpuRegister rhs_reg = ZERO;
   2383   int64_t rhs_imm = 0;
   2384   bool use_imm = rhs_location.IsConstant();
   2385   if (use_imm) {
   2386     if (is64bit) {
   2387       rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
   2388     } else {
   2389       rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
   2390     }
   2391   } else {
   2392     rhs_reg = rhs_location.AsRegister<GpuRegister>();
   2393   }
   2394 
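           // A zero comparand folds into the R6 compact branch-on-zero forms below,
           // avoiding a constant load into TMP.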
   2395   if (use_imm && rhs_imm == 0) {
   2396     switch (cond) {
   2397       case kCondEQ:
   2398       case kCondBE:  // <= 0 if zero
   2399         __ Beqzc(lhs, label);
   2400         break;
   2401       case kCondNE:
   2402       case kCondA:  // > 0 if non-zero
   2403         __ Bnezc(lhs, label);
   2404         break;
   2405       case kCondLT:
   2406         __ Bltzc(lhs, label);
   2407         break;
   2408       case kCondGE:
   2409         __ Bgezc(lhs, label);
   2410         break;
   2411       case kCondLE:
   2412         __ Blezc(lhs, label);
   2413         break;
   2414       case kCondGT:
   2415         __ Bgtzc(lhs, label);
   2416         break;
   2417       case kCondB:  // always false
   2418         break;
   2419       case kCondAE:  // always true
   2420         __ Bc(label);
   2421         break;
   2422     }
   2423   } else {
   2424     if (use_imm) {
   2425       rhs_reg = TMP;
   2426       __ LoadConst64(rhs_reg, rhs_imm);
   2427     }
   2428     switch (cond) {
   2429       case kCondEQ:
   2430         __ Beqc(lhs, rhs_reg, label);
   2431         break;
   2432       case kCondNE:
   2433         __ Bnec(lhs, rhs_reg, label);
   2434         break;
   2435       case kCondLT:
   2436         __ Bltc(lhs, rhs_reg, label);
   2437         break;
   2438       case kCondGE:
   2439         __ Bgec(lhs, rhs_reg, label);
   2440         break;
   2441       case kCondLE:
   2442         __ Bgec(rhs_reg, lhs, label);
   2443         break;
   2444       case kCondGT:
   2445         __ Bltc(rhs_reg, lhs, label);
   2446         break;
   2447       case kCondB:
   2448         __ Bltuc(lhs, rhs_reg, label);
   2449         break;
   2450       case kCondAE:
   2451         __ Bgeuc(lhs, rhs_reg, label);
   2452         break;
   2453       case kCondBE:
   2454         __ Bgeuc(rhs_reg, lhs, label);
   2455         break;
   2456       case kCondA:
   2457         __ Bltuc(rhs_reg, lhs, label);
   2458         break;
   2459     }
   2460   }
   2461 }
   2462 
   2463 void InstructionCodeGeneratorMIPS64::GenerateFpCompareAndBranch(IfCondition cond,
   2464                                                                 bool gt_bias,
   2465                                                                 Primitive::Type type,
   2466                                                                 LocationSummary* locations,
   2467                                                                 Mips64Label* label) {
   2468   FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
   2469   FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
   2470   if (type == Primitive::kPrimFloat) {
   2471     switch (cond) {
   2472       case kCondEQ:
   2473         __ CmpEqS(FTMP, lhs, rhs);
   2474         __ Bc1nez(FTMP, label);
   2475         break;
   2476       case kCondNE:
   2477         __ CmpEqS(FTMP, lhs, rhs);
   2478         __ Bc1eqz(FTMP, label);
   2479         break;
   2480       case kCondLT:
   2481         if (gt_bias) {
   2482           __ CmpLtS(FTMP, lhs, rhs);
   2483         } else {
   2484           __ CmpUltS(FTMP, lhs, rhs);
   2485         }
   2486         __ Bc1nez(FTMP, label);
   2487         break;
   2488       case kCondLE:
   2489         if (gt_bias) {
   2490           __ CmpLeS(FTMP, lhs, rhs);
   2491         } else {
   2492           __ CmpUleS(FTMP, lhs, rhs);
   2493         }
   2494         __ Bc1nez(FTMP, label);
   2495         break;
   2496       case kCondGT:
   2497         if (gt_bias) {
   2498           __ CmpUltS(FTMP, rhs, lhs);
   2499         } else {
   2500           __ CmpLtS(FTMP, rhs, lhs);
   2501         }
   2502         __ Bc1nez(FTMP, label);
   2503         break;
   2504       case kCondGE:
   2505         if (gt_bias) {
   2506           __ CmpUleS(FTMP, rhs, lhs);
   2507         } else {
   2508           __ CmpLeS(FTMP, rhs, lhs);
   2509         }
   2510         __ Bc1nez(FTMP, label);
   2511         break;
   2512       default:
   2513         LOG(FATAL) << "Unexpected non-floating-point condition";
   2514     }
   2515   } else {
   2516     DCHECK_EQ(type, Primitive::kPrimDouble);
   2517     switch (cond) {
   2518       case kCondEQ:
   2519         __ CmpEqD(FTMP, lhs, rhs);
   2520         __ Bc1nez(FTMP, label);
   2521         break;
   2522       case kCondNE:
   2523         __ CmpEqD(FTMP, lhs, rhs);
   2524         __ Bc1eqz(FTMP, label);
   2525         break;
   2526       case kCondLT:
   2527         if (gt_bias) {
   2528           __ CmpLtD(FTMP, lhs, rhs);
   2529         } else {
   2530           __ CmpUltD(FTMP, lhs, rhs);
   2531         }
   2532         __ Bc1nez(FTMP, label);
   2533         break;
   2534       case kCondLE:
   2535         if (gt_bias) {
   2536           __ CmpLeD(FTMP, lhs, rhs);
   2537         } else {
   2538           __ CmpUleD(FTMP, lhs, rhs);
   2539         }
   2540         __ Bc1nez(FTMP, label);
   2541         break;
   2542       case kCondGT:
   2543         if (gt_bias) {
   2544           __ CmpUltD(FTMP, rhs, lhs);
   2545         } else {
   2546           __ CmpLtD(FTMP, rhs, lhs);
   2547         }
   2548         __ Bc1nez(FTMP, label);
   2549         break;
   2550       case kCondGE:
   2551         if (gt_bias) {
   2552           __ CmpUleD(FTMP, rhs, lhs);
   2553         } else {
   2554           __ CmpLeD(FTMP, rhs, lhs);
   2555         }
   2556         __ Bc1nez(FTMP, label);
   2557         break;
   2558       default:
   2559         LOG(FATAL) << "Unexpected non-floating-point condition";
   2560     }
   2561   }
   2562 }
   2563 
   2564 void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruction,
   2565                                                            size_t condition_input_index,
   2566                                                            Mips64Label* true_target,
   2567                                                            Mips64Label* false_target) {
   2568   HInstruction* cond = instruction->InputAt(condition_input_index);
   2569 
   2570   if (true_target == nullptr && false_target == nullptr) {
   2571     // Nothing to do. The code always falls through.
   2572     return;
   2573   } else if (cond->IsIntConstant()) {
   2574     // Constant condition, statically compared against "true" (integer value 1).
   2575     if (cond->AsIntConstant()->IsTrue()) {
   2576       if (true_target != nullptr) {
   2577         __ Bc(true_target);
   2578       }
   2579     } else {
   2580       DCHECK(cond->AsIntConstant()->IsFalse()) << cond->AsIntConstant()->GetValue();
   2581       if (false_target != nullptr) {
   2582         __ Bc(false_target);
   2583       }
   2584     }
   2585     return;
   2586   }
   2587 
   2588   // The following code generates these patterns:
   2589   //  (1) true_target == nullptr && false_target != nullptr
   2590   //        - opposite condition true => branch to false_target
   2591   //  (2) true_target != nullptr && false_target == nullptr
   2592   //        - condition true => branch to true_target
   2593   //  (3) true_target != nullptr && false_target != nullptr
   2594   //        - condition true => branch to true_target
   2595   //        - branch to false_target
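          // Illustrative sketch of case (3) for an integer "if (a == b)" (the
          // exact compare/branch forms come from the helpers called below):
          //   beqc a, b, true_target
          //   bc   false_target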
   2596   if (IsBooleanValueOrMaterializedCondition(cond)) {
   2597     // The condition instruction has been materialized; compare its output to 0.
   2598     Location cond_val = instruction->GetLocations()->InAt(condition_input_index);
   2599     DCHECK(cond_val.IsRegister());
   2600     if (true_target == nullptr) {
   2601       __ Beqzc(cond_val.AsRegister<GpuRegister>(), false_target);
   2602     } else {
   2603       __ Bnezc(cond_val.AsRegister<GpuRegister>(), true_target);
   2604     }
   2605   } else {
   2606     // The condition instruction has not been materialized; use its inputs as
   2607     // the comparison and its condition as the branch condition.
   2608     HCondition* condition = cond->AsCondition();
   2609     Primitive::Type type = condition->InputAt(0)->GetType();
   2610     LocationSummary* locations = cond->GetLocations();
   2611     IfCondition if_cond = condition->GetCondition();
   2612     Mips64Label* branch_target = true_target;
   2613 
   2614     if (true_target == nullptr) {
   2615       if_cond = condition->GetOppositeCondition();
   2616       branch_target = false_target;
   2617     }
   2618 
   2619     switch (type) {
   2620       default:
   2621         GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ false, locations, branch_target);
   2622         break;
   2623       case Primitive::kPrimLong:
   2624         GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ true, locations, branch_target);
   2625         break;
   2626       case Primitive::kPrimFloat:
   2627       case Primitive::kPrimDouble:
   2628         GenerateFpCompareAndBranch(if_cond, condition->IsGtBias(), type, locations, branch_target);
   2629         break;
   2630     }
   2631   }
   2632 
   2633   // If neither branch falls through (case 3), the conditional branch to `true_target`
   2634   // was already emitted (as in case 2), so we need to emit a jump to `false_target`.
   2635   if (true_target != nullptr && false_target != nullptr) {
   2636     __ Bc(false_target);
   2637   }
   2638 }
   2639 
   2640 void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) {
   2641   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
   2642   if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
   2643     locations->SetInAt(0, Location::RequiresRegister());
   2644   }
   2645 }
   2646 
   2647 void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
   2648   HBasicBlock* true_successor = if_instr->IfTrueSuccessor();
   2649   HBasicBlock* false_successor = if_instr->IfFalseSuccessor();
   2650   Mips64Label* true_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ?
   2651       nullptr : codegen_->GetLabelOf(true_successor);
   2652   Mips64Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
   2653       nullptr : codegen_->GetLabelOf(false_successor);
   2654   GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
   2655 }
   2656 
   2657 void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
   2658   LocationSummary* locations = new (GetGraph()->GetArena())
   2659       LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
   2660   if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
   2661     locations->SetInAt(0, Location::RequiresRegister());
   2662   }
   2663 }
   2664 
   2665 void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
   2666   SlowPathCodeMIPS64* slow_path =
   2667       deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS64>(deoptimize);
   2668   GenerateTestAndBranch(deoptimize,
   2669                         /* condition_input_index */ 0,
   2670                         slow_path->GetEntryLabel(),
   2671                         /* false_target */ nullptr);
   2672 }
   2673 
   2674 void LocationsBuilderMIPS64::VisitSelect(HSelect* select) {
   2675   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
   2676   if (Primitive::IsFloatingPointType(select->GetType())) {
   2677     locations->SetInAt(0, Location::RequiresFpuRegister());
   2678     locations->SetInAt(1, Location::RequiresFpuRegister());
   2679   } else {
   2680     locations->SetInAt(0, Location::RequiresRegister());
   2681     locations->SetInAt(1, Location::RequiresRegister());
   2682   }
   2683   if (IsBooleanValueOrMaterializedCondition(select->GetCondition())) {
   2684     locations->SetInAt(2, Location::RequiresRegister());
   2685   }
   2686   locations->SetOut(Location::SameAsFirstInput());
   2687 }
   2688 
   2689 void InstructionCodeGeneratorMIPS64::VisitSelect(HSelect* select) {
   2690   LocationSummary* locations = select->GetLocations();
   2691   Mips64Label false_target;
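          // Out shares a register with the false value (InAt(0)): when the
          // condition is false we branch past the move and keep it; otherwise we
          // overwrite it with the true value (InAt(1)).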
   2692   GenerateTestAndBranch(select,
   2693                         /* condition_input_index */ 2,
   2694                         /* true_target */ nullptr,
   2695                         &false_target);
   2696   codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
   2697   __ Bind(&false_target);
   2698 }
   2699 
   2700 void LocationsBuilderMIPS64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
   2701   new (GetGraph()->GetArena()) LocationSummary(info);
   2702 }
   2703 
   2704 void InstructionCodeGeneratorMIPS64::VisitNativeDebugInfo(HNativeDebugInfo*) {
   2705   // MaybeRecordNativeDebugInfo is already called implicitly in CodeGenerator::Compile.
   2706 }
   2707 
   2708 void CodeGeneratorMIPS64::GenerateNop() {
   2709   __ Nop();
   2710 }
   2711 
   2712 void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
   2713                                             const FieldInfo& field_info ATTRIBUTE_UNUSED) {
   2714   LocationSummary* locations =
   2715       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
   2716   locations->SetInAt(0, Location::RequiresRegister());
   2717   if (Primitive::IsFloatingPointType(instruction->GetType())) {
   2718     locations->SetOut(Location::RequiresFpuRegister());
   2719   } else {
   2720     locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   2721   }
   2722 }
   2723 
   2724 void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
   2725                                                     const FieldInfo& field_info) {
   2726   Primitive::Type type = field_info.GetFieldType();
   2727   LocationSummary* locations = instruction->GetLocations();
   2728   GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
   2729   LoadOperandType load_type = kLoadUnsignedByte;
   2730   switch (type) {
   2731     case Primitive::kPrimBoolean:
   2732       load_type = kLoadUnsignedByte;
   2733       break;
   2734     case Primitive::kPrimByte:
   2735       load_type = kLoadSignedByte;
   2736       break;
   2737     case Primitive::kPrimShort:
   2738       load_type = kLoadSignedHalfword;
   2739       break;
   2740     case Primitive::kPrimChar:
   2741       load_type = kLoadUnsignedHalfword;
   2742       break;
   2743     case Primitive::kPrimInt:
   2744     case Primitive::kPrimFloat:
   2745       load_type = kLoadWord;
   2746       break;
   2747     case Primitive::kPrimLong:
   2748     case Primitive::kPrimDouble:
   2749       load_type = kLoadDoubleword;
   2750       break;
   2751     case Primitive::kPrimNot:
   2752       load_type = kLoadUnsignedWord;
   2753       break;
   2754     case Primitive::kPrimVoid:
   2755       LOG(FATAL) << "Unreachable type " << type;
   2756       UNREACHABLE();
   2757   }
   2758   if (!Primitive::IsFloatingPointType(type)) {
   2759     DCHECK(locations->Out().IsRegister());
   2760     GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
   2761     __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
   2762   } else {
   2763     DCHECK(locations->Out().IsFpuRegister());
   2764     FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
   2765     __ LoadFpuFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
   2766   }
   2767 
   2768   codegen_->MaybeRecordImplicitNullCheck(instruction);
   2769   // TODO: memory barrier?
   2770 }
   2771 
   2772 void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
   2773                                             const FieldInfo& field_info ATTRIBUTE_UNUSED) {
   2774   LocationSummary* locations =
   2775       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
   2776   locations->SetInAt(0, Location::RequiresRegister());
   2777   if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
   2778     locations->SetInAt(1, Location::RequiresFpuRegister());
   2779   } else {
   2780     locations->SetInAt(1, Location::RequiresRegister());
   2781   }
   2782 }
   2783 
   2784 void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
   2785                                                     const FieldInfo& field_info,
   2786                                                     bool value_can_be_null) {
   2787   Primitive::Type type = field_info.GetFieldType();
   2788   LocationSummary* locations = instruction->GetLocations();
   2789   GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
   2790   StoreOperandType store_type = kStoreByte;
   2791   switch (type) {
   2792     case Primitive::kPrimBoolean:
   2793     case Primitive::kPrimByte:
   2794       store_type = kStoreByte;
   2795       break;
   2796     case Primitive::kPrimShort:
   2797     case Primitive::kPrimChar:
   2798       store_type = kStoreHalfword;
   2799       break;
   2800     case Primitive::kPrimInt:
   2801     case Primitive::kPrimFloat:
   2802     case Primitive::kPrimNot:
   2803       store_type = kStoreWord;
   2804       break;
   2805     case Primitive::kPrimLong:
   2806     case Primitive::kPrimDouble:
   2807       store_type = kStoreDoubleword;
   2808       break;
   2809     case Primitive::kPrimVoid:
   2810       LOG(FATAL) << "Unreachable type " << type;
   2811       UNREACHABLE();
   2812   }
   2813   if (!Primitive::IsFloatingPointType(type)) {
   2814     DCHECK(locations->InAt(1).IsRegister());
   2815     GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
   2816     __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
   2817   } else {
   2818     DCHECK(locations->InAt(1).IsFpuRegister());
   2819     FpuRegister src = locations->InAt(1).AsFpuRegister<FpuRegister>();
   2820     __ StoreFpuToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
   2821   }
   2822 
   2823   codegen_->MaybeRecordImplicitNullCheck(instruction);
   2824   // TODO: memory barriers?
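          // Reference stores must mark the GC card of the holder object so the
          // garbage collector can find the new cross-space reference.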
   2825   if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
   2826     DCHECK(locations->InAt(1).IsRegister());
   2827     GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
   2828     codegen_->MarkGCCard(obj, src, value_can_be_null);
   2829   }
   2830 }
   2831 
   2832 void LocationsBuilderMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
   2833   HandleFieldGet(instruction, instruction->GetFieldInfo());
   2834 }
   2835 
   2836 void InstructionCodeGeneratorMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
   2837   HandleFieldGet(instruction, instruction->GetFieldInfo());
   2838 }
   2839 
   2840 void LocationsBuilderMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
   2841   HandleFieldSet(instruction, instruction->GetFieldInfo());
   2842 }
   2843 
   2844 void InstructionCodeGeneratorMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
   2845   HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
   2846 }
   2847 
   2848 void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
   2849   LocationSummary::CallKind call_kind =
   2850       instruction->IsExactCheck() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
   2851   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
   2852   locations->SetInAt(0, Location::RequiresRegister());
   2853   locations->SetInAt(1, Location::RequiresRegister());
   2854   // The output overlaps the inputs.
   2855   // Note that TypeCheckSlowPathMIPS64 uses this register too.
   2856   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
   2857 }
   2858 
   2859 void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
   2860   LocationSummary* locations = instruction->GetLocations();
   2861   GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
   2862   GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
   2863   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
   2864 
   2865   Mips64Label done;
   2866 
   2867   // Return 0 if `obj` is null.
   2868   // TODO: Avoid this check if we know `obj` is not null.
   2869   __ Move(out, ZERO);
   2870   __ Beqzc(obj, &done);
   2871 
   2872   // Compare the class of `obj` with `cls`.
   2873   __ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value());
   2874   if (instruction->IsExactCheck()) {
   2875     // Classes must be equal for the instanceof to succeed.
   2876     __ Xor(out, out, cls);
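            // Sltiu computes an unsigned (out < 1), i.e. out = (out == 0), which
            // is 1 exactly when the XOR found the classes equal.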
   2877     __ Sltiu(out, out, 1);
   2878   } else {
   2879     // If the classes are not equal, we go into a slow path.
   2880     DCHECK(locations->OnlyCallsOnSlowPath());
   2881     SlowPathCodeMIPS64* slow_path =
   2882         new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
   2883     codegen_->AddSlowPath(slow_path);
   2884     __ Bnec(out, cls, slow_path->GetEntryLabel());
   2885     __ LoadConst32(out, 1);
   2886     __ Bind(slow_path->GetExitLabel());
   2887   }
   2888 
   2889   __ Bind(&done);
   2890 }
   2891 
   2892 void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
   2893   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
   2894   locations->SetOut(Location::ConstantLocation(constant));
   2895 }
   2896 
   2897 void InstructionCodeGeneratorMIPS64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
   2898   // Will be generated at use site.
   2899 }
   2900 
   2901 void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) {
   2902   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
   2903   locations->SetOut(Location::ConstantLocation(constant));
   2904 }
   2905 
   2906 void InstructionCodeGeneratorMIPS64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
   2907   // Will be generated at use site.
   2908 }
   2909 
   2910 void LocationsBuilderMIPS64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
   2911   // The trampoline uses the same calling convention as the dex calling convention,
   2912   // except that instead of loading A0 with the target Method*, A0 will contain
   2913   // the method_idx.
   2914   HandleInvoke(invoke);
   2915 }
   2916 
   2917 void InstructionCodeGeneratorMIPS64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
   2918   codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
   2919 }
   2920 
   2921 void LocationsBuilderMIPS64::HandleInvoke(HInvoke* invoke) {
   2922   InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
   2923   CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
   2924 }
   2925 
   2926 void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
   2927   HandleInvoke(invoke);
   2928   // The register T0 is required to hold the hidden argument for
   2929   // art_quick_imt_conflict_trampoline, so reserve it as an extra temp.
   2930   invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
   2931 }
   2932 
   2933 void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
   2934   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   2935   GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
   2936   Location receiver = invoke->GetLocations()->InAt(0);
   2937   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   2938   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
   2939 
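          // Illustrative shape of the dispatch sequence below (sketch only):
          //   lwu  temp, [receiver + class_offset]    // compressed heap reference
          //   ld   temp, [temp + imt_ptr_offset]
          //   ld   temp, [temp + method_offset]       // IMT slot
          //   ld   T9,   [temp + entry_point_offset]
          //   jalr T9 ; nop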
   2940   // Set the hidden argument.
   2941   __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<GpuRegister>(),
   2942                  invoke->GetDexMethodIndex());
   2943 
   2944   // temp = object->GetClass();
   2945   if (receiver.IsStackSlot()) {
   2946     __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
   2947     __ LoadFromOffset(kLoadUnsignedWord, temp, temp, class_offset);
   2948   } else {
   2949     __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
   2950   }
   2951   codegen_->MaybeRecordImplicitNullCheck(invoke);
   2952   __ LoadFromOffset(kLoadDoubleword, temp, temp,
   2953       mirror::Class::ImtPtrOffset(kMips64PointerSize).Uint32Value());
   2954   uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
   2955       invoke->GetImtIndex() % ImTable::kSize, kMips64PointerSize));
   2956   // temp = temp->GetImtEntryAt(method_offset);
   2957   __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
   2958   // T9 = temp->GetEntryPoint();
   2959   __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
   2960   // T9();
   2961   __ Jalr(T9);
   2962   __ Nop();
   2963   DCHECK(!codegen_->IsLeafMethod());
   2964   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
   2965 }
   2966 
   2967 void LocationsBuilderMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   2968   IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
   2969   if (intrinsic.TryDispatch(invoke)) {
   2970     return;
   2971   }
   2972 
   2973   HandleInvoke(invoke);
   2974 }
   2975 
   2976 void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
   2977   // Explicit clinit checks triggered by static invokes must have been pruned by
   2978   // art::PrepareForRegisterAllocation.
   2979   DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
   2980 
   2981   IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
   2982   if (intrinsic.TryDispatch(invoke)) {
   2983     return;
   2984   }
   2985 
   2986   HandleInvoke(invoke);
   2987 
   2988   // SetupBlockedRegisters() already blocks registers S2-S8 because they may be
   2989   // clobbered elsewhere; reduce register pressure further by not allocating a
   2990   // register for the current method pointer, as on the x86 baseline.
   2991   // TODO: remove this once all the issues with register saving/restoring are
   2992   // sorted out.
   2993   if (invoke->HasCurrentMethodInput()) {
   2994     LocationSummary* locations = invoke->GetLocations();
   2995     Location location = locations->InAt(invoke->GetSpecialInputIndex());
   2996     if (location.IsUnallocated() && location.GetPolicy() == Location::kRequiresRegister) {
   2997       locations->SetInAt(invoke->GetSpecialInputIndex(), Location::NoLocation());
   2998     }
   2999   }
   3000 }
   3001 
   3002 static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
   3003   if (invoke->GetLocations()->Intrinsified()) {
   3004     IntrinsicCodeGeneratorMIPS64 intrinsic(codegen);
   3005     intrinsic.Dispatch(invoke);
   3006     return true;
   3007   }
   3008   return false;
   3009 }
   3010 
   3011 HLoadString::LoadKind CodeGeneratorMIPS64::GetSupportedLoadStringKind(
   3012     HLoadString::LoadKind desired_string_load_kind ATTRIBUTE_UNUSED) {
   3013   // TODO: Implement other kinds.
   3014   return HLoadString::LoadKind::kDexCacheViaMethod;
   3015 }
   3016 
   3017 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS64::GetSupportedInvokeStaticOrDirectDispatch(
   3018       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
   3019       MethodReference target_method ATTRIBUTE_UNUSED) {
   3020   switch (desired_dispatch_info.method_load_kind) {
   3021     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
   3022     case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
   3023       // TODO: Implement these types. For the moment, we fall back to kDexCacheViaMethod.
   3024       return HInvokeStaticOrDirect::DispatchInfo {
   3025         HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
   3026         HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
   3027         0u,
   3028         0u
   3029       };
   3030     default:
   3031       break;
   3032   }
   3033   switch (desired_dispatch_info.code_ptr_location) {
   3034     case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
   3035     case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
   3036       // TODO: Implement these types. For the moment, we fall back to kCallArtMethod.
   3037       return HInvokeStaticOrDirect::DispatchInfo {
   3038         desired_dispatch_info.method_load_kind,
   3039         HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
   3040         desired_dispatch_info.method_load_data,
   3041         0u
   3042       };
   3043     default:
   3044       return desired_dispatch_info;
   3045   }
   3046 }
   3047 
   3048 void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
   3049   // All registers are assumed to be correctly set up per the calling convention.
   3050 
   3051   Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
   3052   switch (invoke->GetMethodLoadKind()) {
   3053     case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
   3054       // temp = thread->string_init_entrypoint
   3055       __ LoadFromOffset(kLoadDoubleword,
   3056                         temp.AsRegister<GpuRegister>(),
   3057                         TR,
   3058                         invoke->GetStringInitOffset());
   3059       break;
   3060     case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
   3061       callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
   3062       break;
   3063     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
   3064       __ LoadConst64(temp.AsRegister<GpuRegister>(), invoke->GetMethodAddress());
   3065       break;
   3066     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
   3067     case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
   3068       // TODO: Implement these types.
   3069       // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
   3070       LOG(FATAL) << "Unsupported";
   3071       UNREACHABLE();
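            // kDexCacheViaMethod resolves the callee through the dex cache:
            // ArtMethod* -> dex_cache_resolved_methods_ -> entry at the dex method
            // index. Sketch of the loads below, once method_reg holds the current
            // method:
            //   ld reg, [method_reg + dex_cache_resolved_methods_offset]
            //   ld reg, [reg + pointer-sized entry for index_in_cache]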
   3072     case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
   3073       Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
   3074       GpuRegister reg = temp.AsRegister<GpuRegister>();
   3075       GpuRegister method_reg;
   3076       if (current_method.IsRegister()) {
   3077         method_reg = current_method.AsRegister<GpuRegister>();
   3078       } else {
   3079         // TODO: use the appropriate DCHECK() here if possible.
   3080         // DCHECK(invoke->GetLocations()->Intrinsified());
   3081         DCHECK(!current_method.IsValid());
   3082         method_reg = reg;
   3083         __ Ld(reg, SP, kCurrentMethodStackOffset);
   3084       }
   3085 
   3086       // temp = temp->dex_cache_resolved_methods_;
   3087       __ LoadFromOffset(kLoadDoubleword,
   3088                         reg,
   3089                         method_reg,
   3090                         ArtMethod::DexCacheResolvedMethodsOffset(kMips64PointerSize).Int32Value());
   3091       // temp = temp[index_in_cache];
   3092       // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
   3093       uint32_t index_in_cache = invoke->GetDexMethodIndex();
   3094       __ LoadFromOffset(kLoadDoubleword,
   3095                         reg,
   3096                         reg,
   3097                         CodeGenerator::GetCachePointerOffset(index_in_cache));
   3098       break;
   3099     }
   3100   }
   3101 
   3102   switch (invoke->GetCodePtrLocation()) {
   3103     case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
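              // Recursive call: jump and link straight back to this method's own
              // frame entry, using T9 to materialize the target address.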
   3104       __ Jialc(&frame_entry_label_, T9);
   3105       break;
   3106     case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
   3107       // T9 = invoke->GetDirectCodePtr();
   3108       __ LoadConst64(T9, invoke->GetDirectCodePtr());
   3109       // T9()
   3110       __ Jalr(T9);
   3111       __ Nop();
   3112       break;
   3113     case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
   3114     case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
   3115       // TODO: Implement these types.
   3116       // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
   3117       LOG(FATAL) << "Unsupported";
   3118       UNREACHABLE();
   3119     case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
   3120       // T9 = callee_method->entry_point_from_quick_compiled_code_;
   3121       __ LoadFromOffset(kLoadDoubleword,
   3122                         T9,
   3123                         callee_method.AsRegister<GpuRegister>(),
   3124                         ArtMethod::EntryPointFromQuickCompiledCodeOffset(
   3125                             kMips64DoublewordSize).Int32Value());
   3126       // T9()
   3127       __ Jalr(T9);
   3128       __ Nop();
   3129       break;
   3130   }
   3131   DCHECK(!IsLeafMethod());
   3132 }
   3133 
   3134 void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
   3135   // Explicit clinit checks triggered by static invokes must have been pruned by
   3136   // art::PrepareForRegisterAllocation.
   3137   DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
   3138 
   3139   if (TryGenerateIntrinsicCode(invoke, codegen_)) {
   3140     return;
   3141   }
   3142 
   3143   LocationSummary* locations = invoke->GetLocations();
   3144   codegen_->GenerateStaticOrDirectCall(invoke,
   3145                                        locations->HasTemps()
   3146                                            ? locations->GetTemp(0)
   3147                                            : Location::NoLocation());
   3148   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
   3149 }
   3150 
   3151 void CodeGeneratorMIPS64::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_location) {
   3152   // Use the calling convention instead of the location of the receiver, as
   3153   // intrinsics may have put the receiver in a different register. In the intrinsics
   3154   // slow path, the arguments have been moved to the right place, so here we are
   3155   // guaranteed that the receiver is the first register of the calling convention.
   3156   InvokeDexCallingConvention calling_convention;
   3157   GpuRegister receiver = calling_convention.GetRegisterAt(0);
   3158 
   3159   GpuRegister temp = temp_location.AsRegister<GpuRegister>();
   3160   size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
   3161       invoke->GetVTableIndex(), kMips64PointerSize).SizeValue();
   3162   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   3163   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
   3164 
   3165   // temp = object->GetClass();
   3166   __ LoadFromOffset(kLoadUnsignedWord, temp, receiver, class_offset);
   3167   MaybeRecordImplicitNullCheck(invoke);
   3168   // temp = temp->GetMethodAt(method_offset);
   3169   __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
   3170   // T9 = temp->GetEntryPoint();
   3171   __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
   3172   // T9();
   3173   __ Jalr(T9);
   3174   __ Nop();
   3175 }
   3176 
   3177 void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   3178   if (TryGenerateIntrinsicCode(invoke, codegen_)) {
   3179     return;
   3180   }
   3181 
   3182   codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
   3183   DCHECK(!codegen_->IsLeafMethod());
   3184   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
   3185 }
   3186 
   3187 void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
   3188   InvokeRuntimeCallingConvention calling_convention;
   3189   CodeGenerator::CreateLoadClassLocationSummary(
   3190       cls,
   3191       Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
   3192       calling_convention.GetReturnLocation(cls->GetType()));
   3193 }
   3194 
   3195 void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
   3196   LocationSummary* locations = cls->GetLocations();
   3197   if (cls->NeedsAccessCheck()) {
   3198     codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
   3199     codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
   3200                             cls,
   3201                             cls->GetDexPc(),
   3202                             nullptr);
   3203     CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
   3204     return;
   3205   }
   3206 
   3207   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
   3208   GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
   3209   if (cls->IsReferrersClass()) {
   3210     DCHECK(!cls->CanCallRuntime());
   3211     DCHECK(!cls->MustGenerateClinitCheck());
   3212     __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
   3213                       ArtMethod::DeclaringClassOffset().Int32Value());
   3214   } else {
   3215     __ LoadFromOffset(kLoadDoubleword, out, current_method,
   3216                       ArtMethod::DexCacheResolvedTypesOffset(kMips64PointerSize).Int32Value());
   3217     __ LoadFromOffset(
   3218         kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
   3219     // TODO: We will need a read barrier here.
   3220     if (!cls->IsInDexCache() || cls->MustGenerateClinitCheck()) {
   3221       DCHECK(cls->CanCallRuntime());
   3222       SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
   3223           cls,
   3224           cls,
   3225           cls->GetDexPc(),
   3226           cls->MustGenerateClinitCheck());
   3227       codegen_->AddSlowPath(slow_path);
   3228       if (!cls->IsInDexCache()) {
   3229         __ Beqzc(out, slow_path->GetEntryLabel());
   3230       }
   3231       if (cls->MustGenerateClinitCheck()) {
   3232         GenerateClassInitializationCheck(slow_path, out);
   3233       } else {
   3234         __ Bind(slow_path->GetExitLabel());
   3235       }
   3236     }
   3237   }
   3238 }
   3239 
   3240 static int32_t GetExceptionTlsOffset() {
   3241   return Thread::ExceptionOffset<kMips64DoublewordSize>().Int32Value();
   3242 }
   3243 
   3244 void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
   3245   LocationSummary* locations =
   3246       new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
   3247   locations->SetOut(Location::RequiresRegister());
   3248 }
   3249 
   3250 void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) {
   3251   GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
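          // The exception field holds a 32-bit compressed heap reference, hence
          // the zero-extending 32-bit load.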
   3252   __ LoadFromOffset(kLoadUnsignedWord, out, TR, GetExceptionTlsOffset());
   3253 }
   3254 
   3255 void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) {
   3256   new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
   3257 }
   3258 
   3259 void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
   3260   __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
   3261 }
   3262 
   3263 void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
   3264   LocationSummary::CallKind call_kind = load->NeedsEnvironment()
   3265       ? LocationSummary::kCallOnSlowPath
   3266       : LocationSummary::kNoCall;
   3267   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
   3268   locations->SetInAt(0, Location::RequiresRegister());
   3269   locations->SetOut(Location::RequiresRegister());
   3270 }
   3271 
   3272 void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
   3273   LocationSummary* locations = load->GetLocations();
   3274   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
   3275   GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
   3276   __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
   3277                     ArtMethod::DeclaringClassOffset().Int32Value());
   3278   __ LoadFromOffset(kLoadDoubleword, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
   3279   __ LoadFromOffset(
   3280       kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
   3281   // TODO: We will need a read barrier here.
   3282 
   3283   if (!load->IsInDexCache()) {
   3284     SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
   3285     codegen_->AddSlowPath(slow_path);
   3286     __ Beqzc(out, slow_path->GetEntryLabel());
   3287     __ Bind(slow_path->GetExitLabel());
   3288   }
   3289 }
   3290 
   3291 void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
   3292   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
   3293   locations->SetOut(Location::ConstantLocation(constant));
   3294 }
   3295 
   3296 void InstructionCodeGeneratorMIPS64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
   3297   // Will be generated at use site.
   3298 }
   3299 
   3300 void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
   3301   LocationSummary* locations =
   3302       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
   3303   InvokeRuntimeCallingConvention calling_convention;
   3304   locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   3305 }
   3306 
   3307 void InstructionCodeGeneratorMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
   3308   codegen_->InvokeRuntime(instruction->IsEnter()
   3309                               ? QUICK_ENTRY_POINT(pLockObject)
   3310                               : QUICK_ENTRY_POINT(pUnlockObject),
   3311                           instruction,
   3312                           instruction->GetDexPc(),
   3313                           nullptr);
   3314   if (instruction->IsEnter()) {
   3315     CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
   3316   } else {
   3317     CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
   3318   }
   3319 }
   3320 
   3321 void LocationsBuilderMIPS64::VisitMul(HMul* mul) {
   3322   LocationSummary* locations =
   3323       new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
   3324   switch (mul->GetResultType()) {
   3325     case Primitive::kPrimInt:
   3326     case Primitive::kPrimLong:
   3327       locations->SetInAt(0, Location::RequiresRegister());
   3328       locations->SetInAt(1, Location::RequiresRegister());
   3329       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   3330       break;
   3331 
   3332     case Primitive::kPrimFloat:
   3333     case Primitive::kPrimDouble:
   3334       locations->SetInAt(0, Location::RequiresFpuRegister());
   3335       locations->SetInAt(1, Location::RequiresFpuRegister());
   3336       locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
   3337       break;
   3338 
   3339     default:
   3340       LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
   3341   }
   3342 }
   3343 
   3344 void InstructionCodeGeneratorMIPS64::VisitMul(HMul* instruction) {
   3345   Primitive::Type type = instruction->GetType();
   3346   LocationSummary* locations = instruction->GetLocations();
   3347 
   3348   switch (type) {
   3349     case Primitive::kPrimInt:
   3350     case Primitive::kPrimLong: {
   3351       GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
   3352       GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
   3353       GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
   3354       if (type == Primitive::kPrimInt)
   3355         __ MulR6(dst, lhs, rhs);
   3356       else
   3357         __ Dmul(dst, lhs, rhs);
   3358       break;
   3359     }
   3360     case Primitive::kPrimFloat:
   3361     case Primitive::kPrimDouble: {
   3362       FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
   3363       FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
   3364       FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
   3365       if (type == Primitive::kPrimFloat)
   3366         __ MulS(dst, lhs, rhs);
   3367       else
   3368         __ MulD(dst, lhs, rhs);
   3369       break;
   3370     }
   3371     default:
   3372       LOG(FATAL) << "Unexpected mul type " << type;
   3373   }
   3374 }
   3375 
   3376 void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) {
   3377   LocationSummary* locations =
   3378       new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
   3379   switch (neg->GetResultType()) {
   3380     case Primitive::kPrimInt:
   3381     case Primitive::kPrimLong:
   3382       locations->SetInAt(0, Location::RequiresRegister());
   3383       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   3384       break;
   3385 
   3386     case Primitive::kPrimFloat:
   3387     case Primitive::kPrimDouble:
   3388       locations->SetInAt(0, Location::RequiresFpuRegister());
   3389       locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
   3390       break;
   3391 
   3392     default:
   3393       LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
   3394   }
   3395 }
   3396 
   3397 void InstructionCodeGeneratorMIPS64::VisitNeg(HNeg* instruction) {
   3398   Primitive::Type type = instruction->GetType();
   3399   LocationSummary* locations = instruction->GetLocations();
   3400 
   3401   switch (type) {
   3402     case Primitive::kPrimInt:
   3403     case Primitive::kPrimLong: {
   3404       GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
   3405       GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
   3406       if (type == Primitive::kPrimInt)
   3407         __ Subu(dst, ZERO, src);
   3408       else
   3409         __ Dsubu(dst, ZERO, src);
   3410       break;
   3411     }
   3412     case Primitive::kPrimFloat:
   3413     case Primitive::kPrimDouble: {
   3414       FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
   3415       FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
   3416       if (type == Primitive::kPrimFloat)
   3417         __ NegS(dst, src);
   3418       else
   3419         __ NegD(dst, src);
   3420       break;
   3421     }
   3422     default:
   3423       LOG(FATAL) << "Unexpected neg type " << type;
   3424   }
   3425 }
   3426 
   3427 void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
   3428   LocationSummary* locations =
   3429       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
   3430   InvokeRuntimeCallingConvention calling_convention;
   3431   locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   3432   locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
   3433   locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
   3434   locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
   3435 }
   3436 
   3437 void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
   3438   LocationSummary* locations = instruction->GetLocations();
   3439   // Move a uint16_t value to a register.
   3440   __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
   3441   codegen_->InvokeRuntime(instruction->GetEntrypoint(),
   3442                           instruction,
   3443                           instruction->GetDexPc(),
   3444                           nullptr);
   3445   CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
   3446 }
   3447 
   3448 void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
   3449   LocationSummary* locations =
   3450       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
   3451   InvokeRuntimeCallingConvention calling_convention;
   3452   if (instruction->IsStringAlloc()) {
   3453     locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
   3454   } else {
   3455     locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   3456     locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
   3457   }
   3458   locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
   3459 }
   3460 
   3461 void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) {
   3462   if (instruction->IsStringAlloc()) {
   3463     // String is allocated through StringFactory. Call NewEmptyString entry point.
   3464     GpuRegister temp = instruction->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
   3465     MemberOffset code_offset =
   3466         ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64DoublewordSize);
   3467     __ LoadFromOffset(kLoadDoubleword, temp, TR, QUICK_ENTRY_POINT(pNewEmptyString));
   3468     __ LoadFromOffset(kLoadDoubleword, T9, temp, code_offset.Int32Value());
   3469     __ Jalr(T9);
   3470     __ Nop();
   3471     codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
   3472   } else {
   3473     codegen_->InvokeRuntime(instruction->GetEntrypoint(),
   3474                             instruction,
   3475                             instruction->GetDexPc(),
   3476                             nullptr);
   3477     CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
   3478   }
   3479 }
   3480 
   3481 void LocationsBuilderMIPS64::VisitNot(HNot* instruction) {
   3482   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
   3483   locations->SetInAt(0, Location::RequiresRegister());
   3484   locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   3485 }
   3486 
   3487 void InstructionCodeGeneratorMIPS64::VisitNot(HNot* instruction) {
   3488   Primitive::Type type = instruction->GetType();
   3489   LocationSummary* locations = instruction->GetLocations();
   3490 
   3491   switch (type) {
   3492     case Primitive::kPrimInt:
   3493     case Primitive::kPrimLong: {
   3494       GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
   3495       GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
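              // NOR with ZERO yields the bitwise complement: dst = ~(src | 0) = ~src.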
   3496       __ Nor(dst, src, ZERO);
   3497       break;
   3498     }
   3499 
   3500     default:
   3501       LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
   3502   }
   3503 }
   3504 
   3505 void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
   3506   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
   3507   locations->SetInAt(0, Location::RequiresRegister());
   3508   locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   3509 }
   3510 
   3511 void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
   3512   LocationSummary* locations = instruction->GetLocations();
   3513   __ Xori(locations->Out().AsRegister<GpuRegister>(),
   3514           locations->InAt(0).AsRegister<GpuRegister>(),
   3515           1);
   3516 }
   3517 
   3518 void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
   3519   LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
   3520       ? LocationSummary::kCallOnSlowPath
   3521       : LocationSummary::kNoCall;
   3522   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
   3523   locations->SetInAt(0, Location::RequiresRegister());
   3524   if (instruction->HasUses()) {
   3525     locations->SetOut(Location::SameAsFirstInput());
   3526   }
   3527 }
   3528 
   3529 void CodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
   3530   if (CanMoveNullCheckToUser(instruction)) {
   3531     return;
   3532   }
   3533   Location obj = instruction->GetLocations()->InAt(0);
   3534 
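          // Load into ZERO so the value is discarded; if obj is null, the access
          // faults and the fault handler raises the NullPointerException.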
   3535   __ Lw(ZERO, obj.AsRegister<GpuRegister>(), 0);
   3536   RecordPcInfo(instruction, instruction->GetDexPc());
   3537 }
   3538 
   3539 void CodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
   3540   SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS64(instruction);
   3541   AddSlowPath(slow_path);
   3542 
   3543   Location obj = instruction->GetLocations()->InAt(0);
   3544 
   3545   __ Beqzc(obj.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
   3546 }
   3547 
   3548 void InstructionCodeGeneratorMIPS64::VisitNullCheck(HNullCheck* instruction) {
   3549   codegen_->GenerateNullCheck(instruction);
   3550 }
   3551 
   3552 void LocationsBuilderMIPS64::VisitOr(HOr* instruction) {
   3553   HandleBinaryOp(instruction);
   3554 }
   3555 
   3556 void InstructionCodeGeneratorMIPS64::VisitOr(HOr* instruction) {
   3557   HandleBinaryOp(instruction);
   3558 }
   3559 
   3560 void LocationsBuilderMIPS64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
   3561   LOG(FATAL) << "Unreachable";
   3562 }
   3563 
   3564 void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instruction) {
   3565   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
   3566 }
   3567 
   3568 void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) {
   3569   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
   3570   Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
   3571   if (location.IsStackSlot()) {
   3572     location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
   3573   } else if (location.IsDoubleStackSlot()) {
   3574     location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
   3575   }
   3576   locations->SetOut(location);
   3577 }
   3578 
   3579 void InstructionCodeGeneratorMIPS64::VisitParameterValue(HParameterValue* instruction
   3580                                                          ATTRIBUTE_UNUSED) {
   3581   // Nothing to do, the parameter is already at its location.
   3582 }
   3583 
   3584 void LocationsBuilderMIPS64::VisitCurrentMethod(HCurrentMethod* instruction) {
   3585   LocationSummary* locations =
   3586       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
   3587   locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
   3588 }
   3589 
   3590 void InstructionCodeGeneratorMIPS64::VisitCurrentMethod(HCurrentMethod* instruction
   3591                                                         ATTRIBUTE_UNUSED) {
   3592   // Nothing to do, the method is already at its location.
   3593 }
   3594 
   3595 void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) {
   3596   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
   3597   for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
   3598     locations->SetInAt(i, Location::Any());
   3599   }
   3600   locations->SetOut(Location::Any());
   3601 }
   3602 
   3603 void InstructionCodeGeneratorMIPS64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
   3604   LOG(FATAL) << "Unreachable";
   3605 }
   3606 
   3607 void LocationsBuilderMIPS64::VisitRem(HRem* rem) {
   3608   Primitive::Type type = rem->GetResultType();
   3609   LocationSummary::CallKind call_kind =
   3610       Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
   3611   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
   3612 
   3613   switch (type) {
   3614     case Primitive::kPrimInt:
   3615     case Primitive::kPrimLong:
   3616       locations->SetInAt(0, Location::RequiresRegister());
   3617       locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
   3618       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   3619       break;
   3620 
   3621     case Primitive::kPrimFloat:
   3622     case Primitive::kPrimDouble: {
   3623       InvokeRuntimeCallingConvention calling_convention;
   3624       locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
   3625       locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
   3626       locations->SetOut(calling_convention.GetReturnLocation(type));
   3627       break;
   3628     }
   3629 
   3630     default:
   3631       LOG(FATAL) << "Unexpected rem type " << type;
   3632   }
   3633 }
   3634 
   3635 void InstructionCodeGeneratorMIPS64::VisitRem(HRem* instruction) {
   3636   Primitive::Type type = instruction->GetType();
   3637 
   3638   switch (type) {
   3639     case Primitive::kPrimInt:
   3640     case Primitive::kPrimLong:
   3641       GenerateDivRemIntegral(instruction);
   3642       break;
   3643 
   3644     case Primitive::kPrimFloat:
   3645     case Primitive::kPrimDouble: {
   3646       int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
   3647                                                              : QUICK_ENTRY_POINT(pFmod);
   3648       codegen_->InvokeRuntime(entry_offset, instruction, instruction->GetDexPc(), nullptr);
   3649       if (type == Primitive::kPrimFloat) {
   3650         CheckEntrypointTypes<kQuickFmodf, float, float, float>();
   3651       } else {
   3652         CheckEntrypointTypes<kQuickFmod, double, double, double>();
   3653       }
   3654       break;
   3655     }
   3656     default:
   3657       LOG(FATAL) << "Unexpected rem type " << type;
   3658   }
   3659 }
   3660 
   3661 void LocationsBuilderMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
   3662   memory_barrier->SetLocations(nullptr);
   3663 }
   3664 
   3665 void InstructionCodeGeneratorMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
   3666   GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
   3667 }
   3668 
   3669 void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) {
   3670   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
   3671   Primitive::Type return_type = ret->InputAt(0)->GetType();
  locations->SetInAt(0, Mips64ReturnLocation(return_type));
}

void InstructionCodeGeneratorMIPS64::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitRor(HRor* ror) {
  HandleShift(ror);
}

void InstructionCodeGeneratorMIPS64::VisitRor(HRor* ror) {
  HandleShift(ror);
}

void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}

void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldGet(
    HUnresolvedInstanceFieldGet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}

void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldGet(
    HUnresolvedInstanceFieldGet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldSet(
    HUnresolvedInstanceFieldSet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}

void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldSet(
    HUnresolvedInstanceFieldSet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldGet(
    HUnresolvedStaticFieldGet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}

void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldGet(
    HUnresolvedStaticFieldGet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldSet(
    HUnresolvedStaticFieldSet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}

void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldSet(
    HUnresolvedStaticFieldSet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}

void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorMIPS64::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}

void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type input_type = conversion->GetInputType();
  Primitive::Type result_type = conversion->GetResultType();
  DCHECK_NE(input_type, result_type);

  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
  }

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion);

  if (Primitive::IsFloatingPointType(input_type)) {
    locations->SetInAt(0, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
  }

  if (Primitive::IsFloatingPointType(result_type)) {
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();

  DCHECK_NE(input_type, result_type);

  if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
    GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
    GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();

    switch (result_type) {
      case Primitive::kPrimChar:
        // Java char is an unsigned 16-bit type; zero-extend the low halfword.
        __ Andi(dst, src, 0xFFFF);
        break;
      case Primitive::kPrimByte:
        if (input_type == Primitive::kPrimLong) {
          // Type conversion from long to types narrower than int is a result of code
          // transformations. To avoid unpredictable results for SEB and SEH, we first
          // need to sign-extend the low 32-bit value into bits 32 through 63.
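          // For example, for the long value 0x123456789ABCDEF0, SLL first produces the
          // canonically sign-extended int 0xFFFFFFFF9ABCDEF0, after which SEB can safely
          // produce 0xFFFFFFFFFFFFFFF0.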
          __ Sll(dst, src, 0);
          __ Seb(dst, dst);
        } else {
          __ Seb(dst, src);
        }
        break;
      case Primitive::kPrimShort:
        if (input_type == Primitive::kPrimLong) {
          // Type conversion from long to types narrower than int is a result of code
          // transformations. To avoid unpredictable results for SEB and SEH, we first
          // need to sign-extend the low 32-bit value into bits 32 through 63.
          __ Sll(dst, src, 0);
          __ Seh(dst, dst);
        } else {
          __ Seh(dst, src);
        }
        break;
      case Primitive::kPrimInt:
      case Primitive::kPrimLong:
        // Sign-extend the 32-bit int into bits 32 through 63 for
        // int-to-long and long-to-int conversions.
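        // On MIPS64, SLL with shift amount 0 is not a no-op: it operates on the low
        // 32 bits and sign-extends the result into bits 32 through 63, making it the
        // canonical 32-bit sign-extension idiom.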
        __ Sll(dst, src, 0);
        break;

      default:
        LOG(FATAL) << "Unexpected type conversion from " << input_type
                   << " to " << result_type;
    }
  } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
    FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
    GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
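    // Move the integer bits into the FPU (Dmtc1 for a 64-bit input, Mtc1 for a 32-bit
    // one), then let cvt.{s,d}.{l,w} perform the actual integer-to-float conversion.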
    if (input_type == Primitive::kPrimLong) {
      __ Dmtc1(src, FTMP);
      if (result_type == Primitive::kPrimFloat) {
        __ Cvtsl(dst, FTMP);
      } else {
        __ Cvtdl(dst, FTMP);
      }
    } else {
      __ Mtc1(src, FTMP);
      if (result_type == Primitive::kPrimFloat) {
        __ Cvtsw(dst, FTMP);
      } else {
        __ Cvtdw(dst, FTMP);
      }
    }
  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
    GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
    FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
    Mips64Label truncate;
    Mips64Label done;

    // When NAN2008=0 (R2 and before), the truncate instruction produces the maximum positive
    // value when the input is either a NaN or is outside of the range of the output type
    // after the truncation. In other words, the three special cases (NaN, too small, too big)
    // produce the same result.
    //
    // When NAN2008=1 (R6), the truncate instruction caps the output at the minimum/maximum
    // value of the output type if the input is outside of the range after the truncation or
    // produces 0 when the input is a NaN. In other words, the three special cases produce
    // three distinct results. This matches the desired float/double-to-int/long conversion
    // exactly.
    //
    // So, NAN2008 affects the handling of NaNs and of negative out-of-range values by the
    // truncate instruction.
    //
    // The following code supports both NAN2008=0 and NAN2008=1 behaviors of the truncate
    // instruction, the reason being that the emulator implements NAN2008=0 on MIPS64R6,
    // even though it must be NAN2008=1 on R6.
    //
    // The code takes care of the different behaviors by first comparing the input to the
    // minimum output value (-2**63 for truncating to long, -2**31 for truncating to int).
    // If the input is greater than or equal to the minimum, it proceeds to the truncate
    // instruction, which will handle such an input the same way irrespective of NAN2008.
    // Otherwise the input is compared to itself to determine whether it is a NaN or not
    // in order to return either zero or the minimum value.
    //
    // TODO: simplify this when the emulator correctly implements NAN2008=1 behavior of the
    // truncate instruction for MIPS64R6.
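    //
    // For reference, Java requires (JLS 5.1.3): a NaN converts to 0; a value below the
    // minimum (e.g. (int)-3.0e9f) converts to the minimum; a value above the maximum
    // converts to the maximum. The compare-and-mask sequence below realizes this on both
    // NAN2008 variants.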
    if (input_type == Primitive::kPrimFloat) {
      uint32_t min_val = (result_type == Primitive::kPrimLong)
          ? bit_cast<uint32_t, float>(std::numeric_limits<int64_t>::min())
          : bit_cast<uint32_t, float>(std::numeric_limits<int32_t>::min());
      __ LoadConst32(TMP, min_val);
      __ Mtc1(TMP, FTMP);
      __ CmpLeS(FTMP, FTMP, src);
    } else {
      uint64_t min_val = (result_type == Primitive::kPrimLong)
          ? bit_cast<uint64_t, double>(std::numeric_limits<int64_t>::min())
          : bit_cast<uint64_t, double>(std::numeric_limits<int32_t>::min());
      __ LoadConst64(TMP, min_val);
      __ Dmtc1(TMP, FTMP);
      __ CmpLeD(FTMP, FTMP, src);
    }

    __ Bc1nez(FTMP, &truncate);

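    // Reaching here, the input is either a NaN or is less than the minimum output value.
    // On R6, CMP.EQ.{S,D} writes all ones into the destination when the input equals
    // itself (i.e. is not a NaN) and all zeros otherwise, so ANDing that mask with the
    // minimum output value yields the minimum for a too-small input and zero for a NaN.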
    if (input_type == Primitive::kPrimFloat) {
      __ CmpEqS(FTMP, src, src);
    } else {
      __ CmpEqD(FTMP, src, src);
    }
    if (result_type == Primitive::kPrimLong) {
      __ LoadConst64(dst, std::numeric_limits<int64_t>::min());
    } else {
      __ LoadConst32(dst, std::numeric_limits<int32_t>::min());
    }
    __ Mfc1(TMP, FTMP);
    __ And(dst, dst, TMP);

    __ Bc(&done);

    __ Bind(&truncate);

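    // On this path the input is at least the minimum output value, so trunc.{l,w}.{s,d}
    // (which rounds toward zero) behaves the same under NAN2008=0 and NAN2008=1: inputs
    // above the maximum saturate to the maximum in both modes.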
    if (result_type == Primitive::kPrimLong) {
      if (input_type == Primitive::kPrimFloat) {
        __ TruncLS(FTMP, src);
      } else {
        __ TruncLD(FTMP, src);
      }
      __ Dmfc1(dst, FTMP);
    } else {
      if (input_type == Primitive::kPrimFloat) {
        __ TruncWS(FTMP, src);
      } else {
        __ TruncWD(FTMP, src);
      }
      __ Mfc1(dst, FTMP);
    }

    __ Bind(&done);
  } else if (Primitive::IsFloatingPointType(result_type) &&
             Primitive::IsFloatingPointType(input_type)) {
    FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
    FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
    if (result_type == Primitive::kPrimFloat) {
      __ Cvtsd(dst, src);
    } else {
      __ Cvtds(dst, src);
    }
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}

void LocationsBuilderMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during PrepareForRegisterAllocation.
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during PrepareForRegisterAllocation.
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS64::VisitEqual(HEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitEqual(HEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitNotEqual(HNotEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitNotEqual(HNotEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThan(HLessThan* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThan(HLessThan* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitBelow(HBelow* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitBelow(HBelow* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitAbove(HAbove* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitAbove(HAbove* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
  HandleCondition(comp);
}

// Simple implementation of packed switch - generate cascaded compare/jumps.
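// For example, for cases {10, 11, 12} this emits (with index = value - 10):
//   Bltzc index, default    // value < 10
//   Beqzc index, case_10
//   Addiu index, index, -2
//   Bltzc index, case_11    // index was 1
//   Beqzc index, case_12    // index was 2
//   Bc    default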
void LocationsBuilderMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
  int32_t lower_bound = switch_instr->GetStartValue();
  int32_t num_entries = switch_instr->GetNumEntries();
  LocationSummary* locations = switch_instr->GetLocations();
  GpuRegister value_reg = locations->InAt(0).AsRegister<GpuRegister>();
  HBasicBlock* default_block = switch_instr->GetDefaultBlock();

  // Create a set of compare/jumps.
  GpuRegister temp_reg = TMP;
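  // Bias the value so that the first case maps to index 0: temp_reg = value - lower_bound.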
  if (IsInt<16>(-lower_bound)) {
    __ Addiu(temp_reg, value_reg, -lower_bound);
  } else {
    __ LoadConst32(AT, -lower_bound);
    __ Addu(temp_reg, value_reg, AT);
  }
  // Jump to the default block if the index is negative.
  // Note: we don't check the case where the index is positive while value < lower_bound,
  // because in that case index >= num_entries must be true, so the default case at the end
  // will catch it. This saves one branch instruction.
  __ Bltzc(temp_reg, codegen_->GetLabelOf(default_block));

  const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
  // Jump to successors[0] if value == lower_bound.
  __ Beqzc(temp_reg, codegen_->GetLabelOf(successors[0]));
  int32_t last_index = 0;
  for (; num_entries - last_index > 2; last_index += 2) {
    __ Addiu(temp_reg, temp_reg, -2);
    // Jump to successors[last_index + 1] if value < case_value[last_index + 2].
    __ Bltzc(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
    // Jump to successors[last_index + 2] if value == case_value[last_index + 2].
    __ Beqzc(temp_reg, codegen_->GetLabelOf(successors[last_index + 2]));
  }
  if (num_entries - last_index == 2) {
    // Handle the last remaining case value.
    __ Addiu(temp_reg, temp_reg, -1);
    __ Beqzc(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
  }

  // And the default for any other value.
  if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
    __ Bc(codegen_->GetLabelOf(default_block));
  }
}

void LocationsBuilderMIPS64::VisitClassTableGet(HClassTableGet*) {
  UNIMPLEMENTED(FATAL) << "ClassTableGet is unimplemented on mips64";
}

void InstructionCodeGeneratorMIPS64::VisitClassTableGet(HClassTableGet*) {
  UNIMPLEMENTED(FATAL) << "ClassTableGet is unimplemented on mips64";
}

}  // namespace mips64
}  // namespace art