/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_mips.h"

#include "arch/mips/entrypoints_direct_mips.h"
#include "arch/mips/instruction_set_features_mips.h"
#include "art_method.h"
#include "code_generator_utils.h"
#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_mips.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/mips/assembler_mips.h"
#include "utils/stack_checks.h"

namespace art {
namespace mips {

static constexpr int kCurrentMethodStackOffset = 0;
static constexpr Register kMethodRegisterArgument = A0;

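// Returns the location used to pass back a value of the given type under the
// MIPS calling convention: V0 for 32-bit integral and reference results, the
// V0/V1 register pair for longs, and F0 for floating-point results.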
Location MipsReturnLocation(Primitive::Type return_type) {
  switch (return_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
      return Location::RegisterLocation(V0);

    case Primitive::kPrimLong:
      return Location::RegisterPairLocation(V0, V1);

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      return Location::FpuRegisterLocation(F0);

    case Primitive::kPrimVoid:
      return Location();
  }
  UNREACHABLE();
}

Location InvokeDexCallingConventionVisitorMIPS::GetReturnLocation(Primitive::Type type) const {
  return MipsReturnLocation(type);
}

Location InvokeDexCallingConventionVisitorMIPS::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}

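// Computes the location of the next method argument. As an illustration
// (assuming the GP argument registers are A1, A2, A3, T0 and T1, which the
// A1/A3-skipping logic below implies), a (long, int) signature places the
// long in the aligned A2/A3 pair (skipping A1) and the int in T0, while
// stack slots remain reserved for every argument.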
Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(Primitive::Type type) {
  Location next_location;

  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      uint32_t gp_index = gp_index_++;
      if (gp_index < calling_convention.GetNumberOfRegisters()) {
        next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index));
      } else {
        size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
        next_location = Location::StackSlot(stack_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t gp_index = gp_index_;
      gp_index_ += 2;
      if (gp_index + 1 < calling_convention.GetNumberOfRegisters()) {
        Register reg = calling_convention.GetRegisterAt(gp_index);
        if (reg == A1 || reg == A3) {
          gp_index_++;  // Skip A1(A3), and use A2_A3(T0_T1) instead.
          gp_index++;
        }
        Register low_even = calling_convention.GetRegisterAt(gp_index);
        Register high_odd = calling_convention.GetRegisterAt(gp_index + 1);
        DCHECK_EQ(low_even + 1, high_odd);
        next_location = Location::RegisterPairLocation(low_even, high_odd);
      } else {
        size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
        next_location = Location::DoubleStackSlot(stack_offset);
      }
      break;
    }

    // Note: both float and double types are stored in even FPU registers. On 32 bit FPU, double
    // will take up the even/odd pair, while floats are stored in even regs only.
    // On 64 bit FPU, both double and float are stored in even registers only.
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      uint32_t float_index = float_index_++;
      if (float_index < calling_convention.GetNumberOfFpuRegisters()) {
        next_location = Location::FpuRegisterLocation(
            calling_convention.GetFpuRegisterAt(float_index));
      } else {
        size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
        next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                     : Location::StackSlot(stack_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected parameter type " << type;
      break;
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;

  return next_location;
}

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
  return MipsReturnLocation(type);
}

// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()->  // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()

class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit BoundsCheckSlowPathMIPS(HBoundsCheck* instruction) : SlowPathCodeMIPS(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(0),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimInt,
                               locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimInt);
    QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt()
        ? kQuickThrowStringBounds
        : kQuickThrowArrayBounds;
    mips_codegen->InvokeRuntime(entrypoint, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS);
};

class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit DivZeroCheckSlowPathMIPS(HDivZeroCheck* instruction) : SlowPathCodeMIPS(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    __ Bind(GetEntryLabel());
    mips_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS);
};

class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  LoadClassSlowPathMIPS(HLoadClass* cls,
                        HInstruction* at,
                        uint32_t dex_pc,
                        bool do_clinit,
                        const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high = nullptr)
      : SlowPathCodeMIPS(at),
        cls_(cls),
        dex_pc_(dex_pc),
        do_clinit_(do_clinit),
        bss_info_high_(bss_info_high) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Location out = locations->Out();
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
    InvokeRuntimeCallingConvention calling_convention;
    DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
    const bool is_load_class_bss_entry =
        (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // For HLoadClass/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
    Register entry_address = kNoRegister;
    if (is_load_class_bss_entry && baker_or_no_read_barriers) {
      Register temp = locations->GetTemp(0).AsRegister<Register>();
      bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
      // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
      // kSaveEverything call.
      entry_address = temp_is_a0 ? out.AsRegister<Register>() : temp;
      DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
      if (temp_is_a0) {
        __ Move(entry_address, temp);
      }
    }

    dex::TypeIndex type_index = cls_->GetTypeIndex();
    __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
    QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
                                                : kQuickInitializeType;
    mips_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
    }

    // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
    if (is_load_class_bss_entry && baker_or_no_read_barriers) {
      // The class entry address was preserved in `entry_address` thanks to kSaveEverything.
      DCHECK(bss_info_high_);
      CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
          mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, bss_info_high_);
      bool reordering = __ SetReorder(false);
      __ Bind(&info_low->label);
      __ StoreToOffset(kStoreWord,
                       calling_convention.GetRegisterAt(0),
                       entry_address,
                       /* placeholder */ 0x5678);
      __ SetReorder(reordering);
    }

    // Move the class to the desired location.
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = instruction_->GetType();
      mips_codegen->MoveLocation(out,
                                 Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                 type);
    }
    RestoreLiveRegisters(codegen, locations);

    // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
    if (is_load_class_bss_entry && !baker_or_no_read_barriers) {
      // For non-Baker read barriers we need to re-calculate the address of
      // the class entry.
      const bool isR6 = mips_codegen->GetInstructionSetFeatures().IsR6();
      Register base = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
      CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
          mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
      CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
          mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, info_high);
      bool reordering = __ SetReorder(false);
      mips_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base, info_low);
      __ StoreToOffset(kStoreWord, out.AsRegister<Register>(), TMP, /* placeholder */ 0x5678);
      __ SetReorder(reordering);
    }
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The dex PC of the instruction (`at`) for which this slow path was created.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  // Pointer to the high half PC-relative patch info for HLoadClass/kBssEntry.
  const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS);
};

class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit LoadStringSlowPathMIPS(HLoadString* instruction,
                                  const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high)
      : SlowPathCodeMIPS(instruction), bss_info_high_(bss_info_high) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    DCHECK(instruction_->IsLoadString());
    DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    HLoadString* load = instruction_->AsLoadString();
    const dex::StringIndex string_index = load->GetStringIndex();
    Register out = locations->Out().AsRegister<Register>();
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
    InvokeRuntimeCallingConvention calling_convention;
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // For HLoadString/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
    Register entry_address = kNoRegister;
    if (baker_or_no_read_barriers) {
      Register temp = locations->GetTemp(0).AsRegister<Register>();
      bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
      // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
      // kSaveEverything call.
      entry_address = temp_is_a0 ? out : temp;
      DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
      if (temp_is_a0) {
        __ Move(entry_address, temp);
      }
    }

    __ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
    mips_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();

    // Store the resolved string to the BSS entry.
    if (baker_or_no_read_barriers) {
      // The string entry address was preserved in `entry_address` thanks to kSaveEverything.
      DCHECK(bss_info_high_);
      CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
          mips_codegen->NewPcRelativeStringPatch(load->GetDexFile(), string_index, bss_info_high_);
      bool reordering = __ SetReorder(false);
      __ Bind(&info_low->label);
      __ StoreToOffset(kStoreWord,
                       calling_convention.GetRegisterAt(0),
                       entry_address,
                       /* placeholder */ 0x5678);
      __ SetReorder(reordering);
    }

    Primitive::Type type = instruction_->GetType();
    mips_codegen->MoveLocation(locations->Out(),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               type);
    RestoreLiveRegisters(codegen, locations);

    // Store the resolved string to the BSS entry.
    if (!baker_or_no_read_barriers) {
      // For non-Baker read barriers we need to re-calculate the address of
      // the string entry.
      const bool isR6 = mips_codegen->GetInstructionSetFeatures().IsR6();
      Register base = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
      CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
          mips_codegen->NewPcRelativeStringPatch(load->GetDexFile(), string_index);
      CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
          mips_codegen->NewPcRelativeStringPatch(load->GetDexFile(), string_index, info_high);
      bool reordering = __ SetReorder(false);
      mips_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base, info_low);
      __ StoreToOffset(kStoreWord, out, TMP, /* placeholder */ 0x5678);
      __ SetReorder(reordering);
    }
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; }

 private:
  // Pointer to the high half PC-relative patch info.
  const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
};

class NullCheckSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit NullCheckSlowPathMIPS(HNullCheck* instr) : SlowPathCodeMIPS(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    mips_codegen->InvokeRuntime(kQuickThrowNullPointer,
                                instruction_,
                                instruction_->GetDexPc(),
                                this);
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS);
};

class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  SuspendCheckSlowPathMIPS(HSuspendCheck* instruction, HBasicBlock* successor)
      : SlowPathCodeMIPS(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);     // Only saves live vector registers for SIMD.
    mips_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    RestoreLiveRegisters(codegen, locations);  // Only restores live vector registers for SIMD.
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(mips_codegen->GetLabelOf(successor_));
    }
  }

  MipsLabel* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS"; }

 private:
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  MipsLabel return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS);
};

class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit TypeCheckSlowPathMIPS(HInstruction* instruction, bool is_fatal)
      : SlowPathCodeMIPS(instruction), is_fatal_(is_fatal) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    uint32_t dex_pc = instruction_->GetDexPc();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);

    __ Bind(GetEntryLabel());
    if (!is_fatal_) {
      SaveLiveRegisters(codegen, locations);
    }

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(0),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimNot,
                               locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimNot);
    if (instruction_->IsInstanceOf()) {
      mips_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
    } else {
      DCHECK(instruction_->IsCheckCast());
      mips_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
      CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
    }

    if (!is_fatal_) {
      RestoreLiveRegisters(codegen, locations);
      __ B(GetExitLabel());
    }
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS"; }

  bool IsFatal() const OVERRIDE { return is_fatal_; }

 private:
  const bool is_fatal_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS);
};

class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit DeoptimizationSlowPathMIPS(HDeoptimize* instruction)
    : SlowPathCodeMIPS(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    __ Bind(GetEntryLabel());
    LocationSummary* locations = instruction_->GetLocations();
    SaveLiveRegisters(codegen, locations);
    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0),
                   static_cast<uint32_t>(instruction_->AsDeoptimize()->GetDeoptimizationKind()));
    mips_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
};

class ArraySetSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit ArraySetSlowPathMIPS(HInstruction* instruction) : SlowPathCodeMIPS(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    HParallelMove parallel_move(codegen->GetGraph()->GetArena());
    parallel_move.AddMove(
        locations->InAt(0),
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        Primitive::kPrimNot,
        nullptr);
    parallel_move.AddMove(
        locations->InAt(1),
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        Primitive::kPrimInt,
        nullptr);
    parallel_move.AddMove(
        locations->InAt(2),
        Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
        Primitive::kPrimNot,
        nullptr);
    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);

    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    mips_codegen->InvokeRuntime(kQuickAputObject, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathMIPS"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS);
};

// Slow path marking an object reference `ref` during a read
// barrier. The field `obj.field` in the object `obj` holding this
// reference does not get updated by this slow path after marking (see
// ReadBarrierMarkAndUpdateFieldSlowPathMIPS below for that).
//
// This means that after the execution of this slow path, `ref` will
// always be up-to-date, but `obj.field` may not; i.e., after the
// flip, `ref` will be a to-space reference, but `obj.field` will
// probably still be a from-space reference (unless it gets updated by
// another thread, or if another thread installed another object
// reference (different from `ref`) in `obj.field`).
//
// If `entrypoint` is a valid location it is assumed to already be
// holding the entrypoint. The case where the entrypoint is passed in
// is for the GcRoot read barrier.
class ReadBarrierMarkSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  ReadBarrierMarkSlowPathMIPS(HInstruction* instruction,
                              Location ref,
                              Location entrypoint = Location::NoLocation())
      : SlowPathCodeMIPS(instruction), ref_(ref), entrypoint_(entrypoint) {
    DCHECK(kEmitCompilerReadBarrier);
  }

  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathMIPS"; }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Register ref_reg = ref_.AsRegister<Register>();
    DCHECK(locations->CanCall());
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
    DCHECK(instruction_->IsInstanceFieldGet() ||
           instruction_->IsStaticFieldGet() ||
           instruction_->IsArrayGet() ||
           instruction_->IsArraySet() ||
           instruction_->IsLoadClass() ||
           instruction_->IsLoadString() ||
           instruction_->IsInstanceOf() ||
           instruction_->IsCheckCast() ||
           (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()) ||
           (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
        << "Unexpected instruction in read barrier marking slow path: "
        << instruction_->DebugName();

    __ Bind(GetEntryLabel());
    // No need to save live registers; it's taken care of by the
    // entrypoint. Also, there is no need to update the stack mask,
    // as this runtime call will not trigger a garbage collection.
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    DCHECK((V0 <= ref_reg && ref_reg <= T7) ||
           (S2 <= ref_reg && ref_reg <= S7) ||
           (ref_reg == FP)) << ref_reg;
    // "Compact" slow path, saving two moves.
    //
    // Instead of using the standard runtime calling convention (input
    // and output in A0 and V0 respectively):
    //
    //   A0 <- ref
    //   V0 <- ReadBarrierMark(A0)
    //   ref <- V0
    //
    // we just use rX (the register containing `ref`) as input and output
    // of a dedicated entrypoint:
    //
    //   rX <- ReadBarrierMarkRegX(rX)
    //
    if (entrypoint_.IsValid()) {
      mips_codegen->ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction_, this);
      DCHECK_EQ(entrypoint_.AsRegister<Register>(), T9);
      __ Jalr(entrypoint_.AsRegister<Register>());
      __ NopIfNoReordering();
    } else {
      int32_t entry_point_offset =
          Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(ref_reg - 1);
      // This runtime call does not require a stack map.
      mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
                                                        instruction_,
                                                        this,
                                                        /* direct */ false);
    }
    __ B(GetExitLabel());
  }

 private:
  // The location (register) of the marked object reference.
  const Location ref_;

  // The location of the entrypoint if already loaded.
  const Location entrypoint_;

  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathMIPS);
};

// Slow path marking an object reference `ref` during a read barrier,
// and if needed, atomically updating the field `obj.field` in the
// object `obj` holding this reference after marking (contrary to
// ReadBarrierMarkSlowPathMIPS above, which never tries to update
// `obj.field`).
//
// This means that after the execution of this slow path, both `ref`
// and `obj.field` will be up-to-date; i.e., after the flip, both will
// hold the same to-space reference (unless another thread installed
// another object reference (different from `ref`) in `obj.field`).
class ReadBarrierMarkAndUpdateFieldSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  ReadBarrierMarkAndUpdateFieldSlowPathMIPS(HInstruction* instruction,
                                            Location ref,
                                            Register obj,
                                            Location field_offset,
                                            Register temp1)
      : SlowPathCodeMIPS(instruction),
        ref_(ref),
        obj_(obj),
        field_offset_(field_offset),
        temp1_(temp1) {
    DCHECK(kEmitCompilerReadBarrier);
  }

  const char* GetDescription() const OVERRIDE {
    return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS";
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Register ref_reg = ref_.AsRegister<Register>();
    DCHECK(locations->CanCall());
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
    // This slow path is only used by the UnsafeCASObject intrinsic.
    DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
        << "Unexpected instruction in read barrier marking and field updating slow path: "
        << instruction_->DebugName();
    DCHECK(instruction_->GetLocations()->Intrinsified());
    DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
    DCHECK(field_offset_.IsRegisterPair()) << field_offset_;

    __ Bind(GetEntryLabel());

    // Save the old reference.
    // Note that we cannot use AT or TMP to save the old reference, as those
    // are used by the code that follows, but we need the old reference after
    // the call to the ReadBarrierMarkRegX entry point.
    DCHECK_NE(temp1_, AT);
    DCHECK_NE(temp1_, TMP);
    __ Move(temp1_, ref_reg);

    // No need to save live registers; it's taken care of by the
    // entrypoint. Also, there is no need to update the stack mask,
    // as this runtime call will not trigger a garbage collection.
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    DCHECK((V0 <= ref_reg && ref_reg <= T7) ||
           (S2 <= ref_reg && ref_reg <= S7) ||
           (ref_reg == FP)) << ref_reg;
    // "Compact" slow path, saving two moves.
    //
    // Instead of using the standard runtime calling convention (input
    // and output in A0 and V0 respectively):
    //
    //   A0 <- ref
    //   V0 <- ReadBarrierMark(A0)
    //   ref <- V0
    //
    // we just use rX (the register containing `ref`) as input and output
    // of a dedicated entrypoint:
    //
    //   rX <- ReadBarrierMarkRegX(rX)
    //
    int32_t entry_point_offset =
        Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(ref_reg - 1);
    // This runtime call does not require a stack map.
    mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
                                                      instruction_,
                                                      this,
                                                      /* direct */ false);

    // If the new reference is different from the old reference,
    // update the field in the holder (`*(obj_ + field_offset_)`).
    //
    // Note that this field could also hold a different object, if
    // another thread had concurrently changed it. In that case, the
    // compare-and-set (CAS) loop below would abort, leaving the
    // field as-is.
    MipsLabel done;
    __ Beq(temp1_, ref_reg, &done);

    // Update the holder's field atomically.  This may fail if the
    // mutator updates the field before us, but that is OK.  This is
    // achieved using a strong compare-and-set (CAS) operation with
    // relaxed memory synchronization ordering, where the expected
    // value is the old reference and the desired value is the new
    // reference.

    // Convenience aliases.
    Register base = obj_;
    // The UnsafeCASObject intrinsic uses a register pair as field
    // offset ("long offset"), of which only the low part contains
    // data.
    Register offset = field_offset_.AsRegisterPairLow<Register>();
    Register expected = temp1_;
    Register value = ref_reg;
    Register tmp_ptr = TMP;      // Pointer to actual memory.
    Register tmp = AT;           // Value in memory.

    __ Addu(tmp_ptr, base, offset);

    if (kPoisonHeapReferences) {
      __ PoisonHeapReference(expected);
      // Do not poison `value` if it is the same register as
      // `expected`, which has just been poisoned.
      if (value != expected) {
        __ PoisonHeapReference(value);
      }
    }

    // do {
    //   tmp = [r_ptr] - expected;
    // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
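    // On MIPS, the store-conditional (Sc) writes 1 into its source register
    // on success and 0 on failure, so the Beqz below retries the loop
    // whenever the reservation established by the load-linked (Ll) is lost.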

    bool is_r6 = mips_codegen->GetInstructionSetFeatures().IsR6();
    MipsLabel loop_head, exit_loop;
    __ Bind(&loop_head);
    if (is_r6) {
      __ LlR6(tmp, tmp_ptr);
    } else {
      __ LlR2(tmp, tmp_ptr);
    }
    __ Bne(tmp, expected, &exit_loop);
    __ Move(tmp, value);
    if (is_r6) {
      __ ScR6(tmp, tmp_ptr);
    } else {
      __ ScR2(tmp, tmp_ptr);
    }
    __ Beqz(tmp, &loop_head);
    __ Bind(&exit_loop);

    if (kPoisonHeapReferences) {
      __ UnpoisonHeapReference(expected);
      // Do not unpoison `value` if it is the same register as
      // `expected`, which has just been unpoisoned.
      if (value != expected) {
        __ UnpoisonHeapReference(value);
      }
    }

    __ Bind(&done);
    __ B(GetExitLabel());
  }

 private:
  // The location (register) of the marked object reference.
  const Location ref_;
  // The register containing the object holding the marked object reference field.
  const Register obj_;
  // The location of the offset of the marked reference field within `obj_`.
  Location field_offset_;

  const Register temp1_;

  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathMIPS);
};

// Slow path generating a read barrier for a heap reference.
class ReadBarrierForHeapReferenceSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  ReadBarrierForHeapReferenceSlowPathMIPS(HInstruction* instruction,
                                          Location out,
                                          Location ref,
                                          Location obj,
                                          uint32_t offset,
                                          Location index)
      : SlowPathCodeMIPS(instruction),
        out_(out),
        ref_(ref),
        obj_(obj),
        offset_(offset),
        index_(index) {
    DCHECK(kEmitCompilerReadBarrier);
    // If `obj` is equal to `out` or `ref`, it means the initial object
    // has been overwritten by (or after) the heap object reference load
    // to be instrumented, e.g.:
    //
    //   __ LoadFromOffset(kLoadWord, out, out, offset);
    //   codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset);
    //
    // In that case, we have lost the information about the original
    // object, and the emitted read barrier cannot work properly.
    DCHECK(!obj.Equals(out)) << "obj=" << obj << " out=" << out;
    DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    LocationSummary* locations = instruction_->GetLocations();
    Register reg_out = out_.AsRegister<Register>();
    DCHECK(locations->CanCall());
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
    DCHECK(instruction_->IsInstanceFieldGet() ||
           instruction_->IsStaticFieldGet() ||
           instruction_->IsArrayGet() ||
           instruction_->IsInstanceOf() ||
           instruction_->IsCheckCast() ||
           (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
        << "Unexpected instruction in read barrier for heap reference slow path: "
        << instruction_->DebugName();

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We may have to change the index's value, but as `index_` is a
    // constant member (like other "inputs" of this slow path),
    // introduce a copy of it, `index`.
    Location index = index_;
    if (index_.IsValid()) {
      // Handle `index_` for HArrayGet and UnsafeGetObject/UnsafeGetObjectVolatile intrinsics.
      if (instruction_->IsArrayGet()) {
        // Compute the actual memory offset and store it in `index`.
        Register index_reg = index_.AsRegister<Register>();
        DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_reg));
        if (codegen->IsCoreCalleeSaveRegister(index_reg)) {
          // We are about to change the value of `index_reg` (see the
          // calls to art::mips::MipsAssembler::Sll and
          // art::mips::MipsAssembler::Addiu32 below), but it has
          // not been saved by the previous call to
          // art::SlowPathCode::SaveLiveRegisters, as it is a
          // callee-save register --
          // art::SlowPathCode::SaveLiveRegisters does not consider
          // callee-save registers, as it has been designed with the
          // assumption that callee-save registers are supposed to be
          // handled by the called function.  So, as a callee-save
          // register, `index_reg` _would_ eventually be saved onto
          // the stack, but it would be too late: we would have
          // changed its value earlier.  Therefore, we manually save
          // it here into another freely available register,
          // `free_reg`, chosen of course among the caller-save
          // registers (as a callee-save `free_reg` register would
          // exhibit the same problem).
          //
          // Note we could have requested a temporary register from
          // the register allocator instead; but we prefer not to, as
          // this is a slow path, and we know we can find a
          // caller-save register that is available.
          Register free_reg = FindAvailableCallerSaveRegister(codegen);
          __ Move(free_reg, index_reg);
          index_reg = free_reg;
          index = Location::RegisterLocation(index_reg);
        } else {
          // The initial register stored in `index_` has already been
          // saved in the call to art::SlowPathCode::SaveLiveRegisters
          // (as it is not a callee-save register), so we can freely
          // use it.
        }
        // Shifting the index value contained in `index_reg` by the scale
        // factor (2) cannot overflow in practice, as the runtime is
        // unable to allocate object arrays with a size larger than
        // 2^26 - 1 (that is, 2^28 - 4 bytes).
        __ Sll(index_reg, index_reg, TIMES_4);
        static_assert(
            sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
            "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
        __ Addiu32(index_reg, index_reg, offset_);
      } else {
        // In the case of the UnsafeGetObject/UnsafeGetObjectVolatile
        // intrinsics, `index_` is not shifted by a scale factor of 2
        // (as in the case of ArrayGet), as it is actually an offset
        // to an object field within an object.
        DCHECK(instruction_->IsInvoke()) << instruction_->DebugName();
        DCHECK(instruction_->GetLocations()->Intrinsified());
        DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) ||
               (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile))
            << instruction_->AsInvoke()->GetIntrinsic();
        DCHECK_EQ(offset_, 0U);
        DCHECK(index_.IsRegisterPair());
        // UnsafeGet's offset location is a register pair, the low
        // part contains the correct offset.
        index = index_.ToLow();
      }
    }

    // We're moving two or three locations to locations that could
    // overlap, so we need a parallel move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    HParallelMove parallel_move(codegen->GetGraph()->GetArena());
    parallel_move.AddMove(ref_,
                          Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                          Primitive::kPrimNot,
                          nullptr);
    parallel_move.AddMove(obj_,
                          Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                          Primitive::kPrimNot,
                          nullptr);
    if (index.IsValid()) {
      parallel_move.AddMove(index,
                            Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
                            Primitive::kPrimInt,
                            nullptr);
      codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
    } else {
      codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
      __ LoadConst32(calling_convention.GetRegisterAt(2), offset_);
    }
    mips_codegen->InvokeRuntime(kQuickReadBarrierSlow,
                                instruction_,
                                instruction_->GetDexPc(),
                                this);
    CheckEntrypointTypes<
        kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>();
    mips_codegen->MoveLocation(out_,
                               calling_convention.GetReturnLocation(Primitive::kPrimNot),
                               Primitive::kPrimNot);

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathMIPS"; }

 private:
  Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
    size_t ref = static_cast<int>(ref_.AsRegister<Register>());
    size_t obj = static_cast<int>(obj_.AsRegister<Register>());
    for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
      if (i != ref &&
          i != obj &&
          !codegen->IsCoreCalleeSaveRegister(i) &&
          !codegen->IsBlockedCoreRegister(i)) {
        return static_cast<Register>(i);
      }
    }
    // We shall never fail to find a free caller-save register, as
    // there are more than two core caller-save registers on MIPS
    // (meaning it is possible to find one which is different from
    // `ref` and `obj`).
    DCHECK_GT(codegen->GetNumberOfCoreCallerSaveRegisters(), 2u);
    LOG(FATAL) << "Could not find a free caller-save register";
    UNREACHABLE();
  }

  const Location out_;
  const Location ref_;
  const Location obj_;
  const uint32_t offset_;
  // An additional location containing an index to an array.
  // Only used for HArrayGet and the UnsafeGetObject &
  // UnsafeGetObjectVolatile intrinsics.
  const Location index_;

  DISALLOW_COPY_AND_ASSIGN(ReadBarrierForHeapReferenceSlowPathMIPS);
};

// Slow path generating a read barrier for a GC root.
class ReadBarrierForRootSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  ReadBarrierForRootSlowPathMIPS(HInstruction* instruction, Location out, Location root)
      : SlowPathCodeMIPS(instruction), out_(out), root_(root) {
    DCHECK(kEmitCompilerReadBarrier);
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Register reg_out = out_.AsRegister<Register>();
    DCHECK(locations->CanCall());
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
    DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString())
        << "Unexpected instruction in read barrier for GC root slow path: "
        << instruction_->DebugName();

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    mips_codegen->MoveLocation(Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               root_,
                               Primitive::kPrimNot);
    mips_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow,
                                instruction_,
                                instruction_->GetDexPc(),
                                this);
    CheckEntrypointTypes<kQuickReadBarrierForRootSlow, mirror::Object*, GcRoot<mirror::Object>*>();
    mips_codegen->MoveLocation(out_,
                               calling_convention.GetReturnLocation(Primitive::kPrimNot),
                               Primitive::kPrimNot);

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathMIPS"; }

 private:
  const Location out_;
  const Location root_;

  DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathMIPS);
};

CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
                                     const MipsInstructionSetFeatures& isa_features,
                                     const CompilerOptions& compiler_options,
                                     OptimizingCompilerStats* stats)
    : CodeGenerator(graph,
                    kNumberOfCoreRegisters,
                    kNumberOfFRegisters,
                    kNumberOfRegisterPairs,
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options,
                    stats),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      assembler_(graph->GetArena(), &isa_features),
      isa_features_(isa_features),
      uint32_literals_(std::less<uint32_t>(),
                       graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
      clobbered_ra_(false) {
  // Save RA (containing the return address) to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(RA));
}

#undef __
// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
#define __ down_cast<MipsAssembler*>(GetAssembler())->  // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()

void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
  // Ensure that we fix up branches.
  __ FinalizeCode();

  // Adjust native pc offsets in stack maps.
  for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
    uint32_t old_position =
        stack_map_stream_.GetStackMap(i).native_pc_code_offset.Uint32Value(kMips);
    uint32_t new_position = __ GetAdjustedPosition(old_position);
    DCHECK_GE(new_position, old_position);
    stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
  }

  // Adjust pc offsets for the disassembly information.
  if (disasm_info_ != nullptr) {
    GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
    frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
    frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
    for (auto& it : *disasm_info_->GetInstructionIntervals()) {
      it.second.start = __ GetAdjustedPosition(it.second.start);
      it.second.end = __ GetAdjustedPosition(it.second.end);
    }
    for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
      it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
      it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
    }
  }

  CodeGenerator::Finalize(allocator);
}
   1150 
   1151 MipsAssembler* ParallelMoveResolverMIPS::GetAssembler() const {
   1152   return codegen_->GetAssembler();
   1153 }
   1154 
   1155 void ParallelMoveResolverMIPS::EmitMove(size_t index) {
   1156   DCHECK_LT(index, moves_.size());
   1157   MoveOperands* move = moves_[index];
   1158   codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
   1159 }
   1160 
   1161 void ParallelMoveResolverMIPS::EmitSwap(size_t index) {
   1162   DCHECK_LT(index, moves_.size());
   1163   MoveOperands* move = moves_[index];
   1164   Primitive::Type type = move->GetType();
   1165   Location loc1 = move->GetDestination();
   1166   Location loc2 = move->GetSource();
   1167 
   1168   DCHECK(!loc1.IsConstant());
   1169   DCHECK(!loc2.IsConstant());
   1170 
   1171   if (loc1.Equals(loc2)) {
   1172     return;
   1173   }
   1174 
   1175   if (loc1.IsRegister() && loc2.IsRegister()) {
   1176     // Swap 2 GPRs.
   1177     Register r1 = loc1.AsRegister<Register>();
   1178     Register r2 = loc2.AsRegister<Register>();
   1179     __ Move(TMP, r2);
   1180     __ Move(r2, r1);
   1181     __ Move(r1, TMP);
   1182   } else if (loc1.IsFpuRegister() && loc2.IsFpuRegister()) {
   1183     FRegister f1 = loc1.AsFpuRegister<FRegister>();
   1184     FRegister f2 = loc2.AsFpuRegister<FRegister>();
   1185     if (type == Primitive::kPrimFloat) {
   1186       __ MovS(FTMP, f2);
   1187       __ MovS(f2, f1);
   1188       __ MovS(f1, FTMP);
   1189     } else {
   1190       DCHECK_EQ(type, Primitive::kPrimDouble);
   1191       __ MovD(FTMP, f2);
   1192       __ MovD(f2, f1);
   1193       __ MovD(f1, FTMP);
   1194     }
   1195   } else if ((loc1.IsRegister() && loc2.IsFpuRegister()) ||
   1196              (loc1.IsFpuRegister() && loc2.IsRegister())) {
   1197     // Swap FPR and GPR.
   1198     DCHECK_EQ(type, Primitive::kPrimFloat);  // Can only swap a float.
   1199     FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
   1200                                         : loc2.AsFpuRegister<FRegister>();
   1201     Register r2 = loc1.IsRegister() ? loc1.AsRegister<Register>() : loc2.AsRegister<Register>();
   1202     __ Move(TMP, r2);
   1203     __ Mfc1(r2, f1);
   1204     __ Mtc1(TMP, f1);
   1205   } else if (loc1.IsRegisterPair() && loc2.IsRegisterPair()) {
   1206     // Swap 2 GPR register pairs.
   1207     Register r1 = loc1.AsRegisterPairLow<Register>();
   1208     Register r2 = loc2.AsRegisterPairLow<Register>();
   1209     __ Move(TMP, r2);
   1210     __ Move(r2, r1);
   1211     __ Move(r1, TMP);
   1212     r1 = loc1.AsRegisterPairHigh<Register>();
   1213     r2 = loc2.AsRegisterPairHigh<Register>();
   1214     __ Move(TMP, r2);
   1215     __ Move(r2, r1);
   1216     __ Move(r1, TMP);
   1217   } else if ((loc1.IsRegisterPair() && loc2.IsFpuRegister()) ||
   1218              (loc1.IsFpuRegister() && loc2.IsRegisterPair())) {
   1219     // Swap FPR and GPR register pair.
   1220     DCHECK_EQ(type, Primitive::kPrimDouble);
   1221     FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
   1222                                         : loc2.AsFpuRegister<FRegister>();
   1223     Register r2_l = loc1.IsRegisterPair() ? loc1.AsRegisterPairLow<Register>()
   1224                                           : loc2.AsRegisterPairLow<Register>();
   1225     Register r2_h = loc1.IsRegisterPair() ? loc1.AsRegisterPairHigh<Register>()
   1226                                           : loc2.AsRegisterPairHigh<Register>();
    1227     // Use 2 temporary registers because we can't first swap the low 32 bits of an FPR and
    1228     // then swap the high 32 bits of the same FPR: mtc1 makes the high 32 bits of an FPR
    1229     // unpredictable, and the following mfhc1 would then read an unpredictable value.
   1230     __ Mfc1(TMP, f1);
   1231     __ MoveFromFpuHigh(AT, f1);
   1232     __ Mtc1(r2_l, f1);
   1233     __ MoveToFpuHigh(r2_h, f1);
   1234     __ Move(r2_l, TMP);
   1235     __ Move(r2_h, AT);
   1236   } else if (loc1.IsStackSlot() && loc2.IsStackSlot()) {
   1237     Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ false);
   1238   } else if (loc1.IsDoubleStackSlot() && loc2.IsDoubleStackSlot()) {
   1239     Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ true);
   1240   } else if ((loc1.IsRegister() && loc2.IsStackSlot()) ||
   1241              (loc1.IsStackSlot() && loc2.IsRegister())) {
   1242     Register reg = loc1.IsRegister() ? loc1.AsRegister<Register>() : loc2.AsRegister<Register>();
   1243     intptr_t offset = loc1.IsStackSlot() ? loc1.GetStackIndex() : loc2.GetStackIndex();
   1244     __ Move(TMP, reg);
   1245     __ LoadFromOffset(kLoadWord, reg, SP, offset);
   1246     __ StoreToOffset(kStoreWord, TMP, SP, offset);
   1247   } else if ((loc1.IsRegisterPair() && loc2.IsDoubleStackSlot()) ||
   1248              (loc1.IsDoubleStackSlot() && loc2.IsRegisterPair())) {
   1249     Register reg_l = loc1.IsRegisterPair() ? loc1.AsRegisterPairLow<Register>()
   1250                                            : loc2.AsRegisterPairLow<Register>();
   1251     Register reg_h = loc1.IsRegisterPair() ? loc1.AsRegisterPairHigh<Register>()
   1252                                            : loc2.AsRegisterPairHigh<Register>();
   1253     intptr_t offset_l = loc1.IsDoubleStackSlot() ? loc1.GetStackIndex() : loc2.GetStackIndex();
   1254     intptr_t offset_h = loc1.IsDoubleStackSlot() ? loc1.GetHighStackIndex(kMipsWordSize)
   1255                                                  : loc2.GetHighStackIndex(kMipsWordSize);
   1256     __ Move(TMP, reg_l);
   1257     __ LoadFromOffset(kLoadWord, reg_l, SP, offset_l);
   1258     __ StoreToOffset(kStoreWord, TMP, SP, offset_l);
   1259     __ Move(TMP, reg_h);
   1260     __ LoadFromOffset(kLoadWord, reg_h, SP, offset_h);
   1261     __ StoreToOffset(kStoreWord, TMP, SP, offset_h);
   1262   } else if (loc1.IsFpuRegister() || loc2.IsFpuRegister()) {
   1263     FRegister reg = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
   1264                                          : loc2.AsFpuRegister<FRegister>();
   1265     intptr_t offset = loc1.IsFpuRegister() ? loc2.GetStackIndex() : loc1.GetStackIndex();
   1266     if (type == Primitive::kPrimFloat) {
   1267       __ MovS(FTMP, reg);
   1268       __ LoadSFromOffset(reg, SP, offset);
   1269       __ StoreSToOffset(FTMP, SP, offset);
   1270     } else {
   1271       DCHECK_EQ(type, Primitive::kPrimDouble);
   1272       __ MovD(FTMP, reg);
   1273       __ LoadDFromOffset(reg, SP, offset);
   1274       __ StoreDToOffset(FTMP, SP, offset);
   1275     }
   1276   } else {
   1277     LOG(FATAL) << "Swap between " << loc1 << " and " << loc2 << " is unsupported";
   1278   }
   1279 }
   1280 
   1281 void ParallelMoveResolverMIPS::RestoreScratch(int reg) {
   1282   __ Pop(static_cast<Register>(reg));
   1283 }
   1284 
   1285 void ParallelMoveResolverMIPS::SpillScratch(int reg) {
   1286   __ Push(static_cast<Register>(reg));
   1287 }
   1288 
   1289 void ParallelMoveResolverMIPS::Exchange(int index1, int index2, bool double_slot) {
   1290   // Allocate a scratch register other than TMP, if available.
    1291   // Otherwise, spill V0 (an arbitrary choice) and use it as a scratch register (it will be
    1292   // automatically unspilled when the scratch scope object is destroyed).
   1293   ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
   1294   // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
   1295   int stack_offset = ensure_scratch.IsSpilled() ? kMipsWordSize : 0;
   1296   for (int i = 0; i <= (double_slot ? 1 : 0); i++, stack_offset += kMipsWordSize) {
   1297     __ LoadFromOffset(kLoadWord,
   1298                       Register(ensure_scratch.GetRegister()),
   1299                       SP,
   1300                       index1 + stack_offset);
   1301     __ LoadFromOffset(kLoadWord,
   1302                       TMP,
   1303                       SP,
   1304                       index2 + stack_offset);
   1305     __ StoreToOffset(kStoreWord,
   1306                      Register(ensure_scratch.GetRegister()),
   1307                      SP,
   1308                      index2 + stack_offset);
   1309     __ StoreToOffset(kStoreWord, TMP, SP, index1 + stack_offset);
   1310   }
   1311 }
   1312 
   1313 void CodeGeneratorMIPS::ComputeSpillMask() {
   1314   core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
   1315   fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
   1316   DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
    1317   // If there are FPU callee-saved registers and an odd number of GPR callee-saved
    1318   // registers, include the ZERO register to keep the FPU callee-saved registers
    1319   // 8-byte aligned within the stack frame.
   1320   if ((fpu_spill_mask_ != 0) && (POPCOUNT(core_spill_mask_) % 2 != 0)) {
   1321     core_spill_mask_ |= (1 << ZERO);
   1322   }
   1323 }
   1324 
   1325 bool CodeGeneratorMIPS::HasAllocatedCalleeSaveRegisters() const {
   1326   // If RA is clobbered by PC-relative operations on R2 and it's the only spilled register
   1327   // (this can happen in leaf methods), force CodeGenerator::InitializeCodeGeneration()
   1328   // into the path that creates a stack frame so that RA can be explicitly saved and restored.
   1329   // RA can't otherwise be saved/restored when it's the only spilled register.
   1330   return CodeGenerator::HasAllocatedCalleeSaveRegisters() || clobbered_ra_;
   1331 }
   1332 
   1333 static dwarf::Reg DWARFReg(Register reg) {
   1334   return dwarf::Reg::MipsCore(static_cast<int>(reg));
   1335 }
   1336 
   1337 // TODO: mapping of floating-point registers to DWARF.
   1338 
   1339 void CodeGeneratorMIPS::GenerateFrameEntry() {
   1340   __ Bind(&frame_entry_label_);
   1341 
   1342   bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips) || !IsLeafMethod();
   1343 
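           // When the check is needed, probe the stack with a load from just beyond the
           // reserved region below SP. If the probe hits the stack guard, the runtime's fault
           // handler turns the fault into a StackOverflowError; RecordPcInfo below provides
           // the stack map for the probing instruction.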
   1344   if (do_overflow_check) {
   1345     __ LoadFromOffset(kLoadWord,
   1346                       ZERO,
   1347                       SP,
   1348                       -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips)));
   1349     RecordPcInfo(nullptr, 0);
   1350   }
   1351 
   1352   if (HasEmptyFrame()) {
   1353     CHECK_EQ(fpu_spill_mask_, 0u);
   1354     CHECK_EQ(core_spill_mask_, 1u << RA);
   1355     CHECK(!clobbered_ra_);
   1356     return;
   1357   }
   1358 
   1359   // Make sure the frame size isn't unreasonably large.
   1360   if (GetFrameSize() > GetStackOverflowReservedBytes(kMips)) {
   1361     LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips) << " bytes";
   1362   }
   1363 
   1364   // Spill callee-saved registers.
   1365 
   1366   uint32_t ofs = GetFrameSize();
   1367   __ IncreaseFrameSize(ofs);
   1368 
   1369   for (uint32_t mask = core_spill_mask_; mask != 0; ) {
   1370     Register reg = static_cast<Register>(MostSignificantBit(mask));
   1371     mask ^= 1u << reg;
   1372     ofs -= kMipsWordSize;
   1373     // The ZERO register is only included for alignment.
   1374     if (reg != ZERO) {
   1375       __ StoreToOffset(kStoreWord, reg, SP, ofs);
   1376       __ cfi().RelOffset(DWARFReg(reg), ofs);
   1377     }
   1378   }
   1379 
   1380   for (uint32_t mask = fpu_spill_mask_; mask != 0; ) {
   1381     FRegister reg = static_cast<FRegister>(MostSignificantBit(mask));
   1382     mask ^= 1u << reg;
   1383     ofs -= kMipsDoublewordSize;
   1384     __ StoreDToOffset(reg, SP, ofs);
   1385     // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
   1386   }
   1387 
   1388   // Save the current method if we need it. Note that we do not
   1389   // do this in HCurrentMethod, as the instruction might have been removed
   1390   // in the SSA graph.
   1391   if (RequiresCurrentMethod()) {
   1392     __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
   1393   }
   1394 
   1395   if (GetGraph()->HasShouldDeoptimizeFlag()) {
    1396     // Initialize the should-deoptimize flag to 0.
   1397     __ StoreToOffset(kStoreWord, ZERO, SP, GetStackOffsetOfShouldDeoptimizeFlag());
   1398   }
   1399 }
   1400 
   1401 void CodeGeneratorMIPS::GenerateFrameExit() {
   1402   __ cfi().RememberState();
   1403 
   1404   if (!HasEmptyFrame()) {
   1405     // Restore callee-saved registers.
   1406 
    1407     // For better instruction scheduling, restore RA before the other registers.
   1408     uint32_t ofs = GetFrameSize();
   1409     for (uint32_t mask = core_spill_mask_; mask != 0; ) {
   1410       Register reg = static_cast<Register>(MostSignificantBit(mask));
   1411       mask ^= 1u << reg;
   1412       ofs -= kMipsWordSize;
   1413       // The ZERO register is only included for alignment.
   1414       if (reg != ZERO) {
   1415         __ LoadFromOffset(kLoadWord, reg, SP, ofs);
   1416         __ cfi().Restore(DWARFReg(reg));
   1417       }
   1418     }
   1419 
   1420     for (uint32_t mask = fpu_spill_mask_; mask != 0; ) {
   1421       FRegister reg = static_cast<FRegister>(MostSignificantBit(mask));
   1422       mask ^= 1u << reg;
   1423       ofs -= kMipsDoublewordSize;
   1424       __ LoadDFromOffset(reg, SP, ofs);
   1425       // TODO: __ cfi().Restore(DWARFReg(reg));
   1426     }
   1427 
   1428     size_t frame_size = GetFrameSize();
   1429     // Adjust the stack pointer in the delay slot if doing so doesn't break CFI.
   1430     bool exchange = IsInt<16>(static_cast<int32_t>(frame_size));
   1431     bool reordering = __ SetReorder(false);
   1432     if (exchange) {
   1433       __ Jr(RA);
   1434       __ DecreaseFrameSize(frame_size);  // Single instruction in delay slot.
   1435     } else {
   1436       __ DecreaseFrameSize(frame_size);
   1437       __ Jr(RA);
   1438       __ Nop();  // In delay slot.
   1439     }
   1440     __ SetReorder(reordering);
   1441   } else {
   1442     __ Jr(RA);
   1443     __ NopIfNoReordering();
   1444   }
   1445 
   1446   __ cfi().RestoreState();
   1447   __ cfi().DefCFAOffset(GetFrameSize());
   1448 }
   1449 
   1450 void CodeGeneratorMIPS::Bind(HBasicBlock* block) {
   1451   __ Bind(GetLabelOf(block));
   1452 }
   1453 
   1454 VectorRegister VectorRegisterFrom(Location location) {
   1455   DCHECK(location.IsFpuRegister());
   1456   return static_cast<VectorRegister>(location.AsFpuRegister<FRegister>());
   1457 }
   1458 
   1459 void CodeGeneratorMIPS::MoveLocation(Location destination,
   1460                                      Location source,
   1461                                      Primitive::Type dst_type) {
   1462   if (source.Equals(destination)) {
   1463     return;
   1464   }
   1465 
   1466   if (source.IsConstant()) {
   1467     MoveConstant(destination, source.GetConstant());
   1468   } else {
   1469     if (destination.IsRegister()) {
   1470       if (source.IsRegister()) {
   1471         __ Move(destination.AsRegister<Register>(), source.AsRegister<Register>());
   1472       } else if (source.IsFpuRegister()) {
   1473         __ Mfc1(destination.AsRegister<Register>(), source.AsFpuRegister<FRegister>());
   1474       } else {
   1475         DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
    1476         __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
   1477       }
   1478     } else if (destination.IsRegisterPair()) {
   1479       if (source.IsRegisterPair()) {
   1480         __ Move(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
   1481         __ Move(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
   1482       } else if (source.IsFpuRegister()) {
   1483         Register dst_high = destination.AsRegisterPairHigh<Register>();
    1484         Register dst_low = destination.AsRegisterPairLow<Register>();
   1485         FRegister src = source.AsFpuRegister<FRegister>();
   1486         __ Mfc1(dst_low, src);
   1487         __ MoveFromFpuHigh(dst_high, src);
   1488       } else {
   1489         DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
   1490         int32_t off = source.GetStackIndex();
   1491         Register r = destination.AsRegisterPairLow<Register>();
   1492         __ LoadFromOffset(kLoadDoubleword, r, SP, off);
   1493       }
   1494     } else if (destination.IsFpuRegister()) {
   1495       if (source.IsRegister()) {
   1496         DCHECK(!Primitive::Is64BitType(dst_type));
   1497         __ Mtc1(source.AsRegister<Register>(), destination.AsFpuRegister<FRegister>());
   1498       } else if (source.IsRegisterPair()) {
   1499         DCHECK(Primitive::Is64BitType(dst_type));
   1500         FRegister dst = destination.AsFpuRegister<FRegister>();
   1501         Register src_high = source.AsRegisterPairHigh<Register>();
   1502         Register src_low = source.AsRegisterPairLow<Register>();
   1503         __ Mtc1(src_low, dst);
   1504         __ MoveToFpuHigh(src_high, dst);
   1505       } else if (source.IsFpuRegister()) {
   1506         if (GetGraph()->HasSIMD()) {
   1507           __ MoveV(VectorRegisterFrom(destination),
   1508                    VectorRegisterFrom(source));
   1509         } else {
   1510           if (Primitive::Is64BitType(dst_type)) {
   1511             __ MovD(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
   1512           } else {
   1513             DCHECK_EQ(dst_type, Primitive::kPrimFloat);
   1514             __ MovS(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
   1515           }
   1516         }
   1517       } else if (source.IsSIMDStackSlot()) {
   1518         __ LoadQFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
   1519       } else if (source.IsDoubleStackSlot()) {
   1520         DCHECK(Primitive::Is64BitType(dst_type));
   1521         __ LoadDFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
   1522       } else {
   1523         DCHECK(!Primitive::Is64BitType(dst_type));
   1524         DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
   1525         __ LoadSFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
   1526       }
   1527     } else if (destination.IsSIMDStackSlot()) {
   1528       if (source.IsFpuRegister()) {
   1529         __ StoreQToOffset(source.AsFpuRegister<FRegister>(), SP, destination.GetStackIndex());
   1530       } else {
   1531         DCHECK(source.IsSIMDStackSlot());
   1532         __ LoadQFromOffset(FTMP, SP, source.GetStackIndex());
   1533         __ StoreQToOffset(FTMP, SP, destination.GetStackIndex());
   1534       }
   1535     } else if (destination.IsDoubleStackSlot()) {
   1536       int32_t dst_offset = destination.GetStackIndex();
   1537       if (source.IsRegisterPair()) {
   1538         __ StoreToOffset(kStoreDoubleword, source.AsRegisterPairLow<Register>(), SP, dst_offset);
   1539       } else if (source.IsFpuRegister()) {
   1540         __ StoreDToOffset(source.AsFpuRegister<FRegister>(), SP, dst_offset);
   1541       } else {
   1542         DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
   1543         __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
   1544         __ StoreToOffset(kStoreWord, TMP, SP, dst_offset);
   1545         __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex() + 4);
   1546         __ StoreToOffset(kStoreWord, TMP, SP, dst_offset + 4);
   1547       }
   1548     } else {
   1549       DCHECK(destination.IsStackSlot()) << destination;
   1550       int32_t dst_offset = destination.GetStackIndex();
   1551       if (source.IsRegister()) {
   1552         __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, dst_offset);
   1553       } else if (source.IsFpuRegister()) {
   1554         __ StoreSToOffset(source.AsFpuRegister<FRegister>(), SP, dst_offset);
   1555       } else {
   1556         DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
   1557         __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
   1558         __ StoreToOffset(kStoreWord, TMP, SP, dst_offset);
   1559       }
   1560     }
   1561   }
   1562 }
   1563 
   1564 void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) {
   1565   if (c->IsIntConstant() || c->IsNullConstant()) {
   1566     // Move 32 bit constant.
   1567     int32_t value = GetInt32ValueOf(c);
   1568     if (destination.IsRegister()) {
   1569       Register dst = destination.AsRegister<Register>();
   1570       __ LoadConst32(dst, value);
   1571     } else {
   1572       DCHECK(destination.IsStackSlot())
   1573           << "Cannot move " << c->DebugName() << " to " << destination;
   1574       __ StoreConstToOffset(kStoreWord, value, SP, destination.GetStackIndex(), TMP);
   1575     }
   1576   } else if (c->IsLongConstant()) {
   1577     // Move 64 bit constant.
   1578     int64_t value = GetInt64ValueOf(c);
   1579     if (destination.IsRegisterPair()) {
   1580       Register r_h = destination.AsRegisterPairHigh<Register>();
   1581       Register r_l = destination.AsRegisterPairLow<Register>();
   1582       __ LoadConst64(r_h, r_l, value);
   1583     } else {
   1584       DCHECK(destination.IsDoubleStackSlot())
   1585           << "Cannot move " << c->DebugName() << " to " << destination;
   1586       __ StoreConstToOffset(kStoreDoubleword, value, SP, destination.GetStackIndex(), TMP);
   1587     }
   1588   } else if (c->IsFloatConstant()) {
   1589     // Move 32 bit float constant.
   1590     int32_t value = GetInt32ValueOf(c);
   1591     if (destination.IsFpuRegister()) {
   1592       __ LoadSConst32(destination.AsFpuRegister<FRegister>(), value, TMP);
   1593     } else {
   1594       DCHECK(destination.IsStackSlot())
   1595           << "Cannot move " << c->DebugName() << " to " << destination;
   1596       __ StoreConstToOffset(kStoreWord, value, SP, destination.GetStackIndex(), TMP);
   1597     }
   1598   } else {
   1599     // Move 64 bit double constant.
   1600     DCHECK(c->IsDoubleConstant()) << c->DebugName();
   1601     int64_t value = GetInt64ValueOf(c);
   1602     if (destination.IsFpuRegister()) {
   1603       FRegister fd = destination.AsFpuRegister<FRegister>();
   1604       __ LoadDConst64(fd, value, TMP);
   1605     } else {
   1606       DCHECK(destination.IsDoubleStackSlot())
   1607           << "Cannot move " << c->DebugName() << " to " << destination;
   1608       __ StoreConstToOffset(kStoreDoubleword, value, SP, destination.GetStackIndex(), TMP);
   1609     }
   1610   }
   1611 }
   1612 
   1613 void CodeGeneratorMIPS::MoveConstant(Location destination, int32_t value) {
   1614   DCHECK(destination.IsRegister());
   1615   Register dst = destination.AsRegister<Register>();
   1616   __ LoadConst32(dst, value);
   1617 }
   1618 
   1619 void CodeGeneratorMIPS::AddLocationAsTemp(Location location, LocationSummary* locations) {
   1620   if (location.IsRegister()) {
   1621     locations->AddTemp(location);
   1622   } else if (location.IsRegisterPair()) {
   1623     locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
   1624     locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
   1625   } else {
   1626     UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
   1627   }
   1628 }
   1629 
   1630 template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
   1631 inline void CodeGeneratorMIPS::EmitPcRelativeLinkerPatches(
   1632     const ArenaDeque<PcRelativePatchInfo>& infos,
   1633     ArenaVector<LinkerPatch>* linker_patches) {
   1634   for (const PcRelativePatchInfo& info : infos) {
   1635     const DexFile& dex_file = info.target_dex_file;
   1636     size_t offset_or_index = info.offset_or_index;
   1637     DCHECK(info.label.IsBound());
   1638     uint32_t literal_offset = __ GetLabelLocation(&info.label);
   1639     // On R2 we use HMipsComputeBaseMethodAddress and patch relative to
   1640     // the assembler's base label used for PC-relative addressing.
   1641     const PcRelativePatchInfo& info_high = info.patch_info_high ? *info.patch_info_high : info;
   1642     uint32_t pc_rel_offset = info_high.pc_rel_label.IsBound()
   1643         ? __ GetLabelLocation(&info_high.pc_rel_label)
   1644         : __ GetPcRelBaseLabelLocation();
   1645     linker_patches->push_back(Factory(literal_offset, &dex_file, pc_rel_offset, offset_or_index));
   1646   }
   1647 }
   1648 
   1649 void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
   1650   DCHECK(linker_patches->empty());
   1651   size_t size =
   1652       pc_relative_method_patches_.size() +
   1653       method_bss_entry_patches_.size() +
   1654       pc_relative_type_patches_.size() +
   1655       type_bss_entry_patches_.size() +
   1656       pc_relative_string_patches_.size();
   1657   linker_patches->reserve(size);
   1658   if (GetCompilerOptions().IsBootImage()) {
   1659     EmitPcRelativeLinkerPatches<LinkerPatch::RelativeMethodPatch>(pc_relative_method_patches_,
   1660                                                                   linker_patches);
   1661     EmitPcRelativeLinkerPatches<LinkerPatch::RelativeTypePatch>(pc_relative_type_patches_,
   1662                                                                 linker_patches);
   1663     EmitPcRelativeLinkerPatches<LinkerPatch::RelativeStringPatch>(pc_relative_string_patches_,
   1664                                                                   linker_patches);
   1665   } else {
   1666     DCHECK(pc_relative_method_patches_.empty());
   1667     DCHECK(pc_relative_type_patches_.empty());
   1668     EmitPcRelativeLinkerPatches<LinkerPatch::StringBssEntryPatch>(pc_relative_string_patches_,
   1669                                                                   linker_patches);
   1670   }
   1671   EmitPcRelativeLinkerPatches<LinkerPatch::MethodBssEntryPatch>(method_bss_entry_patches_,
   1672                                                                 linker_patches);
   1673   EmitPcRelativeLinkerPatches<LinkerPatch::TypeBssEntryPatch>(type_bss_entry_patches_,
   1674                                                               linker_patches);
   1675   DCHECK_EQ(size, linker_patches->size());
   1676 }
   1677 
   1678 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeMethodPatch(
   1679     MethodReference target_method,
   1680     const PcRelativePatchInfo* info_high) {
   1681   return NewPcRelativePatch(*target_method.dex_file,
   1682                             target_method.dex_method_index,
   1683                             info_high,
   1684                             &pc_relative_method_patches_);
   1685 }
   1686 
   1687 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewMethodBssEntryPatch(
   1688     MethodReference target_method,
   1689     const PcRelativePatchInfo* info_high) {
   1690   return NewPcRelativePatch(*target_method.dex_file,
   1691                             target_method.dex_method_index,
   1692                             info_high,
   1693                             &method_bss_entry_patches_);
   1694 }
   1695 
   1696 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeTypePatch(
   1697     const DexFile& dex_file,
   1698     dex::TypeIndex type_index,
   1699     const PcRelativePatchInfo* info_high) {
   1700   return NewPcRelativePatch(dex_file, type_index.index_, info_high, &pc_relative_type_patches_);
   1701 }
   1702 
   1703 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewTypeBssEntryPatch(
   1704     const DexFile& dex_file,
   1705     dex::TypeIndex type_index,
   1706     const PcRelativePatchInfo* info_high) {
   1707   return NewPcRelativePatch(dex_file, type_index.index_, info_high, &type_bss_entry_patches_);
   1708 }
   1709 
   1710 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativeStringPatch(
   1711     const DexFile& dex_file,
   1712     dex::StringIndex string_index,
   1713     const PcRelativePatchInfo* info_high) {
   1714   return NewPcRelativePatch(dex_file, string_index.index_, info_high, &pc_relative_string_patches_);
   1715 }
   1716 
   1717 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativePatch(
   1718     const DexFile& dex_file,
   1719     uint32_t offset_or_index,
   1720     const PcRelativePatchInfo* info_high,
   1721     ArenaDeque<PcRelativePatchInfo>* patches) {
   1722   patches->emplace_back(dex_file, offset_or_index, info_high);
   1723   return &patches->back();
   1724 }
   1725 
   1726 Literal* CodeGeneratorMIPS::DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map) {
   1727   return map->GetOrCreate(
   1728       value,
   1729       [this, value]() { return __ NewLiteral<uint32_t>(value); });
   1730 }
   1731 
   1732 Literal* CodeGeneratorMIPS::DeduplicateBootImageAddressLiteral(uint32_t address) {
   1733   return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
   1734 }
   1735 
   1736 void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info_high,
   1737                                                              Register out,
   1738                                                              Register base,
   1739                                                              PcRelativePatchInfo* info_low) {
   1740   DCHECK(!info_high->patch_info_high);
   1741   DCHECK_NE(out, base);
   1742   if (GetInstructionSetFeatures().IsR6()) {
   1743     DCHECK_EQ(base, ZERO);
   1744     __ Bind(&info_high->label);
   1745     __ Bind(&info_high->pc_rel_label);
   1746     // Add the high half of a 32-bit offset to PC.
   1747     __ Auipc(out, /* placeholder */ 0x1234);
   1748   } else {
   1749     // If base is ZERO, emit NAL to obtain the actual base.
   1750     if (base == ZERO) {
   1751       // Generate a dummy PC-relative call to obtain PC.
   1752       __ Nal();
   1753     }
   1754     __ Bind(&info_high->label);
   1755     __ Lui(out, /* placeholder */ 0x1234);
    1756     // If we emitted the NAL, bind the pc_rel_label; otherwise base is a register holding
    1757     // the HMipsComputeBaseMethodAddress result, which has its own label stored in MipsAssembler.
   1758     if (base == ZERO) {
   1759       __ Bind(&info_high->pc_rel_label);
   1760     }
   1761     // Add the high half of a 32-bit offset to PC.
   1762     __ Addu(out, out, (base == ZERO) ? RA : base);
   1763   }
   1764   // A following instruction will add the sign-extended low half of the 32-bit
   1765   // offset to `out` (e.g. lw, jialc, addiu).
   1766   DCHECK_EQ(info_low->patch_info_high, info_high);
   1767   __ Bind(&info_low->label);
   1768 }
   1769 
   1770 CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootStringPatch(
   1771     const DexFile& dex_file,
   1772     dex::StringIndex dex_index,
   1773     Handle<mirror::String> handle) {
   1774   jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index),
   1775                               reinterpret_cast64<uint64_t>(handle.GetReference()));
   1776   jit_string_patches_.emplace_back(dex_file, dex_index.index_);
   1777   return &jit_string_patches_.back();
   1778 }
   1779 
   1780 CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootClassPatch(
   1781     const DexFile& dex_file,
   1782     dex::TypeIndex dex_index,
   1783     Handle<mirror::Class> handle) {
   1784   jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index),
   1785                              reinterpret_cast64<uint64_t>(handle.GetReference()));
   1786   jit_class_patches_.emplace_back(dex_file, dex_index.index_);
   1787   return &jit_class_patches_.back();
   1788 }
   1789 
   1790 void CodeGeneratorMIPS::PatchJitRootUse(uint8_t* code,
   1791                                         const uint8_t* roots_data,
   1792                                         const CodeGeneratorMIPS::JitPatchInfo& info,
   1793                                         uint64_t index_in_table) const {
   1794   uint32_t literal_offset = GetAssembler().GetLabelLocation(&info.high_label);
   1795   uintptr_t address =
   1796       reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
   1797   uint32_t addr32 = dchecked_integral_cast<uint32_t>(address);
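           // The checks below verify the linker placeholders 0x1234 (in the lui) and 0x5678
           // (in the following instruction); the bytes are compared in little-endian order,
           // the byte order Android uses on MIPS.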
   1798   // lui reg, addr32_high
   1799   DCHECK_EQ(code[literal_offset + 0], 0x34);
   1800   DCHECK_EQ(code[literal_offset + 1], 0x12);
   1801   DCHECK_EQ((code[literal_offset + 2] & 0xE0), 0x00);
   1802   DCHECK_EQ(code[literal_offset + 3], 0x3C);
   1803   // instr reg, reg, addr32_low
   1804   DCHECK_EQ(code[literal_offset + 4], 0x78);
   1805   DCHECK_EQ(code[literal_offset + 5], 0x56);
   1806   addr32 += (addr32 & 0x8000) << 1;  // Account for sign extension in "instr reg, reg, addr32_low".
   1807   // lui reg, addr32_high
   1808   code[literal_offset + 0] = static_cast<uint8_t>(addr32 >> 16);
   1809   code[literal_offset + 1] = static_cast<uint8_t>(addr32 >> 24);
   1810   // instr reg, reg, addr32_low
   1811   code[literal_offset + 4] = static_cast<uint8_t>(addr32 >> 0);
   1812   code[literal_offset + 5] = static_cast<uint8_t>(addr32 >> 8);
   1813 }
   1814 
   1815 void CodeGeneratorMIPS::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
   1816   for (const JitPatchInfo& info : jit_string_patches_) {
   1817     const auto it = jit_string_roots_.find(StringReference(&info.target_dex_file,
   1818                                                            dex::StringIndex(info.index)));
   1819     DCHECK(it != jit_string_roots_.end());
   1820     uint64_t index_in_table = it->second;
   1821     PatchJitRootUse(code, roots_data, info, index_in_table);
   1822   }
   1823   for (const JitPatchInfo& info : jit_class_patches_) {
   1824     const auto it = jit_class_roots_.find(TypeReference(&info.target_dex_file,
   1825                                                         dex::TypeIndex(info.index)));
   1826     DCHECK(it != jit_class_roots_.end());
   1827     uint64_t index_in_table = it->second;
   1828     PatchJitRootUse(code, roots_data, info, index_in_table);
   1829   }
   1830 }
   1831 
   1832 void CodeGeneratorMIPS::MarkGCCard(Register object,
   1833                                    Register value,
   1834                                    bool value_can_be_null) {
   1835   MipsLabel done;
   1836   Register card = AT;
   1837   Register temp = TMP;
   1838   if (value_can_be_null) {
   1839     __ Beqz(value, &done);
   1840   }
   1841   __ LoadFromOffset(kLoadWord,
   1842                     card,
   1843                     TR,
   1844                     Thread::CardTableOffset<kMipsPointerSize>().Int32Value());
   1845   __ Srl(temp, object, gc::accounting::CardTable::kCardShift);
   1846   __ Addu(temp, card, temp);
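           // Dirty the card at `card + (object >> kCardShift)`. The value stored is the low
           // byte of the `card` base address itself; this relies on the runtime biasing the
           // card table base so that its least significant byte equals the dirty-card
           // marker, which saves loading a separate constant.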
   1847   __ Sb(card, temp, 0);
   1848   if (value_can_be_null) {
   1849     __ Bind(&done);
   1850   }
   1851 }
   1852 
   1853 void CodeGeneratorMIPS::SetupBlockedRegisters() const {
   1854   // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
   1855   blocked_core_registers_[ZERO] = true;
   1856   blocked_core_registers_[K0] = true;
   1857   blocked_core_registers_[K1] = true;
   1858   blocked_core_registers_[GP] = true;
   1859   blocked_core_registers_[SP] = true;
   1860   blocked_core_registers_[RA] = true;
   1861 
    1862   // AT and TMP (T8) are used as temporary/scratch registers
   1863   // (similar to how AT is used by MIPS assemblers).
   1864   blocked_core_registers_[AT] = true;
   1865   blocked_core_registers_[TMP] = true;
   1866   blocked_fpu_registers_[FTMP] = true;
   1867 
   1868   // Reserve suspend and thread registers.
   1869   blocked_core_registers_[S0] = true;
   1870   blocked_core_registers_[TR] = true;
   1871 
    1872   // Reserve T9 for function calls (PIC callees are entered through T9).
   1873   blocked_core_registers_[T9] = true;
   1874 
    1875   // Reserve odd-numbered FPU registers; doubles occupy even/odd pairs in 32-bit FPU mode.
   1876   for (size_t i = 1; i < kNumberOfFRegisters; i += 2) {
   1877     blocked_fpu_registers_[i] = true;
   1878   }
   1879 
   1880   if (GetGraph()->IsDebuggable()) {
   1881     // Stubs do not save callee-save floating point registers. If the graph
   1882     // is debuggable, we need to deal with these registers differently. For
   1883     // now, just block them.
   1884     for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
   1885       blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
   1886     }
   1887   }
   1888 }
   1889 
   1890 size_t CodeGeneratorMIPS::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
   1891   __ StoreToOffset(kStoreWord, Register(reg_id), SP, stack_index);
   1892   return kMipsWordSize;
   1893 }
   1894 
   1895 size_t CodeGeneratorMIPS::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
   1896   __ LoadFromOffset(kLoadWord, Register(reg_id), SP, stack_index);
   1897   return kMipsWordSize;
   1898 }
   1899 
   1900 size_t CodeGeneratorMIPS::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
   1901   if (GetGraph()->HasSIMD()) {
   1902     __ StoreQToOffset(FRegister(reg_id), SP, stack_index);
   1903   } else {
   1904     __ StoreDToOffset(FRegister(reg_id), SP, stack_index);
   1905   }
   1906   return GetFloatingPointSpillSlotSize();
   1907 }
   1908 
   1909 size_t CodeGeneratorMIPS::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
   1910   if (GetGraph()->HasSIMD()) {
   1911     __ LoadQFromOffset(FRegister(reg_id), SP, stack_index);
   1912   } else {
   1913     __ LoadDFromOffset(FRegister(reg_id), SP, stack_index);
   1914   }
   1915   return GetFloatingPointSpillSlotSize();
   1916 }
   1917 
   1918 void CodeGeneratorMIPS::DumpCoreRegister(std::ostream& stream, int reg) const {
   1919   stream << Register(reg);
   1920 }
   1921 
   1922 void CodeGeneratorMIPS::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
   1923   stream << FRegister(reg);
   1924 }
   1925 
   1926 constexpr size_t kMipsDirectEntrypointRuntimeOffset = 16;
   1927 
   1928 void CodeGeneratorMIPS::InvokeRuntime(QuickEntrypointEnum entrypoint,
   1929                                       HInstruction* instruction,
   1930                                       uint32_t dex_pc,
   1931                                       SlowPathCode* slow_path) {
   1932   ValidateInvokeRuntime(entrypoint, instruction, slow_path);
   1933   GenerateInvokeRuntime(GetThreadOffset<kMipsPointerSize>(entrypoint).Int32Value(),
   1934                         IsDirectEntrypoint(entrypoint));
   1935   if (EntrypointRequiresStackMap(entrypoint)) {
   1936     RecordPcInfo(instruction, dex_pc, slow_path);
   1937   }
   1938 }
   1939 
   1940 void CodeGeneratorMIPS::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
   1941                                                             HInstruction* instruction,
   1942                                                             SlowPathCode* slow_path,
   1943                                                             bool direct) {
   1944   ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
   1945   GenerateInvokeRuntime(entry_point_offset, direct);
   1946 }
   1947 
   1948 void CodeGeneratorMIPS::GenerateInvokeRuntime(int32_t entry_point_offset, bool direct) {
   1949   bool reordering = __ SetReorder(false);
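           // Load the entrypoint through the Thread register and call it via T9, the
           // register in which the MIPS PIC calling convention expects a callee's address.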
   1950   __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
   1951   __ Jalr(T9);
   1952   if (direct) {
    1953     // Reserve argument space on the stack (for $a0-$a3) for
    1954     // entrypoints that directly reference native implementations.
    1955     // The called function may use this space to store the $a0-$a3 registers.
   1956     __ IncreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);  // Single instruction in delay slot.
   1957     __ DecreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);
   1958   } else {
   1959     __ Nop();  // In delay slot.
   1960   }
   1961   __ SetReorder(reordering);
   1962 }
   1963 
   1964 void InstructionCodeGeneratorMIPS::GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path,
   1965                                                                     Register class_reg) {
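           // Class status values are ordered (error states are negative), so a status below
           // kStatusInitialized means initialization is incomplete and the slow path is taken.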
   1966   __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
   1967   __ LoadConst32(AT, mirror::Class::kStatusInitialized);
   1968   __ Blt(TMP, AT, slow_path->GetEntryLabel());
   1969   // Even if the initialized flag is set, we need to ensure consistent memory ordering.
   1970   __ Sync(0);
   1971   __ Bind(slow_path->GetExitLabel());
   1972 }
   1973 
   1974 void InstructionCodeGeneratorMIPS::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
   1975   __ Sync(0);  // Only stype 0 is supported.
   1976 }
   1977 
   1978 void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction,
   1979                                                         HBasicBlock* successor) {
   1980   SuspendCheckSlowPathMIPS* slow_path =
   1981     new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS(instruction, successor);
   1982   codegen_->AddSlowPath(slow_path);
   1983 
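           // The thread's flags are non-zero only when a suspend or checkpoint request is
           // pending, in which case control transfers to the slow path.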
   1984   __ LoadFromOffset(kLoadUnsignedHalfword,
   1985                     TMP,
   1986                     TR,
   1987                     Thread::ThreadFlagsOffset<kMipsPointerSize>().Int32Value());
   1988   if (successor == nullptr) {
   1989     __ Bnez(TMP, slow_path->GetEntryLabel());
   1990     __ Bind(slow_path->GetReturnLabel());
   1991   } else {
   1992     __ Beqz(TMP, codegen_->GetLabelOf(successor));
   1993     __ B(slow_path->GetEntryLabel());
   1994     // slow_path will return to GetLabelOf(successor).
   1995   }
   1996 }
   1997 
   1998 InstructionCodeGeneratorMIPS::InstructionCodeGeneratorMIPS(HGraph* graph,
   1999                                                            CodeGeneratorMIPS* codegen)
   2000       : InstructionCodeGenerator(graph, codegen),
   2001         assembler_(codegen->GetAssembler()),
   2002         codegen_(codegen) {}
   2003 
   2004 void LocationsBuilderMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
   2005   DCHECK_EQ(instruction->InputCount(), 2U);
   2006   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
   2007   Primitive::Type type = instruction->GetResultType();
   2008   switch (type) {
   2009     case Primitive::kPrimInt: {
   2010       locations->SetInAt(0, Location::RequiresRegister());
   2011       HInstruction* right = instruction->InputAt(1);
   2012       bool can_use_imm = false;
   2013       if (right->IsConstant()) {
   2014         int32_t imm = CodeGenerator::GetInt32ValueOf(right->AsConstant());
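                 // andi/ori/xori take a zero-extended 16-bit immediate, while addiu
                 // sign-extends its immediate; subtraction is emitted as addiu with the
                 // negated constant, hence the IsInt<16>(-imm) check below.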
   2015         if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
   2016           can_use_imm = IsUint<16>(imm);
   2017         } else if (instruction->IsAdd()) {
   2018           can_use_imm = IsInt<16>(imm);
   2019         } else {
   2020           DCHECK(instruction->IsSub());
   2021           can_use_imm = IsInt<16>(-imm);
   2022         }
   2023       }
   2024       if (can_use_imm)
   2025         locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
   2026       else
   2027         locations->SetInAt(1, Location::RequiresRegister());
   2028       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   2029       break;
   2030     }
   2031 
   2032     case Primitive::kPrimLong: {
   2033       locations->SetInAt(0, Location::RequiresRegister());
   2034       locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
   2035       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   2036       break;
   2037     }
   2038 
   2039     case Primitive::kPrimFloat:
   2040     case Primitive::kPrimDouble:
   2041       DCHECK(instruction->IsAdd() || instruction->IsSub());
   2042       locations->SetInAt(0, Location::RequiresFpuRegister());
   2043       locations->SetInAt(1, Location::RequiresFpuRegister());
   2044       locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
   2045       break;
   2046 
   2047     default:
   2048       LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
   2049   }
   2050 }
   2051 
   2052 void InstructionCodeGeneratorMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
   2053   Primitive::Type type = instruction->GetType();
   2054   LocationSummary* locations = instruction->GetLocations();
   2055 
   2056   switch (type) {
   2057     case Primitive::kPrimInt: {
   2058       Register dst = locations->Out().AsRegister<Register>();
   2059       Register lhs = locations->InAt(0).AsRegister<Register>();
   2060       Location rhs_location = locations->InAt(1);
   2061 
   2062       Register rhs_reg = ZERO;
   2063       int32_t rhs_imm = 0;
   2064       bool use_imm = rhs_location.IsConstant();
   2065       if (use_imm) {
   2066         rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
   2067       } else {
   2068         rhs_reg = rhs_location.AsRegister<Register>();
   2069       }
   2070 
   2071       if (instruction->IsAnd()) {
   2072         if (use_imm)
   2073           __ Andi(dst, lhs, rhs_imm);
   2074         else
   2075           __ And(dst, lhs, rhs_reg);
   2076       } else if (instruction->IsOr()) {
   2077         if (use_imm)
   2078           __ Ori(dst, lhs, rhs_imm);
   2079         else
   2080           __ Or(dst, lhs, rhs_reg);
   2081       } else if (instruction->IsXor()) {
   2082         if (use_imm)
   2083           __ Xori(dst, lhs, rhs_imm);
   2084         else
   2085           __ Xor(dst, lhs, rhs_reg);
   2086       } else if (instruction->IsAdd()) {
   2087         if (use_imm)
   2088           __ Addiu(dst, lhs, rhs_imm);
   2089         else
   2090           __ Addu(dst, lhs, rhs_reg);
   2091       } else {
   2092         DCHECK(instruction->IsSub());
   2093         if (use_imm)
   2094           __ Addiu(dst, lhs, -rhs_imm);
   2095         else
   2096           __ Subu(dst, lhs, rhs_reg);
   2097       }
   2098       break;
   2099     }
   2100 
   2101     case Primitive::kPrimLong: {
   2102       Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
   2103       Register dst_low = locations->Out().AsRegisterPairLow<Register>();
   2104       Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
   2105       Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
   2106       Location rhs_location = locations->InAt(1);
   2107       bool use_imm = rhs_location.IsConstant();
   2108       if (!use_imm) {
   2109         Register rhs_high = rhs_location.AsRegisterPairHigh<Register>();
   2110         Register rhs_low = rhs_location.AsRegisterPairLow<Register>();
   2111         if (instruction->IsAnd()) {
   2112           __ And(dst_low, lhs_low, rhs_low);
   2113           __ And(dst_high, lhs_high, rhs_high);
   2114         } else if (instruction->IsOr()) {
   2115           __ Or(dst_low, lhs_low, rhs_low);
   2116           __ Or(dst_high, lhs_high, rhs_high);
   2117         } else if (instruction->IsXor()) {
   2118           __ Xor(dst_low, lhs_low, rhs_low);
   2119           __ Xor(dst_high, lhs_high, rhs_high);
   2120         } else if (instruction->IsAdd()) {
   2121           if (lhs_low == rhs_low) {
   2122             // Special case for lhs = rhs and the sum potentially overwriting both lhs and rhs.
   2123             __ Slt(TMP, lhs_low, ZERO);
   2124             __ Addu(dst_low, lhs_low, rhs_low);
   2125           } else {
   2126             __ Addu(dst_low, lhs_low, rhs_low);
   2127             // If the sum overwrites rhs, lhs remains unchanged, otherwise rhs remains unchanged.
   2128             __ Sltu(TMP, dst_low, (dst_low == rhs_low) ? lhs_low : rhs_low);
   2129           }
   2130           __ Addu(dst_high, lhs_high, rhs_high);
   2131           __ Addu(dst_high, dst_high, TMP);
   2132         } else {
   2133           DCHECK(instruction->IsSub());
   2134           __ Sltu(TMP, lhs_low, rhs_low);
   2135           __ Subu(dst_low, lhs_low, rhs_low);
   2136           __ Subu(dst_high, lhs_high, rhs_high);
   2137           __ Subu(dst_high, dst_high, TMP);
   2138         }
   2139       } else {
    2140         int64_t value = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
   2141         if (instruction->IsOr()) {
   2142           uint32_t low = Low32Bits(value);
   2143           uint32_t high = High32Bits(value);
   2144           if (IsUint<16>(low)) {
   2145             if (dst_low != lhs_low || low != 0) {
   2146               __ Ori(dst_low, lhs_low, low);
   2147             }
   2148           } else {
   2149             __ LoadConst32(TMP, low);
   2150             __ Or(dst_low, lhs_low, TMP);
   2151           }
   2152           if (IsUint<16>(high)) {
   2153             if (dst_high != lhs_high || high != 0) {
   2154               __ Ori(dst_high, lhs_high, high);
   2155             }
   2156           } else {
   2157             if (high != low) {
   2158               __ LoadConst32(TMP, high);
   2159             }
   2160             __ Or(dst_high, lhs_high, TMP);
   2161           }
   2162         } else if (instruction->IsXor()) {
   2163           uint32_t low = Low32Bits(value);
   2164           uint32_t high = High32Bits(value);
   2165           if (IsUint<16>(low)) {
   2166             if (dst_low != lhs_low || low != 0) {
   2167               __ Xori(dst_low, lhs_low, low);
   2168             }
   2169           } else {
   2170             __ LoadConst32(TMP, low);
   2171             __ Xor(dst_low, lhs_low, TMP);
   2172           }
   2173           if (IsUint<16>(high)) {
   2174             if (dst_high != lhs_high || high != 0) {
   2175               __ Xori(dst_high, lhs_high, high);
   2176             }
   2177           } else {
   2178             if (high != low) {
   2179               __ LoadConst32(TMP, high);
   2180             }
   2181             __ Xor(dst_high, lhs_high, TMP);
   2182           }
   2183         } else if (instruction->IsAnd()) {
   2184           uint32_t low = Low32Bits(value);
   2185           uint32_t high = High32Bits(value);
   2186           if (IsUint<16>(low)) {
   2187             __ Andi(dst_low, lhs_low, low);
   2188           } else if (low != 0xFFFFFFFF) {
   2189             __ LoadConst32(TMP, low);
   2190             __ And(dst_low, lhs_low, TMP);
   2191           } else if (dst_low != lhs_low) {
   2192             __ Move(dst_low, lhs_low);
   2193           }
   2194           if (IsUint<16>(high)) {
   2195             __ Andi(dst_high, lhs_high, high);
   2196           } else if (high != 0xFFFFFFFF) {
   2197             if (high != low) {
   2198               __ LoadConst32(TMP, high);
   2199             }
   2200             __ And(dst_high, lhs_high, TMP);
   2201           } else if (dst_high != lhs_high) {
   2202             __ Move(dst_high, lhs_high);
   2203           }
   2204         } else {
   2205           if (instruction->IsSub()) {
   2206             value = -value;
   2207           } else {
   2208             DCHECK(instruction->IsAdd());
   2209           }
   2210           int32_t low = Low32Bits(value);
   2211           int32_t high = High32Bits(value);
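                   // Add the low words first, computing the carry into AT: for an unsigned
                   // addition c = a + b, a carry out occurred iff c < b. The carry is folded
                   // into the high word at the end (no carry is possible when the low half
                   // of the constant is zero).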
   2212           if (IsInt<16>(low)) {
   2213             if (dst_low != lhs_low || low != 0) {
   2214               __ Addiu(dst_low, lhs_low, low);
   2215             }
   2216             if (low != 0) {
   2217               __ Sltiu(AT, dst_low, low);
   2218             }
   2219           } else {
   2220             __ LoadConst32(TMP, low);
   2221             __ Addu(dst_low, lhs_low, TMP);
   2222             __ Sltu(AT, dst_low, TMP);
   2223           }
   2224           if (IsInt<16>(high)) {
   2225             if (dst_high != lhs_high || high != 0) {
   2226               __ Addiu(dst_high, lhs_high, high);
   2227             }
   2228           } else {
   2229             if (high != low) {
   2230               __ LoadConst32(TMP, high);
   2231             }
   2232             __ Addu(dst_high, lhs_high, TMP);
   2233           }
   2234           if (low != 0) {
   2235             __ Addu(dst_high, dst_high, AT);
   2236           }
   2237         }
   2238       }
   2239       break;
   2240     }
   2241 
   2242     case Primitive::kPrimFloat:
   2243     case Primitive::kPrimDouble: {
   2244       FRegister dst = locations->Out().AsFpuRegister<FRegister>();
   2245       FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
   2246       FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
   2247       if (instruction->IsAdd()) {
   2248         if (type == Primitive::kPrimFloat) {
   2249           __ AddS(dst, lhs, rhs);
   2250         } else {
   2251           __ AddD(dst, lhs, rhs);
   2252         }
   2253       } else {
   2254         DCHECK(instruction->IsSub());
   2255         if (type == Primitive::kPrimFloat) {
   2256           __ SubS(dst, lhs, rhs);
   2257         } else {
   2258           __ SubD(dst, lhs, rhs);
   2259         }
   2260       }
   2261       break;
   2262     }
   2263 
   2264     default:
   2265       LOG(FATAL) << "Unexpected binary operation type " << type;
   2266   }
   2267 }
   2268 
   2269 void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) {
   2270   DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
   2271 
   2272   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
   2273   Primitive::Type type = instr->GetResultType();
   2274   switch (type) {
   2275     case Primitive::kPrimInt:
   2276       locations->SetInAt(0, Location::RequiresRegister());
   2277       locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
   2278       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   2279       break;
   2280     case Primitive::kPrimLong:
   2281       locations->SetInAt(0, Location::RequiresRegister());
   2282       locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
   2283       locations->SetOut(Location::RequiresRegister());
   2284       break;
   2285     default:
   2286       LOG(FATAL) << "Unexpected shift type " << type;
   2287   }
   2288 }
   2289 
   2290 static constexpr size_t kMipsBitsPerWord = kMipsWordSize * kBitsPerByte;
   2291 
   2292 void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
   2293   DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
   2294   LocationSummary* locations = instr->GetLocations();
   2295   Primitive::Type type = instr->GetType();
   2296 
   2297   Location rhs_location = locations->InAt(1);
   2298   bool use_imm = rhs_location.IsConstant();
   2299   Register rhs_reg = use_imm ? ZERO : rhs_location.AsRegister<Register>();
   2300   int64_t rhs_imm = use_imm ? CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()) : 0;
   2301   const uint32_t shift_mask =
   2302       (type == Primitive::kPrimInt) ? kMaxIntShiftDistance : kMaxLongShiftDistance;
   2303   const uint32_t shift_value = rhs_imm & shift_mask;
   2304   // Are the INS (Insert Bit Field) and ROTR instructions supported?
   2305   bool has_ins_rotr = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
   2306 
   2307   switch (type) {
   2308     case Primitive::kPrimInt: {
   2309       Register dst = locations->Out().AsRegister<Register>();
   2310       Register lhs = locations->InAt(0).AsRegister<Register>();
   2311       if (use_imm) {
   2312         if (shift_value == 0) {
   2313           if (dst != lhs) {
   2314             __ Move(dst, lhs);
   2315           }
   2316         } else if (instr->IsShl()) {
   2317           __ Sll(dst, lhs, shift_value);
   2318         } else if (instr->IsShr()) {
   2319           __ Sra(dst, lhs, shift_value);
   2320         } else if (instr->IsUShr()) {
   2321           __ Srl(dst, lhs, shift_value);
   2322         } else {
   2323           if (has_ins_rotr) {
   2324             __ Rotr(dst, lhs, shift_value);
   2325           } else {
   2326             __ Sll(TMP, lhs, (kMipsBitsPerWord - shift_value) & shift_mask);
   2327             __ Srl(dst, lhs, shift_value);
   2328             __ Or(dst, dst, TMP);
   2329           }
   2330         }
   2331       } else {
   2332         if (instr->IsShl()) {
   2333           __ Sllv(dst, lhs, rhs_reg);
   2334         } else if (instr->IsShr()) {
   2335           __ Srav(dst, lhs, rhs_reg);
   2336         } else if (instr->IsUShr()) {
   2337           __ Srlv(dst, lhs, rhs_reg);
   2338         } else {
   2339           if (has_ins_rotr) {
   2340             __ Rotrv(dst, lhs, rhs_reg);
   2341           } else {
   2342             __ Subu(TMP, ZERO, rhs_reg);
    2343             // 32-bit shift instructions use only the 5 least significant bits of the
    2344             // shift count, so shifting by `-rhs_reg` is equivalent to shifting by
    2345             // `(32 - rhs_reg) & 31`. The case `rhs_reg & 31 == 0` is still correct even
    2346             // though `lhs` is not shifted left all the way out by 32: the result is then
    2347             // computed as `(lhs >> 0) | (lhs << 0)`, i.e. the OR'd values are equal.
   2348             __ Sllv(TMP, lhs, TMP);
   2349             __ Srlv(dst, lhs, rhs_reg);
   2350             __ Or(dst, dst, TMP);
   2351           }
   2352         }
   2353       }
   2354       break;
   2355     }
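    // Without ROTR, the rotation above is composed from two shifts and an OR, using
    // rotr(x, n) == (x >> n) | (x << (32 - n)). A host-side model (illustrative only):
    //
    //   static uint32_t RotateRight32(uint32_t x, uint32_t n) {
    //     n &= 31;  // Shifts use only the 5 least significant bits of the count.
    //     return (x >> n) | (x << ((32 - n) & 31));  // For n == 0 this is x | x == x.
    //   }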
   2356 
   2357     case Primitive::kPrimLong: {
   2358       Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
   2359       Register dst_low = locations->Out().AsRegisterPairLow<Register>();
   2360       Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
   2361       Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
   2362       if (use_imm) {
    2363         if (shift_value == 0) {
    2364           codegen_->MoveLocation(locations->Out(), locations->InAt(0), type);
    2365         } else if (shift_value < kMipsBitsPerWord) {
    2366           if (has_ins_rotr) {
    2367             if (instr->IsShl()) {
    2368               __ Srl(dst_high, lhs_low, kMipsBitsPerWord - shift_value);
    2369               __ Ins(dst_high, lhs_high, shift_value, kMipsBitsPerWord - shift_value);
    2370               __ Sll(dst_low, lhs_low, shift_value);
    2371             } else if (instr->IsShr()) {
    2372               __ Srl(dst_low, lhs_low, shift_value);
    2373               __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
    2374               __ Sra(dst_high, lhs_high, shift_value);
    2375             } else if (instr->IsUShr()) {
    2376               __ Srl(dst_low, lhs_low, shift_value);
    2377               __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
    2378               __ Srl(dst_high, lhs_high, shift_value);
    2379             } else {
    2380               __ Srl(dst_low, lhs_low, shift_value);
    2381               __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
    2382               __ Srl(dst_high, lhs_high, shift_value);
    2383               __ Ins(dst_high, lhs_low, kMipsBitsPerWord - shift_value, shift_value);
    2384             }
    2385           } else {
    2386             if (instr->IsShl()) {
    2387               __ Sll(dst_low, lhs_low, shift_value);
    2388               __ Srl(TMP, lhs_low, kMipsBitsPerWord - shift_value);
    2389               __ Sll(dst_high, lhs_high, shift_value);
    2390               __ Or(dst_high, dst_high, TMP);
    2391             } else if (instr->IsShr()) {
    2392               __ Sra(dst_high, lhs_high, shift_value);
    2393               __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
    2394               __ Srl(dst_low, lhs_low, shift_value);
    2395               __ Or(dst_low, dst_low, TMP);
    2396             } else if (instr->IsUShr()) {
    2397               __ Srl(dst_high, lhs_high, shift_value);
    2398               __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
    2399               __ Srl(dst_low, lhs_low, shift_value);
    2400               __ Or(dst_low, dst_low, TMP);
    2401             } else {
    2402               __ Srl(TMP, lhs_low, shift_value);
    2403               __ Sll(dst_low, lhs_high, kMipsBitsPerWord - shift_value);
    2404               __ Or(dst_low, dst_low, TMP);
    2405               __ Srl(TMP, lhs_high, shift_value);
    2406               __ Sll(dst_high, lhs_low, kMipsBitsPerWord - shift_value);
    2407               __ Or(dst_high, dst_high, TMP);
    2408             }
    2409           }
    2410         } else {
    2411           const uint32_t shift_value_high = shift_value - kMipsBitsPerWord;
    2412           if (instr->IsShl()) {
    2413             __ Sll(dst_high, lhs_low, shift_value_high);
    2414             __ Move(dst_low, ZERO);
    2415           } else if (instr->IsShr()) {
    2416             __ Sra(dst_low, lhs_high, shift_value_high);
    2417             __ Sra(dst_high, dst_low, kMipsBitsPerWord - 1);
    2418           } else if (instr->IsUShr()) {
    2419             __ Srl(dst_low, lhs_high, shift_value_high);
    2420             __ Move(dst_high, ZERO);
    2421           } else {
    2422             if (shift_value == kMipsBitsPerWord) {
    2423               // 64-bit rotation by 32 is just a swap.
    2424               __ Move(dst_low, lhs_high);
    2425               __ Move(dst_high, lhs_low);
    2426             } else {
    2427               if (has_ins_rotr) {
    2428                 __ Srl(dst_low, lhs_high, shift_value_high);
    2429                 __ Ins(dst_low, lhs_low, kMipsBitsPerWord - shift_value_high, shift_value_high);
    2430                 __ Srl(dst_high, lhs_low, shift_value_high);
    2431                 __ Ins(dst_high, lhs_high, kMipsBitsPerWord - shift_value_high, shift_value_high);
    2432               } else {
    2433                 __ Sll(TMP, lhs_low, kMipsBitsPerWord - shift_value_high);
    2434                 __ Srl(dst_low, lhs_high, shift_value_high);
    2435                 __ Or(dst_low, dst_low, TMP);
    2436                 __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value_high);
    2437                 __ Srl(dst_high, lhs_low, shift_value_high);
    2438                 __ Or(dst_high, dst_high, TMP);
    2439               }
    2440             }
    2441           }
    2442         }
   2443       } else {
   2444         MipsLabel done;
   2445         if (instr->IsShl()) {
   2446           __ Sllv(dst_low, lhs_low, rhs_reg);
   2447           __ Nor(AT, ZERO, rhs_reg);
   2448           __ Srl(TMP, lhs_low, 1);
   2449           __ Srlv(TMP, TMP, AT);
   2450           __ Sllv(dst_high, lhs_high, rhs_reg);
   2451           __ Or(dst_high, dst_high, TMP);
   2452           __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
   2453           __ Beqz(TMP, &done);
   2454           __ Move(dst_high, dst_low);
   2455           __ Move(dst_low, ZERO);
   2456         } else if (instr->IsShr()) {
   2457           __ Srav(dst_high, lhs_high, rhs_reg);
   2458           __ Nor(AT, ZERO, rhs_reg);
   2459           __ Sll(TMP, lhs_high, 1);
   2460           __ Sllv(TMP, TMP, AT);
   2461           __ Srlv(dst_low, lhs_low, rhs_reg);
   2462           __ Or(dst_low, dst_low, TMP);
   2463           __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
   2464           __ Beqz(TMP, &done);
   2465           __ Move(dst_low, dst_high);
    2466           __ Sra(dst_high, dst_high, kMipsBitsPerWord - 1);
   2467         } else if (instr->IsUShr()) {
   2468           __ Srlv(dst_high, lhs_high, rhs_reg);
   2469           __ Nor(AT, ZERO, rhs_reg);
   2470           __ Sll(TMP, lhs_high, 1);
   2471           __ Sllv(TMP, TMP, AT);
   2472           __ Srlv(dst_low, lhs_low, rhs_reg);
   2473           __ Or(dst_low, dst_low, TMP);
   2474           __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
   2475           __ Beqz(TMP, &done);
   2476           __ Move(dst_low, dst_high);
   2477           __ Move(dst_high, ZERO);
   2478         } else {
   2479           __ Nor(AT, ZERO, rhs_reg);
   2480           __ Srlv(TMP, lhs_low, rhs_reg);
   2481           __ Sll(dst_low, lhs_high, 1);
   2482           __ Sllv(dst_low, dst_low, AT);
   2483           __ Or(dst_low, dst_low, TMP);
   2484           __ Srlv(TMP, lhs_high, rhs_reg);
   2485           __ Sll(dst_high, lhs_low, 1);
   2486           __ Sllv(dst_high, dst_high, AT);
   2487           __ Or(dst_high, dst_high, TMP);
   2488           __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
   2489           __ Beqz(TMP, &done);
   2490           __ Move(TMP, dst_high);
   2491           __ Move(dst_high, dst_low);
   2492           __ Move(dst_low, TMP);
   2493         }
   2494         __ Bind(&done);
   2495       }
   2496       break;
   2497     }
   2498 
   2499     default:
   2500       LOG(FATAL) << "Unexpected shift operation type " << type;
   2501   }
   2502 }
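// A host-side model of the variable-distance 64-bit left shift emitted above (illustrative
// only). The Nor/extra-shift-by-1 sequence computes `lhs_low >> (32 - n)` without ever
// shifting by 32 (which 32-bit shifts cannot express), using (~n & 31) == 31 - (n & 31);
// bit 5 of the count then decides whether the halves must be swapped:
//
//   static uint64_t Shl64ViaPair(uint32_t low, uint32_t high, uint32_t n) {
//     uint32_t dst_low = low << (n & 31);               // Sllv.
//     uint32_t spill = (low >> 1) >> (~n & 31);         // Nor + Srl + Srlv.
//     uint32_t dst_high = (high << (n & 31)) | spill;   // Sllv + Or.
//     if ((n & 32) != 0) {                              // Andi(TMP, rhs_reg, 32) + Beqz.
//       dst_high = dst_low;
//       dst_low = 0;
//     }
//     return (static_cast<uint64_t>(dst_high) << 32) | dst_low;
//   }
//
// The Shr, UShr and Ror cases follow the same pattern with the roles of the halves reversed.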
   2503 
   2504 void LocationsBuilderMIPS::VisitAdd(HAdd* instruction) {
   2505   HandleBinaryOp(instruction);
   2506 }
   2507 
   2508 void InstructionCodeGeneratorMIPS::VisitAdd(HAdd* instruction) {
   2509   HandleBinaryOp(instruction);
   2510 }
   2511 
   2512 void LocationsBuilderMIPS::VisitAnd(HAnd* instruction) {
   2513   HandleBinaryOp(instruction);
   2514 }
   2515 
   2516 void InstructionCodeGeneratorMIPS::VisitAnd(HAnd* instruction) {
   2517   HandleBinaryOp(instruction);
   2518 }
   2519 
   2520 void LocationsBuilderMIPS::VisitArrayGet(HArrayGet* instruction) {
   2521   Primitive::Type type = instruction->GetType();
   2522   bool object_array_get_with_read_barrier =
   2523       kEmitCompilerReadBarrier && (type == Primitive::kPrimNot);
   2524   LocationSummary* locations =
   2525       new (GetGraph()->GetArena()) LocationSummary(instruction,
   2526                                                    object_array_get_with_read_barrier
   2527                                                        ? LocationSummary::kCallOnSlowPath
   2528                                                        : LocationSummary::kNoCall);
   2529   if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
   2530     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
   2531   }
   2532   locations->SetInAt(0, Location::RequiresRegister());
   2533   locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
   2534   if (Primitive::IsFloatingPointType(type)) {
   2535     locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
   2536   } else {
   2537     // The output overlaps in the case of an object array get with
   2538     // read barriers enabled: we do not want the move to overwrite the
   2539     // array's location, as we need it to emit the read barrier.
   2540     locations->SetOut(Location::RequiresRegister(),
   2541                       object_array_get_with_read_barrier
   2542                           ? Location::kOutputOverlap
   2543                           : Location::kNoOutputOverlap);
   2544   }
   2545   // We need a temporary register for the read barrier marking slow
   2546   // path in CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier.
   2547   if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
   2548     locations->AddTemp(Location::RequiresRegister());
   2549   }
   2550 }
   2551 
   2552 static auto GetImplicitNullChecker(HInstruction* instruction, CodeGeneratorMIPS* codegen) {
   2553   auto null_checker = [codegen, instruction]() {
   2554     codegen->MaybeRecordImplicitNullCheck(instruction);
   2555   };
   2556   return null_checker;
   2557 }
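// The returned lambda is passed to the assembler's load/store helpers, which invoke it right
// after emitting the first memory access; if that access faults on a null `obj`, the recorded
// PC lets the runtime turn the SIGSEGV into a NullPointerException at the right dex location.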
   2558 
   2559 void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
   2560   LocationSummary* locations = instruction->GetLocations();
   2561   Location obj_loc = locations->InAt(0);
   2562   Register obj = obj_loc.AsRegister<Register>();
   2563   Location out_loc = locations->Out();
   2564   Location index = locations->InAt(1);
   2565   uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
   2566   auto null_checker = GetImplicitNullChecker(instruction, codegen_);
   2567 
   2568   Primitive::Type type = instruction->GetType();
   2569   const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
   2570                                         instruction->IsStringCharAt();
   2571   switch (type) {
   2572     case Primitive::kPrimBoolean: {
   2573       Register out = out_loc.AsRegister<Register>();
   2574       if (index.IsConstant()) {
   2575         size_t offset =
   2576             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
   2577         __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset, null_checker);
   2578       } else {
   2579         __ Addu(TMP, obj, index.AsRegister<Register>());
   2580         __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset, null_checker);
   2581       }
   2582       break;
   2583     }
   2584 
   2585     case Primitive::kPrimByte: {
   2586       Register out = out_loc.AsRegister<Register>();
   2587       if (index.IsConstant()) {
   2588         size_t offset =
   2589             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
   2590         __ LoadFromOffset(kLoadSignedByte, out, obj, offset, null_checker);
   2591       } else {
   2592         __ Addu(TMP, obj, index.AsRegister<Register>());
   2593         __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset, null_checker);
   2594       }
   2595       break;
   2596     }
   2597 
   2598     case Primitive::kPrimShort: {
   2599       Register out = out_loc.AsRegister<Register>();
   2600       if (index.IsConstant()) {
   2601         size_t offset =
   2602             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
   2603         __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset, null_checker);
   2604       } else {
   2605         __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_2, TMP);
   2606         __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset, null_checker);
   2607       }
   2608       break;
   2609     }
   2610 
   2611     case Primitive::kPrimChar: {
   2612       Register out = out_loc.AsRegister<Register>();
   2613       if (maybe_compressed_char_at) {
   2614         uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
   2615         __ LoadFromOffset(kLoadWord, TMP, obj, count_offset, null_checker);
   2616         __ Sll(TMP, TMP, 31);    // Extract compression flag into the most significant bit of TMP.
   2617         static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
   2618                       "Expecting 0=compressed, 1=uncompressed");
   2619       }
   2620       if (index.IsConstant()) {
   2621         int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
   2622         if (maybe_compressed_char_at) {
   2623           MipsLabel uncompressed_load, done;
   2624           __ Bnez(TMP, &uncompressed_load);
   2625           __ LoadFromOffset(kLoadUnsignedByte,
   2626                             out,
   2627                             obj,
   2628                             data_offset + (const_index << TIMES_1));
   2629           __ B(&done);
   2630           __ Bind(&uncompressed_load);
   2631           __ LoadFromOffset(kLoadUnsignedHalfword,
   2632                             out,
   2633                             obj,
   2634                             data_offset + (const_index << TIMES_2));
   2635           __ Bind(&done);
   2636         } else {
   2637           __ LoadFromOffset(kLoadUnsignedHalfword,
   2638                             out,
   2639                             obj,
   2640                             data_offset + (const_index << TIMES_2),
   2641                             null_checker);
   2642         }
   2643       } else {
   2644         Register index_reg = index.AsRegister<Register>();
   2645         if (maybe_compressed_char_at) {
   2646           MipsLabel uncompressed_load, done;
   2647           __ Bnez(TMP, &uncompressed_load);
   2648           __ Addu(TMP, obj, index_reg);
   2649           __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
   2650           __ B(&done);
   2651           __ Bind(&uncompressed_load);
   2652           __ ShiftAndAdd(TMP, index_reg, obj, TIMES_2, TMP);
   2653           __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
   2654           __ Bind(&done);
   2655         } else {
   2656           __ ShiftAndAdd(TMP, index_reg, obj, TIMES_2, TMP);
   2657           __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
   2658         }
   2659       }
   2660       break;
   2661     }
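    // With string compression, String.count packs (length << 1) | flag, where flag == 0
    // selects 8-bit (compressed) storage and flag == 1 selects 16-bit chars. Sll(TMP, TMP, 31)
    // moves the flag into the sign bit so a single Bnez picks the load width. Host-side model
    // (illustrative only):
    //
    //   static uint16_t CharAtModel(uint32_t count, const void* data, uint32_t index) {
    //     bool compressed = (count & 1u) == 0u;  // Matches kCompressed == 0.
    //     return compressed ? static_cast<const uint8_t*>(data)[index]
    //                       : static_cast<const uint16_t*>(data)[index];
    //   }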
   2662 
   2663     case Primitive::kPrimInt: {
   2664       DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
   2665       Register out = out_loc.AsRegister<Register>();
   2666       if (index.IsConstant()) {
   2667         size_t offset =
   2668             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
   2669         __ LoadFromOffset(kLoadWord, out, obj, offset, null_checker);
   2670       } else {
   2671         __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_4, TMP);
   2672         __ LoadFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
   2673       }
   2674       break;
   2675     }
   2676 
   2677     case Primitive::kPrimNot: {
   2678       static_assert(
   2679           sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
   2680           "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
   2681       // /* HeapReference<Object> */ out =
   2682       //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
   2683       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
   2684         Location temp = locations->GetTemp(0);
   2685         // Note that a potential implicit null check is handled in this
   2686         // CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier call.
   2687         codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
   2688                                                         out_loc,
   2689                                                         obj,
   2690                                                         data_offset,
   2691                                                         index,
   2692                                                         temp,
   2693                                                         /* needs_null_check */ true);
   2694       } else {
   2695         Register out = out_loc.AsRegister<Register>();
   2696         if (index.IsConstant()) {
   2697           size_t offset =
   2698               (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
   2699           __ LoadFromOffset(kLoadWord, out, obj, offset, null_checker);
   2700           // If read barriers are enabled, emit read barriers other than
   2701           // Baker's using a slow path (and also unpoison the loaded
   2702           // reference, if heap poisoning is enabled).
   2703           codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
   2704         } else {
   2705           __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_4, TMP);
   2706           __ LoadFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
   2707           // If read barriers are enabled, emit read barriers other than
   2708           // Baker's using a slow path (and also unpoison the loaded
   2709           // reference, if heap poisoning is enabled).
   2710           codegen_->MaybeGenerateReadBarrierSlow(instruction,
   2711                                                  out_loc,
   2712                                                  out_loc,
   2713                                                  obj_loc,
   2714                                                  data_offset,
   2715                                                  index);
   2716         }
   2717       }
   2718       break;
   2719     }
   2720 
   2721     case Primitive::kPrimLong: {
   2722       Register out = out_loc.AsRegisterPairLow<Register>();
   2723       if (index.IsConstant()) {
   2724         size_t offset =
   2725             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
   2726         __ LoadFromOffset(kLoadDoubleword, out, obj, offset, null_checker);
   2727       } else {
   2728         __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_8, TMP);
   2729         __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
   2730       }
   2731       break;
   2732     }
   2733 
   2734     case Primitive::kPrimFloat: {
   2735       FRegister out = out_loc.AsFpuRegister<FRegister>();
   2736       if (index.IsConstant()) {
   2737         size_t offset =
   2738             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
   2739         __ LoadSFromOffset(out, obj, offset, null_checker);
   2740       } else {
   2741         __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_4, TMP);
   2742         __ LoadSFromOffset(out, TMP, data_offset, null_checker);
   2743       }
   2744       break;
   2745     }
   2746 
   2747     case Primitive::kPrimDouble: {
   2748       FRegister out = out_loc.AsFpuRegister<FRegister>();
   2749       if (index.IsConstant()) {
   2750         size_t offset =
   2751             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
   2752         __ LoadDFromOffset(out, obj, offset, null_checker);
   2753       } else {
   2754         __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_8, TMP);
   2755         __ LoadDFromOffset(out, TMP, data_offset, null_checker);
   2756       }
   2757       break;
   2758     }
   2759 
   2760     case Primitive::kPrimVoid:
   2761       LOG(FATAL) << "Unreachable type " << instruction->GetType();
   2762       UNREACHABLE();
   2763   }
   2764 }
   2765 
   2766 void LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) {
   2767   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
   2768   locations->SetInAt(0, Location::RequiresRegister());
   2769   locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   2770 }
   2771 
   2772 void InstructionCodeGeneratorMIPS::VisitArrayLength(HArrayLength* instruction) {
   2773   LocationSummary* locations = instruction->GetLocations();
   2774   uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction);
   2775   Register obj = locations->InAt(0).AsRegister<Register>();
   2776   Register out = locations->Out().AsRegister<Register>();
   2777   __ LoadFromOffset(kLoadWord, out, obj, offset);
   2778   codegen_->MaybeRecordImplicitNullCheck(instruction);
   2779   // Mask out compression flag from String's array length.
   2780   if (mirror::kUseStringCompression && instruction->IsStringLength()) {
   2781     __ Srl(out, out, 1u);
   2782   }
   2783 }
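// For String.length() the loaded count field is (length << 1) | compression_flag, so the
// logical length is recovered with a single unsigned shift: length = count >> 1.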
   2784 
   2785 Location LocationsBuilderMIPS::RegisterOrZeroConstant(HInstruction* instruction) {
   2786   return (instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern())
   2787       ? Location::ConstantLocation(instruction->AsConstant())
   2788       : Location::RequiresRegister();
   2789 }
   2790 
   2791 Location LocationsBuilderMIPS::FpuRegisterOrConstantForStore(HInstruction* instruction) {
   2792   // We can store 0.0 directly (from the ZERO register) without loading it into an FPU register.
   2793   // We can store a non-zero float or double constant without first loading it into the FPU,
   2794   // but we should only prefer this if the constant has a single use.
   2795   if (instruction->IsConstant() &&
   2796       (instruction->AsConstant()->IsZeroBitPattern() ||
   2797        instruction->GetUses().HasExactlyOneElement())) {
   2798     return Location::ConstantLocation(instruction->AsConstant());
   2799     // Otherwise fall through and require an FPU register for the constant.
    2800   }
    2801   // Otherwise, require the constant to be loaded into an FPU register.
    2802   return Location::RequiresFpuRegister();
   2803 
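// Rationale: a float/double constant with a single use can be stored directly from a GPR
// holding its bit pattern (see StoreConstToOffset), which is cheaper than first loading it
// into an FPU register; with multiple uses, an FPU register amortizes the materialization.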
   2804 void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) {
   2805   Primitive::Type value_type = instruction->GetComponentType();
   2806 
   2807   bool needs_write_barrier =
   2808       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
   2809   bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
   2810 
   2811   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
   2812       instruction,
   2813       may_need_runtime_call_for_type_check ?
   2814           LocationSummary::kCallOnSlowPath :
   2815           LocationSummary::kNoCall);
   2816 
   2817   locations->SetInAt(0, Location::RequiresRegister());
   2818   locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
   2819   if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
   2820     locations->SetInAt(2, FpuRegisterOrConstantForStore(instruction->InputAt(2)));
   2821   } else {
   2822     locations->SetInAt(2, RegisterOrZeroConstant(instruction->InputAt(2)));
   2823   }
   2824   if (needs_write_barrier) {
   2825     // Temporary register for the write barrier.
   2826     locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
   2827   }
   2828 }
   2829 
   2830 void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
   2831   LocationSummary* locations = instruction->GetLocations();
   2832   Register obj = locations->InAt(0).AsRegister<Register>();
   2833   Location index = locations->InAt(1);
   2834   Location value_location = locations->InAt(2);
   2835   Primitive::Type value_type = instruction->GetComponentType();
   2836   bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
   2837   bool needs_write_barrier =
   2838       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
   2839   auto null_checker = GetImplicitNullChecker(instruction, codegen_);
   2840   Register base_reg = index.IsConstant() ? obj : TMP;
   2841 
   2842   switch (value_type) {
   2843     case Primitive::kPrimBoolean:
   2844     case Primitive::kPrimByte: {
   2845       uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
   2846       if (index.IsConstant()) {
   2847         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1;
   2848       } else {
   2849         __ Addu(base_reg, obj, index.AsRegister<Register>());
   2850       }
   2851       if (value_location.IsConstant()) {
   2852         int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
   2853         __ StoreConstToOffset(kStoreByte, value, base_reg, data_offset, TMP, null_checker);
   2854       } else {
   2855         Register value = value_location.AsRegister<Register>();
   2856         __ StoreToOffset(kStoreByte, value, base_reg, data_offset, null_checker);
   2857       }
   2858       break;
   2859     }
   2860 
   2861     case Primitive::kPrimShort:
   2862     case Primitive::kPrimChar: {
   2863       uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
   2864       if (index.IsConstant()) {
   2865         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2;
   2866       } else {
   2867         __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_2, base_reg);
   2868       }
   2869       if (value_location.IsConstant()) {
   2870         int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
   2871         __ StoreConstToOffset(kStoreHalfword, value, base_reg, data_offset, TMP, null_checker);
   2872       } else {
   2873         Register value = value_location.AsRegister<Register>();
   2874         __ StoreToOffset(kStoreHalfword, value, base_reg, data_offset, null_checker);
   2875       }
   2876       break;
   2877     }
   2878 
   2879     case Primitive::kPrimInt: {
   2880       uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
   2881       if (index.IsConstant()) {
   2882         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
   2883       } else {
   2884         __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
   2885       }
   2886       if (value_location.IsConstant()) {
   2887         int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
   2888         __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
   2889       } else {
   2890         Register value = value_location.AsRegister<Register>();
   2891         __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
   2892       }
   2893       break;
   2894     }
   2895 
   2896     case Primitive::kPrimNot: {
   2897       if (value_location.IsConstant()) {
   2898         // Just setting null.
   2899         uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
   2900         if (index.IsConstant()) {
   2901           data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
   2902         } else {
   2903           __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
   2904         }
   2905         int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
   2906         DCHECK_EQ(value, 0);
   2907         __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
   2908         DCHECK(!needs_write_barrier);
   2909         DCHECK(!may_need_runtime_call_for_type_check);
   2910         break;
   2911       }
   2912 
   2913       DCHECK(needs_write_barrier);
   2914       Register value = value_location.AsRegister<Register>();
   2915       Register temp1 = locations->GetTemp(0).AsRegister<Register>();
   2916       Register temp2 = TMP;  // Doesn't need to survive slow path.
   2917       uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   2918       uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
   2919       uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
   2920       MipsLabel done;
   2921       SlowPathCodeMIPS* slow_path = nullptr;
   2922 
   2923       if (may_need_runtime_call_for_type_check) {
   2924         slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathMIPS(instruction);
   2925         codegen_->AddSlowPath(slow_path);
   2926         if (instruction->GetValueCanBeNull()) {
   2927           MipsLabel non_zero;
   2928           __ Bnez(value, &non_zero);
   2929           uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
   2930           if (index.IsConstant()) {
   2931             data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
   2932           } else {
   2933             __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
   2934           }
   2935           __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
   2936           __ B(&done);
   2937           __ Bind(&non_zero);
   2938         }
   2939 
   2940         // Note that when read barriers are enabled, the type checks
   2941         // are performed without read barriers.  This is fine, even in
   2942         // the case where a class object is in the from-space after
   2943         // the flip, as a comparison involving such a type would not
   2944         // produce a false positive; it may of course produce a false
   2945         // negative, in which case we would take the ArraySet slow
   2946         // path.
   2947 
   2948         // /* HeapReference<Class> */ temp1 = obj->klass_
   2949         __ LoadFromOffset(kLoadWord, temp1, obj, class_offset, null_checker);
   2950         __ MaybeUnpoisonHeapReference(temp1);
   2951 
   2952         // /* HeapReference<Class> */ temp1 = temp1->component_type_
   2953         __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
   2954         // /* HeapReference<Class> */ temp2 = value->klass_
   2955         __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
   2956         // If heap poisoning is enabled, no need to unpoison `temp1`
   2957         // nor `temp2`, as we are comparing two poisoned references.
   2958 
   2959         if (instruction->StaticTypeOfArrayIsObjectArray()) {
   2960           MipsLabel do_put;
   2961           __ Beq(temp1, temp2, &do_put);
   2962           // If heap poisoning is enabled, the `temp1` reference has
   2963           // not been unpoisoned yet; unpoison it now.
   2964           __ MaybeUnpoisonHeapReference(temp1);
   2965 
   2966           // /* HeapReference<Class> */ temp1 = temp1->super_class_
   2967           __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
   2968           // If heap poisoning is enabled, no need to unpoison
   2969           // `temp1`, as we are comparing against null below.
   2970           __ Bnez(temp1, slow_path->GetEntryLabel());
   2971           __ Bind(&do_put);
   2972         } else {
   2973           __ Bne(temp1, temp2, slow_path->GetEntryLabel());
   2974         }
   2975       }
   2976 
   2977       Register source = value;
   2978       if (kPoisonHeapReferences) {
   2979         // Note that in the case where `value` is a null reference,
   2980         // we do not enter this block, as a null reference does not
   2981         // need poisoning.
   2982         __ Move(temp1, value);
   2983         __ PoisonHeapReference(temp1);
   2984         source = temp1;
   2985       }
   2986 
   2987       uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
   2988       if (index.IsConstant()) {
   2989         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
   2990       } else {
   2991         __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
   2992       }
   2993       __ StoreToOffset(kStoreWord, source, base_reg, data_offset);
   2994 
   2995       if (!may_need_runtime_call_for_type_check) {
   2996         codegen_->MaybeRecordImplicitNullCheck(instruction);
   2997       }
   2998 
   2999       codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull());
   3000 
   3001       if (done.IsLinked()) {
   3002         __ Bind(&done);
   3003       }
   3004 
   3005       if (slow_path != nullptr) {
   3006         __ Bind(slow_path->GetExitLabel());
   3007       }
   3008       break;
   3009     }
   3010 
   3011     case Primitive::kPrimLong: {
   3012       uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
   3013       if (index.IsConstant()) {
   3014         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
   3015       } else {
   3016         __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_8, base_reg);
   3017       }
   3018       if (value_location.IsConstant()) {
   3019         int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
   3020         __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker);
   3021       } else {
   3022         Register value = value_location.AsRegisterPairLow<Register>();
   3023         __ StoreToOffset(kStoreDoubleword, value, base_reg, data_offset, null_checker);
   3024       }
   3025       break;
   3026     }
   3027 
   3028     case Primitive::kPrimFloat: {
   3029       uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
   3030       if (index.IsConstant()) {
   3031         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
   3032       } else {
   3033         __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
   3034       }
   3035       if (value_location.IsConstant()) {
   3036         int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
   3037         __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
   3038       } else {
   3039         FRegister value = value_location.AsFpuRegister<FRegister>();
   3040         __ StoreSToOffset(value, base_reg, data_offset, null_checker);
   3041       }
   3042       break;
   3043     }
   3044 
   3045     case Primitive::kPrimDouble: {
   3046       uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
   3047       if (index.IsConstant()) {
   3048         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
   3049       } else {
   3050         __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_8, base_reg);
   3051       }
   3052       if (value_location.IsConstant()) {
   3053         int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
   3054         __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker);
   3055       } else {
   3056         FRegister value = value_location.AsFpuRegister<FRegister>();
   3057         __ StoreDToOffset(value, base_reg, data_offset, null_checker);
   3058       }
   3059       break;
   3060     }
   3061 
   3062     case Primitive::kPrimVoid:
   3063       LOG(FATAL) << "Unreachable type " << instruction->GetType();
   3064       UNREACHABLE();
   3065   }
   3066 }
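// The reference stores above are followed by MarkGCCard, which dirties the card covering
// `obj` so the garbage collector revisits that memory region. A minimal sketch of card
// marking, assuming ART's usual constants (kCardShift == 10 and the 0x70 dirty value are
// assumptions here, illustrative only):
//
//   static void MarkCardModel(uint8_t* card_table_biased, uintptr_t obj_addr) {
//     card_table_biased[obj_addr >> 10] = 0x70;  // One card byte per 1 KiB of heap.
//   }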
   3067 
   3068 void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
   3069   RegisterSet caller_saves = RegisterSet::Empty();
   3070   InvokeRuntimeCallingConvention calling_convention;
   3071   caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   3072   caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
   3073   LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
   3074   locations->SetInAt(0, Location::RequiresRegister());
   3075   locations->SetInAt(1, Location::RequiresRegister());
   3076 }
   3077 
   3078 void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
   3079   LocationSummary* locations = instruction->GetLocations();
   3080   BoundsCheckSlowPathMIPS* slow_path =
   3081       new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS(instruction);
   3082   codegen_->AddSlowPath(slow_path);
   3083 
   3084   Register index = locations->InAt(0).AsRegister<Register>();
   3085   Register length = locations->InAt(1).AsRegister<Register>();
   3086 
    3087   // The length is limited to the maximum positive signed 32-bit integer.
    3088   // A single unsigned comparison of index and length therefore checks both
    3089   // index < 0 and length <= index at once.
   3090   __ Bgeu(index, length, slow_path->GetEntryLabel());
   3091 }
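// The single unsigned comparison folds both range checks: a negative index reinterpreted as
// uint32_t becomes >= 2^31 and therefore always exceeds the non-negative length. Host-side
// equivalent (illustrative only):
//
//   static bool IndexOutOfBounds(int32_t index, int32_t length) {
//     return static_cast<uint32_t>(index) >= static_cast<uint32_t>(length);  // Bgeu.
//   }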
   3092 
   3093 // Temp is used for read barrier.
   3094 static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
   3095   if (kEmitCompilerReadBarrier &&
   3096       (kUseBakerReadBarrier ||
   3097        type_check_kind == TypeCheckKind::kAbstractClassCheck ||
   3098        type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
   3099        type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
   3100     return 1;
   3101   }
   3102   return 0;
   3103 }
   3104 
   3105 // Extra temp is used for read barrier.
   3106 static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
   3107   return 1 + NumberOfInstanceOfTemps(type_check_kind);
   3108 }
   3109 
   3110 void LocationsBuilderMIPS::VisitCheckCast(HCheckCast* instruction) {
   3111   LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
   3112   bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
   3113 
   3114   TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
   3115   switch (type_check_kind) {
   3116     case TypeCheckKind::kExactCheck:
   3117     case TypeCheckKind::kAbstractClassCheck:
   3118     case TypeCheckKind::kClassHierarchyCheck:
   3119     case TypeCheckKind::kArrayObjectCheck:
   3120       call_kind = (throws_into_catch || kEmitCompilerReadBarrier)
   3121           ? LocationSummary::kCallOnSlowPath
    3122           : LocationSummary::kNoCall;  // Still calls, but on a fatal (non-returning) slow path.
   3123       break;
   3124     case TypeCheckKind::kArrayCheck:
   3125     case TypeCheckKind::kUnresolvedCheck:
   3126     case TypeCheckKind::kInterfaceCheck:
   3127       call_kind = LocationSummary::kCallOnSlowPath;
   3128       break;
   3129   }
   3130 
   3131   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
   3132   locations->SetInAt(0, Location::RequiresRegister());
   3133   locations->SetInAt(1, Location::RequiresRegister());
   3134   locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
   3135 }
   3136 
   3137 void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
   3138   TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
   3139   LocationSummary* locations = instruction->GetLocations();
   3140   Location obj_loc = locations->InAt(0);
   3141   Register obj = obj_loc.AsRegister<Register>();
   3142   Register cls = locations->InAt(1).AsRegister<Register>();
   3143   Location temp_loc = locations->GetTemp(0);
   3144   Register temp = temp_loc.AsRegister<Register>();
   3145   const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
   3146   DCHECK_LE(num_temps, 2u);
   3147   Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation();
   3148   const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   3149   const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
   3150   const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
   3151   const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
   3152   const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value();
   3153   const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value();
   3154   const uint32_t object_array_data_offset =
   3155       mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
   3156   MipsLabel done;
   3157 
    3158   // With read barriers enabled the slow path can never be fatal: the checks below avoid
    3159   // read barriers (for performance and code size reasons) and may therefore produce false
    3160   // negatives, which must fall back to the non-fatal runtime path.
   3161   bool is_type_check_slow_path_fatal = false;
   3162   if (!kEmitCompilerReadBarrier) {
   3163     is_type_check_slow_path_fatal =
   3164         (type_check_kind == TypeCheckKind::kExactCheck ||
   3165          type_check_kind == TypeCheckKind::kAbstractClassCheck ||
   3166          type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
   3167          type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
   3168         !instruction->CanThrowIntoCatchBlock();
   3169   }
   3170   SlowPathCodeMIPS* slow_path =
   3171       new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
   3172                                                          is_type_check_slow_path_fatal);
   3173   codegen_->AddSlowPath(slow_path);
   3174 
   3175   // Avoid this check if we know `obj` is not null.
   3176   if (instruction->MustDoNullCheck()) {
   3177     __ Beqz(obj, &done);
   3178   }
   3179 
   3180   switch (type_check_kind) {
   3181     case TypeCheckKind::kExactCheck:
   3182     case TypeCheckKind::kArrayCheck: {
   3183       // /* HeapReference<Class> */ temp = obj->klass_
   3184       GenerateReferenceLoadTwoRegisters(instruction,
   3185                                         temp_loc,
   3186                                         obj_loc,
   3187                                         class_offset,
   3188                                         maybe_temp2_loc,
   3189                                         kWithoutReadBarrier);
   3190       // Jump to slow path for throwing the exception or doing a
   3191       // more involved array check.
   3192       __ Bne(temp, cls, slow_path->GetEntryLabel());
   3193       break;
   3194     }
   3195 
   3196     case TypeCheckKind::kAbstractClassCheck: {
   3197       // /* HeapReference<Class> */ temp = obj->klass_
   3198       GenerateReferenceLoadTwoRegisters(instruction,
   3199                                         temp_loc,
   3200                                         obj_loc,
   3201                                         class_offset,
   3202                                         maybe_temp2_loc,
   3203                                         kWithoutReadBarrier);
   3204       // If the class is abstract, we eagerly fetch the super class of the
   3205       // object to avoid doing a comparison we know will fail.
   3206       MipsLabel loop;
   3207       __ Bind(&loop);
   3208       // /* HeapReference<Class> */ temp = temp->super_class_
   3209       GenerateReferenceLoadOneRegister(instruction,
   3210                                        temp_loc,
   3211                                        super_offset,
   3212                                        maybe_temp2_loc,
   3213                                        kWithoutReadBarrier);
   3214       // If the class reference currently in `temp` is null, jump to the slow path to throw the
   3215       // exception.
   3216       __ Beqz(temp, slow_path->GetEntryLabel());
   3217       // Otherwise, compare the classes.
   3218       __ Bne(temp, cls, &loop);
   3219       break;
   3220     }
   3221 
   3222     case TypeCheckKind::kClassHierarchyCheck: {
   3223       // /* HeapReference<Class> */ temp = obj->klass_
   3224       GenerateReferenceLoadTwoRegisters(instruction,
   3225                                         temp_loc,
   3226                                         obj_loc,
   3227                                         class_offset,
   3228                                         maybe_temp2_loc,
   3229                                         kWithoutReadBarrier);
   3230       // Walk over the class hierarchy to find a match.
   3231       MipsLabel loop;
   3232       __ Bind(&loop);
   3233       __ Beq(temp, cls, &done);
   3234       // /* HeapReference<Class> */ temp = temp->super_class_
   3235       GenerateReferenceLoadOneRegister(instruction,
   3236                                        temp_loc,
   3237                                        super_offset,
   3238                                        maybe_temp2_loc,
   3239                                        kWithoutReadBarrier);
   3240       // If the class reference currently in `temp` is null, jump to the slow path to throw the
   3241       // exception. Otherwise, jump to the beginning of the loop.
   3242       __ Bnez(temp, &loop);
   3243       __ B(slow_path->GetEntryLabel());
   3244       break;
   3245     }
   3246 
   3247     case TypeCheckKind::kArrayObjectCheck: {
   3248       // /* HeapReference<Class> */ temp = obj->klass_
   3249       GenerateReferenceLoadTwoRegisters(instruction,
   3250                                         temp_loc,
   3251                                         obj_loc,
   3252                                         class_offset,
   3253                                         maybe_temp2_loc,
   3254                                         kWithoutReadBarrier);
   3255       // Do an exact check.
   3256       __ Beq(temp, cls, &done);
   3257       // Otherwise, we need to check that the object's class is a non-primitive array.
   3258       // /* HeapReference<Class> */ temp = temp->component_type_
   3259       GenerateReferenceLoadOneRegister(instruction,
   3260                                        temp_loc,
   3261                                        component_offset,
   3262                                        maybe_temp2_loc,
   3263                                        kWithoutReadBarrier);
   3264       // If the component type is null, jump to the slow path to throw the exception.
   3265       __ Beqz(temp, slow_path->GetEntryLabel());
   3266       // Otherwise, the object is indeed an array, further check that this component
   3267       // type is not a primitive type.
   3268       __ LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset);
   3269       static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
   3270       __ Bnez(temp, slow_path->GetEntryLabel());
   3271       break;
   3272     }
   3273 
   3274     case TypeCheckKind::kUnresolvedCheck:
   3275       // We always go into the type check slow path for the unresolved check case.
    3276       // We cannot directly call the CheckCast runtime entry point
    3277       // without resorting to a type checking slow path here (i.e. by
    3278       // calling InvokeRuntime directly), as that would require assigning
    3279       // fixed registers to the inputs of this HCheckCast instruction
    3280       // (following the runtime calling convention), and those registers
    3281       // might be clobbered by the potential first read barrier
    3282       // emission at the beginning of this method.
   3283       __ B(slow_path->GetEntryLabel());
   3284       break;
   3285 
   3286     case TypeCheckKind::kInterfaceCheck: {
    3287       // Avoid read barriers to improve the performance of the fast path. Doing so cannot
    3288       // produce false positives.
   3289       // /* HeapReference<Class> */ temp = obj->klass_
   3290       GenerateReferenceLoadTwoRegisters(instruction,
   3291                                         temp_loc,
   3292                                         obj_loc,
   3293                                         class_offset,
   3294                                         maybe_temp2_loc,
   3295                                         kWithoutReadBarrier);
   3296       // /* HeapReference<Class> */ temp = temp->iftable_
   3297       GenerateReferenceLoadTwoRegisters(instruction,
   3298                                         temp_loc,
   3299                                         temp_loc,
   3300                                         iftable_offset,
   3301                                         maybe_temp2_loc,
   3302                                         kWithoutReadBarrier);
   3303       // Iftable is never null.
   3304       __ Lw(TMP, temp, array_length_offset);
   3305       // Loop through the iftable and check if any class matches.
   3306       MipsLabel loop;
   3307       __ Bind(&loop);
   3308       __ Addiu(temp, temp, 2 * kHeapReferenceSize);  // Possibly in delay slot on R2.
   3309       __ Beqz(TMP, slow_path->GetEntryLabel());
   3310       __ Lw(AT, temp, object_array_data_offset - 2 * kHeapReferenceSize);
   3311       __ MaybeUnpoisonHeapReference(AT);
   3312       // Go to next interface.
   3313       __ Addiu(TMP, TMP, -2);
   3314       // Compare the classes and continue the loop if they do not match.
   3315       __ Bne(AT, cls, &loop);
   3316       break;
   3317     }
   3318   }
   3319 
   3320   __ Bind(&done);
   3321   __ Bind(slow_path->GetExitLabel());
   3322 }
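// The kInterfaceCheck loop above walks the IfTable, which stores (interface class, method
// array) pairs back to back; hence the stride of 2 * kHeapReferenceSize through the data and
// the decrement of the remaining slot count by 2. A host-side model (illustrative only;
// heap references simplified to plain words):
//
//   static bool ImplementsInterface(const uint32_t* iftable_data, int32_t slot_count,
//                                   uint32_t target_class_ref) {
//     for (int32_t i = 0; i < slot_count; i += 2) {  // Addiu(TMP, TMP, -2).
//       if (iftable_data[i] == target_class_ref) {   // Bne(AT, cls, &loop).
//         return true;
//       }
//     }
//     return false;                                  // Beqz(TMP, slow_path).
//   }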
   3323 
   3324 void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) {
   3325   LocationSummary* locations =
   3326       new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
   3327   locations->SetInAt(0, Location::RequiresRegister());
   3328   if (check->HasUses()) {
   3329     locations->SetOut(Location::SameAsFirstInput());
   3330   }
   3331 }
   3332 
   3333 void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) {
   3334   // We assume the class is not null.
   3335   SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
   3336       check->GetLoadClass(),
   3337       check,
   3338       check->GetDexPc(),
   3339       true);
   3340   codegen_->AddSlowPath(slow_path);
   3341   GenerateClassInitializationCheck(slow_path,
   3342                                    check->GetLocations()->InAt(0).AsRegister<Register>());
   3343 }
   3344 
   3345 void LocationsBuilderMIPS::VisitCompare(HCompare* compare) {
   3346   Primitive::Type in_type = compare->InputAt(0)->GetType();
   3347 
   3348   LocationSummary* locations =
   3349       new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
   3350 
   3351   switch (in_type) {
   3352     case Primitive::kPrimBoolean:
   3353     case Primitive::kPrimByte:
   3354     case Primitive::kPrimShort:
   3355     case Primitive::kPrimChar:
   3356     case Primitive::kPrimInt:
   3357       locations->SetInAt(0, Location::RequiresRegister());
   3358       locations->SetInAt(1, Location::RequiresRegister());
   3359       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   3360       break;
   3361 
   3362     case Primitive::kPrimLong:
   3363       locations->SetInAt(0, Location::RequiresRegister());
   3364       locations->SetInAt(1, Location::RequiresRegister());
   3365       // Output overlaps because it is written before doing the low comparison.
   3366       locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
   3367       break;
   3368 
   3369     case Primitive::kPrimFloat:
   3370     case Primitive::kPrimDouble:
   3371       locations->SetInAt(0, Location::RequiresFpuRegister());
   3372       locations->SetInAt(1, Location::RequiresFpuRegister());
   3373       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   3374       break;
   3375 
   3376     default:
   3377       LOG(FATAL) << "Unexpected type for compare operation " << in_type;
   3378   }
   3379 }
   3380 
   3381 void InstructionCodeGeneratorMIPS::VisitCompare(HCompare* instruction) {
   3382   LocationSummary* locations = instruction->GetLocations();
   3383   Register res = locations->Out().AsRegister<Register>();
   3384   Primitive::Type in_type = instruction->InputAt(0)->GetType();
   3385   bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   3386 
   3387   //  0 if: left == right
   3388   //  1 if: left  > right
   3389   // -1 if: left  < right
   3390   switch (in_type) {
   3391     case Primitive::kPrimBoolean:
   3392     case Primitive::kPrimByte:
   3393     case Primitive::kPrimShort:
   3394     case Primitive::kPrimChar:
   3395     case Primitive::kPrimInt: {
   3396       Register lhs = locations->InAt(0).AsRegister<Register>();
   3397       Register rhs = locations->InAt(1).AsRegister<Register>();
   3398       __ Slt(TMP, lhs, rhs);
   3399       __ Slt(res, rhs, lhs);
   3400       __ Subu(res, res, TMP);
   3401       break;
   3402     }
   3403     case Primitive::kPrimLong: {
   3404       MipsLabel done;
   3405       Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
   3406       Register lhs_low  = locations->InAt(0).AsRegisterPairLow<Register>();
   3407       Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
   3408       Register rhs_low  = locations->InAt(1).AsRegisterPairLow<Register>();
   3409       // TODO: more efficient (direct) comparison with a constant.
   3410       __ Slt(TMP, lhs_high, rhs_high);
   3411       __ Slt(AT, rhs_high, lhs_high);  // Inverted: is actually gt.
   3412       __ Subu(res, AT, TMP);           // Result -1:1:0 for [ <, >, == ].
    3413       __ Bnez(res, &done);             // If the high words differ, we are done; else compare low words.
   3414       __ Sltu(TMP, lhs_low, rhs_low);
   3415       __ Sltu(AT, rhs_low, lhs_low);   // Inverted: is actually gt.
   3416       __ Subu(res, AT, TMP);           // Result -1:1:0 for [ <, >, == ].
   3417       __ Bind(&done);
   3418       break;
   3419     }
   3420 
   3421     case Primitive::kPrimFloat: {
   3422       bool gt_bias = instruction->IsGtBias();
   3423       FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
   3424       FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
   3425       MipsLabel done;
   3426       if (isR6) {
   3427         __ CmpEqS(FTMP, lhs, rhs);
   3428         __ LoadConst32(res, 0);
   3429         __ Bc1nez(FTMP, &done);
   3430         if (gt_bias) {
   3431           __ CmpLtS(FTMP, lhs, rhs);
   3432           __ LoadConst32(res, -1);
   3433           __ Bc1nez(FTMP, &done);
   3434           __ LoadConst32(res, 1);
   3435         } else {
   3436           __ CmpLtS(FTMP, rhs, lhs);
   3437           __ LoadConst32(res, 1);
   3438           __ Bc1nez(FTMP, &done);
   3439           __ LoadConst32(res, -1);
   3440         }
   3441       } else {
   3442         if (gt_bias) {
   3443           __ ColtS(0, lhs, rhs);
   3444           __ LoadConst32(res, -1);
   3445           __ Bc1t(0, &done);
   3446           __ CeqS(0, lhs, rhs);
   3447           __ LoadConst32(res, 1);
   3448           __ Movt(res, ZERO, 0);
   3449         } else {
   3450           __ ColtS(0, rhs, lhs);
   3451           __ LoadConst32(res, 1);
   3452           __ Bc1t(0, &done);
   3453           __ CeqS(0, lhs, rhs);
   3454           __ LoadConst32(res, -1);
   3455           __ Movt(res, ZERO, 0);
   3456         }
   3457       }
   3458       __ Bind(&done);
   3459       break;
   3460     }
   3461     case Primitive::kPrimDouble: {
   3462       bool gt_bias = instruction->IsGtBias();
   3463       FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
   3464       FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
   3465       MipsLabel done;
   3466       if (isR6) {
   3467         __ CmpEqD(FTMP, lhs, rhs);
   3468         __ LoadConst32(res, 0);
   3469         __ Bc1nez(FTMP, &done);
   3470         if (gt_bias) {
   3471           __ CmpLtD(FTMP, lhs, rhs);
   3472           __ LoadConst32(res, -1);
   3473           __ Bc1nez(FTMP, &done);
   3474           __ LoadConst32(res, 1);
   3475         } else {
   3476           __ CmpLtD(FTMP, rhs, lhs);
   3477           __ LoadConst32(res, 1);
   3478           __ Bc1nez(FTMP, &done);
   3479           __ LoadConst32(res, -1);
   3480         }
   3481       } else {
   3482         if (gt_bias) {
   3483           __ ColtD(0, lhs, rhs);
   3484           __ LoadConst32(res, -1);
   3485           __ Bc1t(0, &done);
   3486           __ CeqD(0, lhs, rhs);
   3487           __ LoadConst32(res, 1);
   3488           __ Movt(res, ZERO, 0);
   3489         } else {
   3490           __ ColtD(0, rhs, lhs);
   3491           __ LoadConst32(res, 1);
   3492           __ Bc1t(0, &done);
   3493           __ CeqD(0, lhs, rhs);
   3494           __ LoadConst32(res, -1);
   3495           __ Movt(res, ZERO, 0);
   3496         }
   3497       }
   3498       __ Bind(&done);
   3499       break;
   3500     }
   3501 
   3502     default:
   3503       LOG(FATAL) << "Unimplemented compare type " << in_type;
   3504   }
   3505 }
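
         // Illustration (not emitted code): the two-Slt sequence used for the
         // integer case above is the classic branch-free three-way compare; in
         // C++ terms:
         //
         //   int32_t ThreeWayCompare(int32_t lhs, int32_t rhs) {
         //     return (rhs < lhs) - (lhs < rhs);  // 1 if >, -1 if <, 0 if ==.
         //   }
         //
         // The long case applies the same idea to the high words first and only
         // compares the low words (unsigned) when the high words are equal.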
   3506 
   3507 void LocationsBuilderMIPS::HandleCondition(HCondition* instruction) {
   3508   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
   3509   switch (instruction->InputAt(0)->GetType()) {
   3510     default:
   3511     case Primitive::kPrimLong:
   3512       locations->SetInAt(0, Location::RequiresRegister());
   3513       locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
   3514       break;
   3515 
   3516     case Primitive::kPrimFloat:
   3517     case Primitive::kPrimDouble:
   3518       locations->SetInAt(0, Location::RequiresFpuRegister());
   3519       locations->SetInAt(1, Location::RequiresFpuRegister());
   3520       break;
   3521   }
   3522   if (!instruction->IsEmittedAtUseSite()) {
   3523     locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   3524   }
   3525 }
   3526 
   3527 void InstructionCodeGeneratorMIPS::HandleCondition(HCondition* instruction) {
   3528   if (instruction->IsEmittedAtUseSite()) {
   3529     return;
   3530   }
   3531 
   3532   Primitive::Type type = instruction->InputAt(0)->GetType();
   3533   LocationSummary* locations = instruction->GetLocations();
   3534 
   3535   switch (type) {
   3536     default:
   3537       // Integer case.
   3538       GenerateIntCompare(instruction->GetCondition(), locations);
   3539       return;
   3540 
   3541     case Primitive::kPrimLong:
   3542       GenerateLongCompare(instruction->GetCondition(), locations);
   3543       return;
   3544 
   3545     case Primitive::kPrimFloat:
   3546     case Primitive::kPrimDouble:
   3547       GenerateFpCompare(instruction->GetCondition(), instruction->IsGtBias(), type, locations);
   3548       return;
   3549   }
   3550 }
   3551 
   3552 void InstructionCodeGeneratorMIPS::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
   3553   DCHECK(instruction->IsDiv() || instruction->IsRem());
   3554   DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimInt);
   3555 
   3556   LocationSummary* locations = instruction->GetLocations();
   3557   Location second = locations->InAt(1);
   3558   DCHECK(second.IsConstant());
   3559 
   3560   Register out = locations->Out().AsRegister<Register>();
   3561   Register dividend = locations->InAt(0).AsRegister<Register>();
   3562   int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
   3563   DCHECK(imm == 1 || imm == -1);
   3564 
   3565   if (instruction->IsRem()) {
   3566     __ Move(out, ZERO);
   3567   } else {
   3568     if (imm == -1) {
   3569       __ Subu(out, ZERO, dividend);
   3570     } else if (out != dividend) {
   3571       __ Move(out, dividend);
   3572     }
   3573   }
   3574 }
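
         // Illustration (not emitted code): the cases above are the identities
         // x / 1 == x, x / -1 == -x and x % (+/-1) == 0, e.g.:
         //
         //   int32_t DivByMinusOne(int32_t x) { return 0 - x; }  // Subu(out, ZERO, x).
         //
         // Note that negating INT32_MIN wraps back to INT32_MIN, which is exactly
         // the Java semantics of Integer.MIN_VALUE / -1.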
   3575 
   3576 void InstructionCodeGeneratorMIPS::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
   3577   DCHECK(instruction->IsDiv() || instruction->IsRem());
   3578   DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimInt);
   3579 
   3580   LocationSummary* locations = instruction->GetLocations();
   3581   Location second = locations->InAt(1);
   3582   DCHECK(second.IsConstant());
   3583 
   3584   Register out = locations->Out().AsRegister<Register>();
   3585   Register dividend = locations->InAt(0).AsRegister<Register>();
   3586   int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
   3587   uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));
   3588   int ctz_imm = CTZ(abs_imm);
   3589 
   3590   if (instruction->IsDiv()) {
   3591     if (ctz_imm == 1) {
   3592       // Fast path for division by +/-2, which is very common.
   3593       __ Srl(TMP, dividend, 31);
   3594     } else {
   3595       __ Sra(TMP, dividend, 31);
   3596       __ Srl(TMP, TMP, 32 - ctz_imm);
   3597     }
   3598     __ Addu(out, dividend, TMP);
   3599     __ Sra(out, out, ctz_imm);
   3600     if (imm < 0) {
   3601       __ Subu(out, ZERO, out);
   3602     }
   3603   } else {
   3604     if (ctz_imm == 1) {
   3605       // Fast path for modulo +/-2, which is very common.
   3606       __ Sra(TMP, dividend, 31);
   3607       __ Subu(out, dividend, TMP);
   3608       __ Andi(out, out, 1);
   3609       __ Addu(out, out, TMP);
   3610     } else {
   3611       __ Sra(TMP, dividend, 31);
   3612       __ Srl(TMP, TMP, 32 - ctz_imm);
   3613       __ Addu(out, dividend, TMP);
   3614       if (IsUint<16>(abs_imm - 1)) {
   3615         __ Andi(out, out, abs_imm - 1);
   3616       } else {
   3617         __ Sll(out, out, 32 - ctz_imm);
   3618         __ Srl(out, out, 32 - ctz_imm);
   3619       }
   3620       __ Subu(out, out, TMP);
   3621     }
   3622   }
   3623 }
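
         // Illustration (not emitted code): a standalone sketch of the bias
         // trick above for divisor +/-8 (ctz_imm == 3), rounding the quotient
         // toward zero and giving the remainder the sign of the dividend:
         //
         //   int32_t DivBy8(int32_t n) {
         //     int32_t bias = static_cast<uint32_t>(n >> 31) >> (32 - 3);  // 7 if n < 0, else 0.
         //     return (n + bias) >> 3;
         //   }
         //   int32_t RemBy8(int32_t n) {
         //     int32_t bias = static_cast<uint32_t>(n >> 31) >> (32 - 3);
         //     return ((n + bias) & 7) - bias;  // abs_imm - 1 == 7 fits Andi here.
         //   }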
   3624 
   3625 void InstructionCodeGeneratorMIPS::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
   3626   DCHECK(instruction->IsDiv() || instruction->IsRem());
   3627   DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimInt);
   3628 
   3629   LocationSummary* locations = instruction->GetLocations();
   3630   Location second = locations->InAt(1);
   3631   DCHECK(second.IsConstant());
   3632 
   3633   Register out = locations->Out().AsRegister<Register>();
   3634   Register dividend = locations->InAt(0).AsRegister<Register>();
   3635   int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
   3636 
   3637   int64_t magic;
   3638   int shift;
   3639   CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
   3640 
   3641   bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   3642 
   3643   __ LoadConst32(TMP, magic);
   3644   if (isR6) {
   3645     __ MuhR6(TMP, dividend, TMP);
   3646   } else {
   3647     __ MultR2(dividend, TMP);
   3648     __ Mfhi(TMP);
   3649   }
   3650   if (imm > 0 && magic < 0) {
   3651     __ Addu(TMP, TMP, dividend);
   3652   } else if (imm < 0 && magic > 0) {
   3653     __ Subu(TMP, TMP, dividend);
   3654   }
   3655 
   3656   if (shift != 0) {
   3657     __ Sra(TMP, TMP, shift);
   3658   }
   3659 
   3660   if (instruction->IsDiv()) {
   3661     __ Sra(out, TMP, 31);
   3662     __ Subu(out, TMP, out);
   3663   } else {
   3664     __ Sra(AT, TMP, 31);
   3665     __ Subu(AT, TMP, AT);
   3666     __ LoadConst32(TMP, imm);
   3667     if (isR6) {
   3668       __ MulR6(TMP, AT, TMP);
   3669     } else {
   3670       __ MulR2(TMP, AT, TMP);
   3671     }
   3672     __ Subu(out, dividend, TMP);
   3673   }
   3674 }
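
         // Illustration (not emitted code): a standalone sketch of the
         // multiply-high technique above, using the classic Hacker's Delight
         // constants for divisor 7 (magic = 0x92492493, shift = 2), i.e. what
         // CalculateMagicAndShiftForDivRem() computes for imm == 7:
         //
         //   int32_t DivBy7(int32_t n) {
         //     int32_t magic = static_cast<int32_t>(0x92492493);
         //     int32_t q = static_cast<int32_t>((static_cast<int64_t>(magic) * n) >> 32);
         //     q += n;                               // imm > 0 && magic < 0.
         //     q >>= 2;                              // Arithmetic shift by "shift".
         //     q += static_cast<uint32_t>(q) >> 31;  // Add 1 for negative quotients.
         //     return q;                             // == n / 7 for all int32_t n.
         //   }
         //
         // The remainder path then reconstructs n - (n / imm) * imm, as emitted
         // in the IsRem() branch above.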
   3675 
   3676 void InstructionCodeGeneratorMIPS::GenerateDivRemIntegral(HBinaryOperation* instruction) {
   3677   DCHECK(instruction->IsDiv() || instruction->IsRem());
   3678   DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimInt);
   3679 
   3680   LocationSummary* locations = instruction->GetLocations();
   3681   Register out = locations->Out().AsRegister<Register>();
   3682   Location second = locations->InAt(1);
   3683 
   3684   if (second.IsConstant()) {
   3685     int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
   3686     if (imm == 0) {
    3687       // Do not generate anything. DivZeroCheck prevents this code from being executed.
   3688     } else if (imm == 1 || imm == -1) {
   3689       DivRemOneOrMinusOne(instruction);
   3690     } else if (IsPowerOfTwo(AbsOrMin(imm))) {
   3691       DivRemByPowerOfTwo(instruction);
   3692     } else {
   3693       DCHECK(imm <= -2 || imm >= 2);
   3694       GenerateDivRemWithAnyConstant(instruction);
   3695     }
   3696   } else {
   3697     Register dividend = locations->InAt(0).AsRegister<Register>();
   3698     Register divisor = second.AsRegister<Register>();
   3699     bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   3700     if (instruction->IsDiv()) {
   3701       if (isR6) {
   3702         __ DivR6(out, dividend, divisor);
   3703       } else {
   3704         __ DivR2(out, dividend, divisor);
   3705       }
   3706     } else {
   3707       if (isR6) {
   3708         __ ModR6(out, dividend, divisor);
   3709       } else {
   3710         __ ModR2(out, dividend, divisor);
   3711       }
   3712     }
   3713   }
   3714 }
   3715 
   3716 void LocationsBuilderMIPS::VisitDiv(HDiv* div) {
   3717   Primitive::Type type = div->GetResultType();
   3718   LocationSummary::CallKind call_kind = (type == Primitive::kPrimLong)
   3719       ? LocationSummary::kCallOnMainOnly
   3720       : LocationSummary::kNoCall;
   3721 
   3722   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
   3723 
   3724   switch (type) {
   3725     case Primitive::kPrimInt:
   3726       locations->SetInAt(0, Location::RequiresRegister());
   3727       locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
   3728       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   3729       break;
   3730 
   3731     case Primitive::kPrimLong: {
   3732       InvokeRuntimeCallingConvention calling_convention;
   3733       locations->SetInAt(0, Location::RegisterPairLocation(
   3734           calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
   3735       locations->SetInAt(1, Location::RegisterPairLocation(
   3736           calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
   3737       locations->SetOut(calling_convention.GetReturnLocation(type));
   3738       break;
   3739     }
   3740 
   3741     case Primitive::kPrimFloat:
   3742     case Primitive::kPrimDouble:
   3743       locations->SetInAt(0, Location::RequiresFpuRegister());
   3744       locations->SetInAt(1, Location::RequiresFpuRegister());
   3745       locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
   3746       break;
   3747 
   3748     default:
   3749       LOG(FATAL) << "Unexpected div type " << type;
   3750   }
   3751 }
   3752 
   3753 void InstructionCodeGeneratorMIPS::VisitDiv(HDiv* instruction) {
   3754   Primitive::Type type = instruction->GetType();
   3755   LocationSummary* locations = instruction->GetLocations();
   3756 
   3757   switch (type) {
   3758     case Primitive::kPrimInt:
   3759       GenerateDivRemIntegral(instruction);
   3760       break;
   3761     case Primitive::kPrimLong: {
   3762       codegen_->InvokeRuntime(kQuickLdiv, instruction, instruction->GetDexPc());
   3763       CheckEntrypointTypes<kQuickLdiv, int64_t, int64_t, int64_t>();
   3764       break;
   3765     }
   3766     case Primitive::kPrimFloat:
   3767     case Primitive::kPrimDouble: {
   3768       FRegister dst = locations->Out().AsFpuRegister<FRegister>();
   3769       FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
   3770       FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
   3771       if (type == Primitive::kPrimFloat) {
   3772         __ DivS(dst, lhs, rhs);
   3773       } else {
   3774         __ DivD(dst, lhs, rhs);
   3775       }
   3776       break;
   3777     }
   3778     default:
   3779       LOG(FATAL) << "Unexpected div type " << type;
   3780   }
   3781 }
   3782 
   3783 void LocationsBuilderMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
   3784   LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
   3785   locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
   3786 }
   3787 
   3788 void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
   3789   SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS(instruction);
   3790   codegen_->AddSlowPath(slow_path);
   3791   Location value = instruction->GetLocations()->InAt(0);
   3792   Primitive::Type type = instruction->GetType();
   3793 
   3794   switch (type) {
   3795     case Primitive::kPrimBoolean:
   3796     case Primitive::kPrimByte:
   3797     case Primitive::kPrimChar:
   3798     case Primitive::kPrimShort:
   3799     case Primitive::kPrimInt: {
   3800       if (value.IsConstant()) {
   3801         if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
   3802           __ B(slow_path->GetEntryLabel());
   3803         } else {
    3804         // A division by a non-zero constant is valid. We don't need to perform
   3805           // any check, so simply fall through.
   3806         }
   3807       } else {
   3808         DCHECK(value.IsRegister()) << value;
   3809         __ Beqz(value.AsRegister<Register>(), slow_path->GetEntryLabel());
   3810       }
   3811       break;
   3812     }
   3813     case Primitive::kPrimLong: {
   3814       if (value.IsConstant()) {
   3815         if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
   3816           __ B(slow_path->GetEntryLabel());
   3817         } else {
    3818         // A division by a non-zero constant is valid. We don't need to perform
   3819           // any check, so simply fall through.
   3820         }
   3821       } else {
   3822         DCHECK(value.IsRegisterPair()) << value;
   3823         __ Or(TMP, value.AsRegisterPairHigh<Register>(), value.AsRegisterPairLow<Register>());
   3824         __ Beqz(TMP, slow_path->GetEntryLabel());
   3825       }
   3826       break;
   3827     }
   3828     default:
   3829       LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
   3830   }
   3831 }
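
         // Illustration (not emitted code): for kPrimLong the register check
         // above relies on a 64-bit value being zero iff both 32-bit halves are
         // zero:
         //
         //   bool IsZero64(uint32_t high, uint32_t low) { return (high | low) == 0; }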
   3832 
   3833 void LocationsBuilderMIPS::VisitDoubleConstant(HDoubleConstant* constant) {
   3834   LocationSummary* locations =
   3835       new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
   3836   locations->SetOut(Location::ConstantLocation(constant));
   3837 }
   3838 
   3839 void InstructionCodeGeneratorMIPS::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
   3840   // Will be generated at use site.
   3841 }
   3842 
   3843 void LocationsBuilderMIPS::VisitExit(HExit* exit) {
   3844   exit->SetLocations(nullptr);
   3845 }
   3846 
   3847 void InstructionCodeGeneratorMIPS::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
   3848 }
   3849 
   3850 void LocationsBuilderMIPS::VisitFloatConstant(HFloatConstant* constant) {
   3851   LocationSummary* locations =
   3852       new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
   3853   locations->SetOut(Location::ConstantLocation(constant));
   3854 }
   3855 
   3856 void InstructionCodeGeneratorMIPS::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
   3857   // Will be generated at use site.
   3858 }
   3859 
   3860 void LocationsBuilderMIPS::VisitGoto(HGoto* got) {
   3861   got->SetLocations(nullptr);
   3862 }
   3863 
   3864 void InstructionCodeGeneratorMIPS::HandleGoto(HInstruction* got, HBasicBlock* successor) {
   3865   DCHECK(!successor->IsExitBlock());
   3866   HBasicBlock* block = got->GetBlock();
   3867   HInstruction* previous = got->GetPrevious();
   3868   HLoopInformation* info = block->GetLoopInformation();
   3869 
   3870   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
   3871     codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
   3872     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
   3873     return;
   3874   }
   3875   if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
   3876     GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
   3877   }
   3878   if (!codegen_->GoesToNextBlock(block, successor)) {
   3879     __ B(codegen_->GetLabelOf(successor));
   3880   }
   3881 }
   3882 
   3883 void InstructionCodeGeneratorMIPS::VisitGoto(HGoto* got) {
   3884   HandleGoto(got, got->GetSuccessor());
   3885 }
   3886 
   3887 void LocationsBuilderMIPS::VisitTryBoundary(HTryBoundary* try_boundary) {
   3888   try_boundary->SetLocations(nullptr);
   3889 }
   3890 
   3891 void InstructionCodeGeneratorMIPS::VisitTryBoundary(HTryBoundary* try_boundary) {
   3892   HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
   3893   if (!successor->IsExitBlock()) {
   3894     HandleGoto(try_boundary, successor);
   3895   }
   3896 }
   3897 
   3898 void InstructionCodeGeneratorMIPS::GenerateIntCompare(IfCondition cond,
   3899                                                       LocationSummary* locations) {
   3900   Register dst = locations->Out().AsRegister<Register>();
   3901   Register lhs = locations->InAt(0).AsRegister<Register>();
   3902   Location rhs_location = locations->InAt(1);
   3903   Register rhs_reg = ZERO;
   3904   int64_t rhs_imm = 0;
   3905   bool use_imm = rhs_location.IsConstant();
   3906   if (use_imm) {
   3907     rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
   3908   } else {
   3909     rhs_reg = rhs_location.AsRegister<Register>();
   3910   }
   3911 
   3912   switch (cond) {
   3913     case kCondEQ:
   3914     case kCondNE:
   3915       if (use_imm && IsInt<16>(-rhs_imm)) {
   3916         if (rhs_imm == 0) {
   3917           if (cond == kCondEQ) {
   3918             __ Sltiu(dst, lhs, 1);
   3919           } else {
   3920             __ Sltu(dst, ZERO, lhs);
   3921           }
   3922         } else {
   3923           __ Addiu(dst, lhs, -rhs_imm);
   3924           if (cond == kCondEQ) {
   3925             __ Sltiu(dst, dst, 1);
   3926           } else {
   3927             __ Sltu(dst, ZERO, dst);
   3928           }
   3929         }
   3930       } else {
   3931         if (use_imm && IsUint<16>(rhs_imm)) {
   3932           __ Xori(dst, lhs, rhs_imm);
   3933         } else {
   3934           if (use_imm) {
   3935             rhs_reg = TMP;
   3936             __ LoadConst32(rhs_reg, rhs_imm);
   3937           }
   3938           __ Xor(dst, lhs, rhs_reg);
   3939         }
   3940         if (cond == kCondEQ) {
   3941           __ Sltiu(dst, dst, 1);
   3942         } else {
   3943           __ Sltu(dst, ZERO, dst);
   3944         }
   3945       }
   3946       break;
   3947 
   3948     case kCondLT:
   3949     case kCondGE:
   3950       if (use_imm && IsInt<16>(rhs_imm)) {
   3951         __ Slti(dst, lhs, rhs_imm);
   3952       } else {
   3953         if (use_imm) {
   3954           rhs_reg = TMP;
   3955           __ LoadConst32(rhs_reg, rhs_imm);
   3956         }
   3957         __ Slt(dst, lhs, rhs_reg);
   3958       }
   3959       if (cond == kCondGE) {
   3960         // Simulate lhs >= rhs via !(lhs < rhs) since there's
   3961         // only the slt instruction but no sge.
   3962         __ Xori(dst, dst, 1);
   3963       }
   3964       break;
   3965 
   3966     case kCondLE:
   3967     case kCondGT:
   3968       if (use_imm && IsInt<16>(rhs_imm + 1)) {
   3969         // Simulate lhs <= rhs via lhs < rhs + 1.
   3970         __ Slti(dst, lhs, rhs_imm + 1);
   3971         if (cond == kCondGT) {
   3972           // Simulate lhs > rhs via !(lhs <= rhs) since there's
   3973           // only the slti instruction but no sgti.
   3974           __ Xori(dst, dst, 1);
   3975         }
   3976       } else {
   3977         if (use_imm) {
   3978           rhs_reg = TMP;
   3979           __ LoadConst32(rhs_reg, rhs_imm);
   3980         }
   3981         __ Slt(dst, rhs_reg, lhs);
   3982         if (cond == kCondLE) {
   3983           // Simulate lhs <= rhs via !(rhs < lhs) since there's
   3984           // only the slt instruction but no sle.
   3985           __ Xori(dst, dst, 1);
   3986         }
   3987       }
   3988       break;
   3989 
   3990     case kCondB:
   3991     case kCondAE:
   3992       if (use_imm && IsInt<16>(rhs_imm)) {
   3993         // Sltiu sign-extends its 16-bit immediate operand before
   3994         // the comparison and thus lets us compare directly with
   3995         // unsigned values in the ranges [0, 0x7fff] and
   3996         // [0xffff8000, 0xffffffff].
   3997         __ Sltiu(dst, lhs, rhs_imm);
   3998       } else {
   3999         if (use_imm) {
   4000           rhs_reg = TMP;
   4001           __ LoadConst32(rhs_reg, rhs_imm);
   4002         }
   4003         __ Sltu(dst, lhs, rhs_reg);
   4004       }
   4005       if (cond == kCondAE) {
   4006         // Simulate lhs >= rhs via !(lhs < rhs) since there's
   4007         // only the sltu instruction but no sgeu.
   4008         __ Xori(dst, dst, 1);
   4009       }
   4010       break;
   4011 
   4012     case kCondBE:
   4013     case kCondA:
   4014       if (use_imm && (rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
   4015         // Simulate lhs <= rhs via lhs < rhs + 1.
   4016         // Note that this only works if rhs + 1 does not overflow
   4017         // to 0, hence the check above.
   4018         // Sltiu sign-extends its 16-bit immediate operand before
   4019         // the comparison and thus lets us compare directly with
   4020         // unsigned values in the ranges [0, 0x7fff] and
   4021         // [0xffff8000, 0xffffffff].
   4022         __ Sltiu(dst, lhs, rhs_imm + 1);
   4023         if (cond == kCondA) {
   4024           // Simulate lhs > rhs via !(lhs <= rhs) since there's
   4025           // only the sltiu instruction but no sgtiu.
   4026           __ Xori(dst, dst, 1);
   4027         }
   4028       } else {
   4029         if (use_imm) {
   4030           rhs_reg = TMP;
   4031           __ LoadConst32(rhs_reg, rhs_imm);
   4032         }
   4033         __ Sltu(dst, rhs_reg, lhs);
   4034         if (cond == kCondBE) {
   4035           // Simulate lhs <= rhs via !(rhs < lhs) since there's
   4036           // only the sltu instruction but no sleu.
   4037           __ Xori(dst, dst, 1);
   4038         }
   4039       }
   4040       break;
   4041   }
   4042 }
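
         // Illustration (not emitted code): the immediate rewrites above in C++
         // terms, given that MIPS has slt/slti/sltu/sltiu but no sle/sge forms,
         // and that sltiu sign-extends its immediate before comparing unsigned:
         //
         //   bool LeImm(int32_t lhs, int32_t imm) {  // lhs <= imm  as  lhs < imm + 1,
         //     return lhs < imm + 1;                 // valid while imm + 1 fits Slti.
         //   }
         //   bool GeImm(int32_t lhs, int32_t imm) {  // lhs >= imm  as  !(lhs < imm).
         //     return !(lhs < imm);
         //   }
         //   bool SltiuSemantics(uint32_t lhs, int16_t imm) {
         //     return lhs < static_cast<uint32_t>(static_cast<int32_t>(imm));
         //   }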
   4043 
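         // Materializes the comparison "cond" of the two inputs into dst and
         // returns whether the result is inverted: when the return value is true
         // the condition holds iff dst is zero (e.g. kCondEQ materializes as an
         // Xor whose result is zero exactly on equality), so the caller branches
         // with Beqz instead of Bnez.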
   4044 bool InstructionCodeGeneratorMIPS::MaterializeIntCompare(IfCondition cond,
   4045                                                          LocationSummary* input_locations,
   4046                                                          Register dst) {
   4047   Register lhs = input_locations->InAt(0).AsRegister<Register>();
   4048   Location rhs_location = input_locations->InAt(1);
   4049   Register rhs_reg = ZERO;
   4050   int64_t rhs_imm = 0;
   4051   bool use_imm = rhs_location.IsConstant();
   4052   if (use_imm) {
   4053     rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
   4054   } else {
   4055     rhs_reg = rhs_location.AsRegister<Register>();
   4056   }
   4057 
   4058   switch (cond) {
   4059     case kCondEQ:
   4060     case kCondNE:
   4061       if (use_imm && IsInt<16>(-rhs_imm)) {
   4062         __ Addiu(dst, lhs, -rhs_imm);
   4063       } else if (use_imm && IsUint<16>(rhs_imm)) {
   4064         __ Xori(dst, lhs, rhs_imm);
   4065       } else {
   4066         if (use_imm) {
   4067           rhs_reg = TMP;
   4068           __ LoadConst32(rhs_reg, rhs_imm);
   4069         }
   4070         __ Xor(dst, lhs, rhs_reg);
   4071       }
   4072       return (cond == kCondEQ);
   4073 
   4074     case kCondLT:
   4075     case kCondGE:
   4076       if (use_imm && IsInt<16>(rhs_imm)) {
   4077         __ Slti(dst, lhs, rhs_imm);
   4078       } else {
   4079         if (use_imm) {
   4080           rhs_reg = TMP;
   4081           __ LoadConst32(rhs_reg, rhs_imm);
   4082         }
   4083         __ Slt(dst, lhs, rhs_reg);
   4084       }
   4085       return (cond == kCondGE);
   4086 
   4087     case kCondLE:
   4088     case kCondGT:
   4089       if (use_imm && IsInt<16>(rhs_imm + 1)) {
   4090         // Simulate lhs <= rhs via lhs < rhs + 1.
   4091         __ Slti(dst, lhs, rhs_imm + 1);
   4092         return (cond == kCondGT);
   4093       } else {
   4094         if (use_imm) {
   4095           rhs_reg = TMP;
   4096           __ LoadConst32(rhs_reg, rhs_imm);
   4097         }
   4098         __ Slt(dst, rhs_reg, lhs);
   4099         return (cond == kCondLE);
   4100       }
   4101 
   4102     case kCondB:
   4103     case kCondAE:
   4104       if (use_imm && IsInt<16>(rhs_imm)) {
   4105         // Sltiu sign-extends its 16-bit immediate operand before
   4106         // the comparison and thus lets us compare directly with
   4107         // unsigned values in the ranges [0, 0x7fff] and
   4108         // [0xffff8000, 0xffffffff].
   4109         __ Sltiu(dst, lhs, rhs_imm);
   4110       } else {
   4111         if (use_imm) {
   4112           rhs_reg = TMP;
   4113           __ LoadConst32(rhs_reg, rhs_imm);
   4114         }
   4115         __ Sltu(dst, lhs, rhs_reg);
   4116       }
   4117       return (cond == kCondAE);
   4118 
   4119     case kCondBE:
   4120     case kCondA:
   4121       if (use_imm && (rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
   4122         // Simulate lhs <= rhs via lhs < rhs + 1.
   4123         // Note that this only works if rhs + 1 does not overflow
   4124         // to 0, hence the check above.
   4125         // Sltiu sign-extends its 16-bit immediate operand before
   4126         // the comparison and thus lets us compare directly with
   4127         // unsigned values in the ranges [0, 0x7fff] and
   4128         // [0xffff8000, 0xffffffff].
   4129         __ Sltiu(dst, lhs, rhs_imm + 1);
   4130         return (cond == kCondA);
   4131       } else {
   4132         if (use_imm) {
   4133           rhs_reg = TMP;
   4134           __ LoadConst32(rhs_reg, rhs_imm);
   4135         }
   4136         __ Sltu(dst, rhs_reg, lhs);
   4137         return (cond == kCondBE);
   4138       }
   4139   }
   4140 }
   4141 
   4142 void InstructionCodeGeneratorMIPS::GenerateIntCompareAndBranch(IfCondition cond,
   4143                                                                LocationSummary* locations,
   4144                                                                MipsLabel* label) {
   4145   Register lhs = locations->InAt(0).AsRegister<Register>();
   4146   Location rhs_location = locations->InAt(1);
   4147   Register rhs_reg = ZERO;
   4148   int64_t rhs_imm = 0;
   4149   bool use_imm = rhs_location.IsConstant();
   4150   if (use_imm) {
   4151     rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
   4152   } else {
   4153     rhs_reg = rhs_location.AsRegister<Register>();
   4154   }
   4155 
   4156   if (use_imm && rhs_imm == 0) {
   4157     switch (cond) {
   4158       case kCondEQ:
    4159       case kCondBE:  // Unsigned <= 0 is equivalent to == 0.
   4160         __ Beqz(lhs, label);
   4161         break;
   4162       case kCondNE:
    4163       case kCondA:  // Unsigned > 0 is equivalent to != 0.
   4164         __ Bnez(lhs, label);
   4165         break;
   4166       case kCondLT:
   4167         __ Bltz(lhs, label);
   4168         break;
   4169       case kCondGE:
   4170         __ Bgez(lhs, label);
   4171         break;
   4172       case kCondLE:
   4173         __ Blez(lhs, label);
   4174         break;
   4175       case kCondGT:
   4176         __ Bgtz(lhs, label);
   4177         break;
   4178       case kCondB:  // always false
   4179         break;
   4180       case kCondAE:  // always true
   4181         __ B(label);
   4182         break;
   4183     }
   4184   } else {
   4185     bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   4186     if (isR6 || !use_imm) {
   4187       if (use_imm) {
   4188         rhs_reg = TMP;
   4189         __ LoadConst32(rhs_reg, rhs_imm);
   4190       }
   4191       switch (cond) {
   4192         case kCondEQ:
   4193           __ Beq(lhs, rhs_reg, label);
   4194           break;
   4195         case kCondNE:
   4196           __ Bne(lhs, rhs_reg, label);
   4197           break;
   4198         case kCondLT:
   4199           __ Blt(lhs, rhs_reg, label);
   4200           break;
   4201         case kCondGE:
   4202           __ Bge(lhs, rhs_reg, label);
   4203           break;
   4204         case kCondLE:
   4205           __ Bge(rhs_reg, lhs, label);
   4206           break;
   4207         case kCondGT:
   4208           __ Blt(rhs_reg, lhs, label);
   4209           break;
   4210         case kCondB:
   4211           __ Bltu(lhs, rhs_reg, label);
   4212           break;
   4213         case kCondAE:
   4214           __ Bgeu(lhs, rhs_reg, label);
   4215           break;
   4216         case kCondBE:
   4217           __ Bgeu(rhs_reg, lhs, label);
   4218           break;
   4219         case kCondA:
   4220           __ Bltu(rhs_reg, lhs, label);
   4221           break;
   4222       }
   4223     } else {
   4224       // Special cases for more efficient comparison with constants on R2.
   4225       switch (cond) {
   4226         case kCondEQ:
   4227           __ LoadConst32(TMP, rhs_imm);
   4228           __ Beq(lhs, TMP, label);
   4229           break;
   4230         case kCondNE:
   4231           __ LoadConst32(TMP, rhs_imm);
   4232           __ Bne(lhs, TMP, label);
   4233           break;
   4234         case kCondLT:
   4235           if (IsInt<16>(rhs_imm)) {
   4236             __ Slti(TMP, lhs, rhs_imm);
   4237             __ Bnez(TMP, label);
   4238           } else {
   4239             __ LoadConst32(TMP, rhs_imm);
   4240             __ Blt(lhs, TMP, label);
   4241           }
   4242           break;
   4243         case kCondGE:
   4244           if (IsInt<16>(rhs_imm)) {
   4245             __ Slti(TMP, lhs, rhs_imm);
   4246             __ Beqz(TMP, label);
   4247           } else {
   4248             __ LoadConst32(TMP, rhs_imm);
   4249             __ Bge(lhs, TMP, label);
   4250           }
   4251           break;
   4252         case kCondLE:
   4253           if (IsInt<16>(rhs_imm + 1)) {
   4254             // Simulate lhs <= rhs via lhs < rhs + 1.
   4255             __ Slti(TMP, lhs, rhs_imm + 1);
   4256             __ Bnez(TMP, label);
   4257           } else {
   4258             __ LoadConst32(TMP, rhs_imm);
   4259             __ Bge(TMP, lhs, label);
   4260           }
   4261           break;
   4262         case kCondGT:
   4263           if (IsInt<16>(rhs_imm + 1)) {
   4264             // Simulate lhs > rhs via !(lhs < rhs + 1).
   4265             __ Slti(TMP, lhs, rhs_imm + 1);
   4266             __ Beqz(TMP, label);
   4267           } else {
   4268             __ LoadConst32(TMP, rhs_imm);
   4269             __ Blt(TMP, lhs, label);
   4270           }
   4271           break;
   4272         case kCondB:
   4273           if (IsInt<16>(rhs_imm)) {
   4274             __ Sltiu(TMP, lhs, rhs_imm);
   4275             __ Bnez(TMP, label);
   4276           } else {
   4277             __ LoadConst32(TMP, rhs_imm);
   4278             __ Bltu(lhs, TMP, label);
   4279           }
   4280           break;
   4281         case kCondAE:
   4282           if (IsInt<16>(rhs_imm)) {
   4283             __ Sltiu(TMP, lhs, rhs_imm);
   4284             __ Beqz(TMP, label);
   4285           } else {
   4286             __ LoadConst32(TMP, rhs_imm);
   4287             __ Bgeu(lhs, TMP, label);
   4288           }
   4289           break;
   4290         case kCondBE:
   4291           if ((rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
   4292             // Simulate lhs <= rhs via lhs < rhs + 1.
   4293             // Note that this only works if rhs + 1 does not overflow
   4294             // to 0, hence the check above.
   4295             __ Sltiu(TMP, lhs, rhs_imm + 1);
   4296             __ Bnez(TMP, label);
   4297           } else {
   4298             __ LoadConst32(TMP, rhs_imm);
   4299             __ Bgeu(TMP, lhs, label);
   4300           }
   4301           break;
   4302         case kCondA:
   4303           if ((rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
   4304             // Simulate lhs > rhs via !(lhs < rhs + 1).
   4305             // Note that this only works if rhs + 1 does not overflow
   4306             // to 0, hence the check above.
   4307             __ Sltiu(TMP, lhs, rhs_imm + 1);
   4308             __ Beqz(TMP, label);
   4309           } else {
   4310             __ LoadConst32(TMP, rhs_imm);
   4311             __ Bltu(TMP, lhs, label);
   4312           }
   4313           break;
   4314       }
   4315     }
   4316   }
   4317 }
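
         // Illustration (not emitted code): most of the R2-specific cases above
         // avoid a LoadConst32 when the constant fits the 16-bit slti/sltiu
         // immediate; the compare bit is materialized in TMP and then branched
         // on, so kCondLT, for instance, becomes Slti(TMP, lhs, rhs_imm)
         // followed by Bnez(TMP, label), i.e. "if (lhs < rhs_imm) goto label;".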
   4318 
   4319 void InstructionCodeGeneratorMIPS::GenerateLongCompare(IfCondition cond,
   4320                                                        LocationSummary* locations) {
   4321   Register dst = locations->Out().AsRegister<Register>();
   4322   Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
   4323   Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
   4324   Location rhs_location = locations->InAt(1);
   4325   Register rhs_high = ZERO;
   4326   Register rhs_low = ZERO;
   4327   int64_t imm = 0;
   4328   uint32_t imm_high = 0;
   4329   uint32_t imm_low = 0;
   4330   bool use_imm = rhs_location.IsConstant();
   4331   if (use_imm) {
   4332     imm = rhs_location.GetConstant()->AsLongConstant()->GetValue();
   4333     imm_high = High32Bits(imm);
   4334     imm_low = Low32Bits(imm);
   4335   } else {
   4336     rhs_high = rhs_location.AsRegisterPairHigh<Register>();
   4337     rhs_low = rhs_location.AsRegisterPairLow<Register>();
   4338   }
   4339   if (use_imm && imm == 0) {
   4340     switch (cond) {
   4341       case kCondEQ:
    4342       case kCondBE:  // Unsigned <= 0 is equivalent to == 0.
   4343         __ Or(dst, lhs_high, lhs_low);
   4344         __ Sltiu(dst, dst, 1);
   4345         break;
   4346       case kCondNE:
    4347       case kCondA:  // Unsigned > 0 is equivalent to != 0.
   4348         __ Or(dst, lhs_high, lhs_low);
   4349         __ Sltu(dst, ZERO, dst);
   4350         break;
   4351       case kCondLT:
   4352         __ Slt(dst, lhs_high, ZERO);
   4353         break;
   4354       case kCondGE:
   4355         __ Slt(dst, lhs_high, ZERO);
   4356         __ Xori(dst, dst, 1);
   4357         break;
   4358       case kCondLE:
   4359         __ Or(TMP, lhs_high, lhs_low);
   4360         __ Sra(AT, lhs_high, 31);
   4361         __ Sltu(dst, AT, TMP);
   4362         __ Xori(dst, dst, 1);
   4363         break;
   4364       case kCondGT:
   4365         __ Or(TMP, lhs_high, lhs_low);
   4366         __ Sra(AT, lhs_high, 31);
   4367         __ Sltu(dst, AT, TMP);
   4368         break;
   4369       case kCondB:  // always false
   4370         __ Andi(dst, dst, 0);
   4371         break;
   4372       case kCondAE:  // always true
   4373         __ Ori(dst, ZERO, 1);
   4374         break;
   4375     }
   4376   } else if (use_imm) {
   4377     // TODO: more efficient comparison with constants without loading them into TMP/AT.
   4378     switch (cond) {
   4379       case kCondEQ:
   4380         __ LoadConst32(TMP, imm_high);
   4381         __ Xor(TMP, TMP, lhs_high);
   4382         __ LoadConst32(AT, imm_low);
   4383         __ Xor(AT, AT, lhs_low);
   4384         __ Or(dst, TMP, AT);
   4385         __ Sltiu(dst, dst, 1);
   4386         break;
   4387       case kCondNE:
   4388         __ LoadConst32(TMP, imm_high);
   4389         __ Xor(TMP, TMP, lhs_high);
   4390         __ LoadConst32(AT, imm_low);
   4391         __ Xor(AT, AT, lhs_low);
   4392         __ Or(dst, TMP, AT);
   4393         __ Sltu(dst, ZERO, dst);
   4394         break;
   4395       case kCondLT:
   4396       case kCondGE:
   4397         if (dst == lhs_low) {
   4398           __ LoadConst32(TMP, imm_low);
   4399           __ Sltu(dst, lhs_low, TMP);
   4400         }
   4401         __ LoadConst32(TMP, imm_high);
   4402         __ Slt(AT, lhs_high, TMP);
   4403         __ Slt(TMP, TMP, lhs_high);
   4404         if (dst != lhs_low) {
   4405           __ LoadConst32(dst, imm_low);
   4406           __ Sltu(dst, lhs_low, dst);
   4407         }
   4408         __ Slt(dst, TMP, dst);
   4409         __ Or(dst, dst, AT);
   4410         if (cond == kCondGE) {
   4411           __ Xori(dst, dst, 1);
   4412         }
   4413         break;
   4414       case kCondGT:
   4415       case kCondLE:
   4416         if (dst == lhs_low) {
   4417           __ LoadConst32(TMP, imm_low);
   4418           __ Sltu(dst, TMP, lhs_low);
   4419         }
   4420         __ LoadConst32(TMP, imm_high);
   4421         __ Slt(AT, TMP, lhs_high);
   4422         __ Slt(TMP, lhs_high, TMP);
   4423         if (dst != lhs_low) {
   4424           __ LoadConst32(dst, imm_low);
   4425           __ Sltu(dst, dst, lhs_low);
   4426         }
   4427         __ Slt(dst, TMP, dst);
   4428         __ Or(dst, dst, AT);
   4429         if (cond == kCondLE) {
   4430           __ Xori(dst, dst, 1);
   4431         }
   4432         break;
   4433       case kCondB:
   4434       case kCondAE:
   4435         if (dst == lhs_low) {
   4436           __ LoadConst32(TMP, imm_low);
   4437           __ Sltu(dst, lhs_low, TMP);
   4438         }
   4439         __ LoadConst32(TMP, imm_high);
   4440         __ Sltu(AT, lhs_high, TMP);
   4441         __ Sltu(TMP, TMP, lhs_high);
   4442         if (dst != lhs_low) {
   4443           __ LoadConst32(dst, imm_low);
   4444           __ Sltu(dst, lhs_low, dst);
   4445         }
   4446         __ Slt(dst, TMP, dst);
   4447         __ Or(dst, dst, AT);
   4448         if (cond == kCondAE) {
   4449           __ Xori(dst, dst, 1);
   4450         }
   4451         break;
   4452       case kCondA:
   4453       case kCondBE:
   4454         if (dst == lhs_low) {
   4455           __ LoadConst32(TMP, imm_low);
   4456           __ Sltu(dst, TMP, lhs_low);
   4457         }
   4458         __ LoadConst32(TMP, imm_high);
   4459         __ Sltu(AT, TMP, lhs_high);
   4460         __ Sltu(TMP, lhs_high, TMP);
   4461         if (dst != lhs_low) {
   4462           __ LoadConst32(dst, imm_low);
   4463           __ Sltu(dst, dst, lhs_low);
   4464         }
   4465         __ Slt(dst, TMP, dst);
   4466         __ Or(dst, dst, AT);
   4467         if (cond == kCondBE) {
   4468           __ Xori(dst, dst, 1);
   4469         }
   4470         break;
   4471     }
   4472   } else {
   4473     switch (cond) {
   4474       case kCondEQ:
   4475         __ Xor(TMP, lhs_high, rhs_high);
   4476         __ Xor(AT, lhs_low, rhs_low);
   4477         __ Or(dst, TMP, AT);
   4478         __ Sltiu(dst, dst, 1);
   4479         break;
   4480       case kCondNE:
   4481         __ Xor(TMP, lhs_high, rhs_high);
   4482         __ Xor(AT, lhs_low, rhs_low);
   4483         __ Or(dst, TMP, AT);
   4484         __ Sltu(dst, ZERO, dst);
   4485         break;
   4486       case kCondLT:
   4487       case kCondGE:
   4488         __ Slt(TMP, rhs_high, lhs_high);
   4489         __ Sltu(AT, lhs_low, rhs_low);
   4490         __ Slt(TMP, TMP, AT);
   4491         __ Slt(AT, lhs_high, rhs_high);
   4492         __ Or(dst, AT, TMP);
   4493         if (cond == kCondGE) {
   4494           __ Xori(dst, dst, 1);
   4495         }
   4496         break;
   4497       case kCondGT:
   4498       case kCondLE:
   4499         __ Slt(TMP, lhs_high, rhs_high);
   4500         __ Sltu(AT, rhs_low, lhs_low);
   4501         __ Slt(TMP, TMP, AT);
   4502         __ Slt(AT, rhs_high, lhs_high);
   4503         __ Or(dst, AT, TMP);
   4504         if (cond == kCondLE) {
   4505           __ Xori(dst, dst, 1);
   4506         }
   4507         break;
   4508       case kCondB:
   4509       case kCondAE:
   4510         __ Sltu(TMP, rhs_high, lhs_high);
   4511         __ Sltu(AT, lhs_low, rhs_low);
   4512         __ Slt(TMP, TMP, AT);
   4513         __ Sltu(AT, lhs_high, rhs_high);
   4514         __ Or(dst, AT, TMP);
   4515         if (cond == kCondAE) {
   4516           __ Xori(dst, dst, 1);
   4517         }
   4518         break;
   4519       case kCondA:
   4520       case kCondBE:
   4521         __ Sltu(TMP, lhs_high, rhs_high);
   4522         __ Sltu(AT, rhs_low, lhs_low);
   4523         __ Slt(TMP, TMP, AT);
   4524         __ Sltu(AT, rhs_high, lhs_high);
   4525         __ Or(dst, AT, TMP);
   4526         if (cond == kCondBE) {
   4527           __ Xori(dst, dst, 1);
   4528         }
   4529         break;
   4530     }
   4531   }
   4532 }
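
         // Illustration (not emitted code): a branch-free sketch of the signed
         // 64-bit "less than" computed above from 32-bit halves; the remaining
         // conditions follow by swapping the operands and/or Xori-inverting the
         // result:
         //
         //   int32_t LongLt(int32_t lhs_high, uint32_t lhs_low,
         //                  int32_t rhs_high, uint32_t rhs_low) {
         //     int32_t gt_high = rhs_high < lhs_high;   // Slt(TMP, rhs_high, lhs_high).
         //     int32_t lt_low = lhs_low < rhs_low;      // Sltu(AT, lhs_low, rhs_low).
         //     int32_t low_decides = gt_high < lt_low;  // 1 only if !gt_high && lt_low.
         //     int32_t lt_high = lhs_high < rhs_high;   // Slt(AT, lhs_high, rhs_high).
         //     return lt_high | low_decides;            // Or(dst, AT, TMP).
         //   }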
   4533 
   4534 void InstructionCodeGeneratorMIPS::GenerateLongCompareAndBranch(IfCondition cond,
   4535                                                                 LocationSummary* locations,
   4536                                                                 MipsLabel* label) {
   4537   Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
   4538   Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
   4539   Location rhs_location = locations->InAt(1);
   4540   Register rhs_high = ZERO;
   4541   Register rhs_low = ZERO;
   4542   int64_t imm = 0;
   4543   uint32_t imm_high = 0;
   4544   uint32_t imm_low = 0;
   4545   bool use_imm = rhs_location.IsConstant();
   4546   if (use_imm) {
   4547     imm = rhs_location.GetConstant()->AsLongConstant()->GetValue();
   4548     imm_high = High32Bits(imm);
   4549     imm_low = Low32Bits(imm);
   4550   } else {
   4551     rhs_high = rhs_location.AsRegisterPairHigh<Register>();
   4552     rhs_low = rhs_location.AsRegisterPairLow<Register>();
   4553   }
   4554 
   4555   if (use_imm && imm == 0) {
   4556     switch (cond) {
   4557       case kCondEQ:
    4558       case kCondBE:  // Unsigned <= 0 is equivalent to == 0.
   4559         __ Or(TMP, lhs_high, lhs_low);
   4560         __ Beqz(TMP, label);
   4561         break;
   4562       case kCondNE:
    4563       case kCondA:  // Unsigned > 0 is equivalent to != 0.
   4564         __ Or(TMP, lhs_high, lhs_low);
   4565         __ Bnez(TMP, label);
   4566         break;
   4567       case kCondLT:
   4568         __ Bltz(lhs_high, label);
   4569         break;
   4570       case kCondGE:
   4571         __ Bgez(lhs_high, label);
   4572         break;
   4573       case kCondLE:
   4574         __ Or(TMP, lhs_high, lhs_low);
   4575         __ Sra(AT, lhs_high, 31);
   4576         __ Bgeu(AT, TMP, label);
   4577         break;
   4578       case kCondGT:
   4579         __ Or(TMP, lhs_high, lhs_low);
   4580         __ Sra(AT, lhs_high, 31);
   4581         __ Bltu(AT, TMP, label);
   4582         break;
   4583       case kCondB:  // always false
   4584         break;
   4585       case kCondAE:  // always true
   4586         __ B(label);
   4587         break;
   4588     }
   4589   } else if (use_imm) {
   4590     // TODO: more efficient comparison with constants without loading them into TMP/AT.
   4591     switch (cond) {
   4592       case kCondEQ:
   4593         __ LoadConst32(TMP, imm_high);
   4594         __ Xor(TMP, TMP, lhs_high);
   4595         __ LoadConst32(AT, imm_low);
   4596         __ Xor(AT, AT, lhs_low);
   4597         __ Or(TMP, TMP, AT);
   4598         __ Beqz(TMP, label);
   4599         break;
   4600       case kCondNE:
   4601         __ LoadConst32(TMP, imm_high);
   4602         __ Xor(TMP, TMP, lhs_high);
   4603         __ LoadConst32(AT, imm_low);
   4604         __ Xor(AT, AT, lhs_low);
   4605         __ Or(TMP, TMP, AT);
   4606         __ Bnez(TMP, label);
   4607         break;
   4608       case kCondLT:
   4609         __ LoadConst32(TMP, imm_high);
   4610         __ Blt(lhs_high, TMP, label);
   4611         __ Slt(TMP, TMP, lhs_high);
   4612         __ LoadConst32(AT, imm_low);
   4613         __ Sltu(AT, lhs_low, AT);
   4614         __ Blt(TMP, AT, label);
   4615         break;
   4616       case kCondGE:
   4617         __ LoadConst32(TMP, imm_high);
   4618         __ Blt(TMP, lhs_high, label);
   4619         __ Slt(TMP, lhs_high, TMP);
   4620         __ LoadConst32(AT, imm_low);
   4621         __ Sltu(AT, lhs_low, AT);
   4622         __ Or(TMP, TMP, AT);
   4623         __ Beqz(TMP, label);
   4624         break;
   4625       case kCondLE:
   4626         __ LoadConst32(TMP, imm_high);
   4627         __ Blt(lhs_high, TMP, label);
   4628         __ Slt(TMP, TMP, lhs_high);
   4629         __ LoadConst32(AT, imm_low);
   4630         __ Sltu(AT, AT, lhs_low);
   4631         __ Or(TMP, TMP, AT);
   4632         __ Beqz(TMP, label);
   4633         break;
   4634       case kCondGT:
   4635         __ LoadConst32(TMP, imm_high);
   4636         __ Blt(TMP, lhs_high, label);
   4637         __ Slt(TMP, lhs_high, TMP);
   4638         __ LoadConst32(AT, imm_low);
   4639         __ Sltu(AT, AT, lhs_low);
   4640         __ Blt(TMP, AT, label);
   4641         break;
   4642       case kCondB:
   4643         __ LoadConst32(TMP, imm_high);
   4644         __ Bltu(lhs_high, TMP, label);
   4645         __ Sltu(TMP, TMP, lhs_high);
   4646         __ LoadConst32(AT, imm_low);
   4647         __ Sltu(AT, lhs_low, AT);
   4648         __ Blt(TMP, AT, label);
   4649         break;
   4650       case kCondAE:
   4651         __ LoadConst32(TMP, imm_high);
   4652         __ Bltu(TMP, lhs_high, label);
   4653         __ Sltu(TMP, lhs_high, TMP);
   4654         __ LoadConst32(AT, imm_low);
   4655         __ Sltu(AT, lhs_low, AT);
   4656         __ Or(TMP, TMP, AT);
   4657         __ Beqz(TMP, label);
   4658         break;
   4659       case kCondBE:
   4660         __ LoadConst32(TMP, imm_high);
   4661         __ Bltu(lhs_high, TMP, label);
   4662         __ Sltu(TMP, TMP, lhs_high);
   4663         __ LoadConst32(AT, imm_low);
   4664         __ Sltu(AT, AT, lhs_low);
   4665         __ Or(TMP, TMP, AT);
   4666         __ Beqz(TMP, label);
   4667         break;
   4668       case kCondA:
   4669         __ LoadConst32(TMP, imm_high);
   4670         __ Bltu(TMP, lhs_high, label);
   4671         __ Sltu(TMP, lhs_high, TMP);
   4672         __ LoadConst32(AT, imm_low);
   4673         __ Sltu(AT, AT, lhs_low);
   4674         __ Blt(TMP, AT, label);
   4675         break;
   4676     }
   4677   } else {
   4678     switch (cond) {
   4679       case kCondEQ:
   4680         __ Xor(TMP, lhs_high, rhs_high);
   4681         __ Xor(AT, lhs_low, rhs_low);
   4682         __ Or(TMP, TMP, AT);
   4683         __ Beqz(TMP, label);
   4684         break;
   4685       case kCondNE:
   4686         __ Xor(TMP, lhs_high, rhs_high);
   4687         __ Xor(AT, lhs_low, rhs_low);
   4688         __ Or(TMP, TMP, AT);
   4689         __ Bnez(TMP, label);
   4690         break;
   4691       case kCondLT:
   4692         __ Blt(lhs_high, rhs_high, label);
   4693         __ Slt(TMP, rhs_high, lhs_high);
   4694         __ Sltu(AT, lhs_low, rhs_low);
   4695         __ Blt(TMP, AT, label);
   4696         break;
   4697       case kCondGE:
   4698         __ Blt(rhs_high, lhs_high, label);
   4699         __ Slt(TMP, lhs_high, rhs_high);
   4700         __ Sltu(AT, lhs_low, rhs_low);
   4701         __ Or(TMP, TMP, AT);
   4702         __ Beqz(TMP, label);
   4703         break;
   4704       case kCondLE:
   4705         __ Blt(lhs_high, rhs_high, label);
   4706         __ Slt(TMP, rhs_high, lhs_high);
   4707         __ Sltu(AT, rhs_low, lhs_low);
   4708         __ Or(TMP, TMP, AT);
   4709         __ Beqz(TMP, label);
   4710         break;
   4711       case kCondGT:
   4712         __ Blt(rhs_high, lhs_high, label);
   4713         __ Slt(TMP, lhs_high, rhs_high);
   4714         __ Sltu(AT, rhs_low, lhs_low);
   4715         __ Blt(TMP, AT, label);
   4716         break;
   4717       case kCondB:
   4718         __ Bltu(lhs_high, rhs_high, label);
   4719         __ Sltu(TMP, rhs_high, lhs_high);
   4720         __ Sltu(AT, lhs_low, rhs_low);
   4721         __ Blt(TMP, AT, label);
   4722         break;
   4723       case kCondAE:
   4724         __ Bltu(rhs_high, lhs_high, label);
   4725         __ Sltu(TMP, lhs_high, rhs_high);
   4726         __ Sltu(AT, lhs_low, rhs_low);
   4727         __ Or(TMP, TMP, AT);
   4728         __ Beqz(TMP, label);
   4729         break;
   4730       case kCondBE:
   4731         __ Bltu(lhs_high, rhs_high, label);
   4732         __ Sltu(TMP, rhs_high, lhs_high);
   4733         __ Sltu(AT, rhs_low, lhs_low);
   4734         __ Or(TMP, TMP, AT);
   4735         __ Beqz(TMP, label);
   4736         break;
   4737       case kCondA:
   4738         __ Bltu(rhs_high, lhs_high, label);
   4739         __ Sltu(TMP, lhs_high, rhs_high);
   4740         __ Sltu(AT, rhs_low, lhs_low);
   4741         __ Blt(TMP, AT, label);
   4742         break;
   4743     }
   4744   }
   4745 }
   4746 
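         // In the floating-point comparisons below, gt_bias selects how an
         // unordered comparison (a NaN operand) resolves: with gt_bias NaN makes
         // the pair compare as "greater", so "less than"-style conditions must
         // be false on NaN and the ordered predicates (CmpLtS/ColtS, ...) are
         // used; without it NaN compares as "less" and the unordered forms
         // (CmpUltS/CultS, ...) are used instead. Illustration (not emitted
         // code) for the kCondLT row, with std::isnan from <cmath>:
         //
         //   bool LtWithBias(float lhs, float rhs, bool gt_bias) {
         //     if (std::isnan(lhs) || std::isnan(rhs)) {
         //       return !gt_bias;  // Unordered: gt bias -> false, lt bias -> true.
         //     }
         //     return lhs < rhs;
         //   }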
   4747 void InstructionCodeGeneratorMIPS::GenerateFpCompare(IfCondition cond,
   4748                                                      bool gt_bias,
   4749                                                      Primitive::Type type,
   4750                                                      LocationSummary* locations) {
   4751   Register dst = locations->Out().AsRegister<Register>();
   4752   FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
   4753   FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
   4754   bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   4755   if (type == Primitive::kPrimFloat) {
   4756     if (isR6) {
   4757       switch (cond) {
   4758         case kCondEQ:
   4759           __ CmpEqS(FTMP, lhs, rhs);
   4760           __ Mfc1(dst, FTMP);
   4761           __ Andi(dst, dst, 1);
   4762           break;
   4763         case kCondNE:
   4764           __ CmpEqS(FTMP, lhs, rhs);
   4765           __ Mfc1(dst, FTMP);
          __ Addiu(dst, dst, 1);
          break;
        case kCondLT:
          if (gt_bias) {
            __ CmpLtS(FTMP, lhs, rhs);
          } else {
            __ CmpUltS(FTMP, lhs, rhs);
          }
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        case kCondLE:
          if (gt_bias) {
            __ CmpLeS(FTMP, lhs, rhs);
          } else {
            __ CmpUleS(FTMP, lhs, rhs);
          }
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        case kCondGT:
          if (gt_bias) {
            __ CmpUltS(FTMP, rhs, lhs);
          } else {
            __ CmpLtS(FTMP, rhs, lhs);
          }
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        case kCondGE:
          if (gt_bias) {
            __ CmpUleS(FTMP, rhs, lhs);
          } else {
            __ CmpLeS(FTMP, rhs, lhs);
          }
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        default:
          LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
          UNREACHABLE();
      }
    } else {
      switch (cond) {
        case kCondEQ:
          __ CeqS(0, lhs, rhs);
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        case kCondNE:
          __ CeqS(0, lhs, rhs);
          __ LoadConst32(dst, 1);
          __ Movt(dst, ZERO, 0);
          break;
        case kCondLT:
          if (gt_bias) {
            __ ColtS(0, lhs, rhs);
          } else {
            __ CultS(0, lhs, rhs);
          }
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        case kCondLE:
          if (gt_bias) {
            __ ColeS(0, lhs, rhs);
          } else {
            __ CuleS(0, lhs, rhs);
          }
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        case kCondGT:
          if (gt_bias) {
            __ CultS(0, rhs, lhs);
          } else {
            __ ColtS(0, rhs, lhs);
          }
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        case kCondGE:
          if (gt_bias) {
            __ CuleS(0, rhs, lhs);
          } else {
            __ ColeS(0, rhs, lhs);
          }
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        default:
          LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
          UNREACHABLE();
      }
    }
  } else {
    DCHECK_EQ(type, Primitive::kPrimDouble);
    if (isR6) {
      switch (cond) {
        case kCondEQ:
          __ CmpEqD(FTMP, lhs, rhs);
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        case kCondNE:
          __ CmpEqD(FTMP, lhs, rhs);
          __ Mfc1(dst, FTMP);
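          // Same wrap-around trick as in the float kCondNE case above.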
          __ Addiu(dst, dst, 1);
          break;
        case kCondLT:
          if (gt_bias) {
            __ CmpLtD(FTMP, lhs, rhs);
          } else {
            __ CmpUltD(FTMP, lhs, rhs);
          }
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        case kCondLE:
          if (gt_bias) {
            __ CmpLeD(FTMP, lhs, rhs);
          } else {
            __ CmpUleD(FTMP, lhs, rhs);
          }
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        case kCondGT:
          if (gt_bias) {
            __ CmpUltD(FTMP, rhs, lhs);
          } else {
            __ CmpLtD(FTMP, rhs, lhs);
          }
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        case kCondGE:
          if (gt_bias) {
            __ CmpUleD(FTMP, rhs, lhs);
          } else {
            __ CmpLeD(FTMP, rhs, lhs);
          }
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        default:
          LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
          UNREACHABLE();
      }
    } else {
      switch (cond) {
        case kCondEQ:
          __ CeqD(0, lhs, rhs);
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        case kCondNE:
          __ CeqD(0, lhs, rhs);
          __ LoadConst32(dst, 1);
          __ Movt(dst, ZERO, 0);
          break;
        case kCondLT:
          if (gt_bias) {
            __ ColtD(0, lhs, rhs);
          } else {
            __ CultD(0, lhs, rhs);
          }
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        case kCondLE:
          if (gt_bias) {
            __ ColeD(0, lhs, rhs);
          } else {
            __ CuleD(0, lhs, rhs);
          }
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        case kCondGT:
          if (gt_bias) {
            __ CultD(0, rhs, lhs);
          } else {
            __ ColtD(0, rhs, lhs);
          }
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        case kCondGE:
          if (gt_bias) {
            __ CuleD(0, rhs, lhs);
          } else {
            __ ColeD(0, rhs, lhs);
          }
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        default:
          LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
          UNREACHABLE();
      }
    }
  }
}

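// Materializes an FP comparison into condition code `cc` (R2 only). Returns
// true if the flag holds the *opposite* of `cond` (e.g. kCondNE is emitted as
// c.eq), in which case the caller must test the flag with Movf/Bc1f instead
// of Movt/Bc1t.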
bool InstructionCodeGeneratorMIPS::MaterializeFpCompareR2(IfCondition cond,
                                                          bool gt_bias,
                                                          Primitive::Type type,
                                                          LocationSummary* input_locations,
                                                          int cc) {
  FRegister lhs = input_locations->InAt(0).AsFpuRegister<FRegister>();
  FRegister rhs = input_locations->InAt(1).AsFpuRegister<FRegister>();
  CHECK(!codegen_->GetInstructionSetFeatures().IsR6());
  if (type == Primitive::kPrimFloat) {
    switch (cond) {
      case kCondEQ:
        __ CeqS(cc, lhs, rhs);
        return false;
      case kCondNE:
        __ CeqS(cc, lhs, rhs);
        return true;
      case kCondLT:
        if (gt_bias) {
          __ ColtS(cc, lhs, rhs);
        } else {
          __ CultS(cc, lhs, rhs);
        }
        return false;
      case kCondLE:
        if (gt_bias) {
          __ ColeS(cc, lhs, rhs);
        } else {
          __ CuleS(cc, lhs, rhs);
        }
        return false;
      case kCondGT:
        if (gt_bias) {
          __ CultS(cc, rhs, lhs);
        } else {
          __ ColtS(cc, rhs, lhs);
        }
        return false;
      case kCondGE:
        if (gt_bias) {
          __ CuleS(cc, rhs, lhs);
        } else {
          __ ColeS(cc, rhs, lhs);
        }
        return false;
      default:
        LOG(FATAL) << "Unexpected non-floating-point condition";
        UNREACHABLE();
    }
  } else {
    DCHECK_EQ(type, Primitive::kPrimDouble);
    switch (cond) {
      case kCondEQ:
        __ CeqD(cc, lhs, rhs);
        return false;
      case kCondNE:
        __ CeqD(cc, lhs, rhs);
        return true;
      case kCondLT:
        if (gt_bias) {
          __ ColtD(cc, lhs, rhs);
        } else {
          __ CultD(cc, lhs, rhs);
        }
        return false;
      case kCondLE:
        if (gt_bias) {
          __ ColeD(cc, lhs, rhs);
        } else {
          __ CuleD(cc, lhs, rhs);
        }
        return false;
      case kCondGT:
        if (gt_bias) {
          __ CultD(cc, rhs, lhs);
        } else {
          __ ColtD(cc, rhs, lhs);
        }
        return false;
      case kCondGE:
        if (gt_bias) {
          __ CuleD(cc, rhs, lhs);
        } else {
          __ ColeD(cc, rhs, lhs);
        }
        return false;
      default:
        LOG(FATAL) << "Unexpected non-floating-point condition";
        UNREACHABLE();
    }
  }
}

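// R6 counterpart of the above: materializes the comparison as an all-ones/
// all-zeros mask in FPU register `dst`. The return value has the same
// "inverted" meaning as in MaterializeFpCompareR2.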
bool InstructionCodeGeneratorMIPS::MaterializeFpCompareR6(IfCondition cond,
                                                          bool gt_bias,
                                                          Primitive::Type type,
                                                          LocationSummary* input_locations,
                                                          FRegister dst) {
  FRegister lhs = input_locations->InAt(0).AsFpuRegister<FRegister>();
  FRegister rhs = input_locations->InAt(1).AsFpuRegister<FRegister>();
  CHECK(codegen_->GetInstructionSetFeatures().IsR6());
  if (type == Primitive::kPrimFloat) {
    switch (cond) {
      case kCondEQ:
        __ CmpEqS(dst, lhs, rhs);
        return false;
      case kCondNE:
        __ CmpEqS(dst, lhs, rhs);
        return true;
      case kCondLT:
        if (gt_bias) {
          __ CmpLtS(dst, lhs, rhs);
        } else {
          __ CmpUltS(dst, lhs, rhs);
        }
        return false;
      case kCondLE:
        if (gt_bias) {
          __ CmpLeS(dst, lhs, rhs);
        } else {
          __ CmpUleS(dst, lhs, rhs);
        }
        return false;
      case kCondGT:
        if (gt_bias) {
          __ CmpUltS(dst, rhs, lhs);
        } else {
          __ CmpLtS(dst, rhs, lhs);
        }
        return false;
      case kCondGE:
        if (gt_bias) {
          __ CmpUleS(dst, rhs, lhs);
        } else {
          __ CmpLeS(dst, rhs, lhs);
        }
        return false;
      default:
        LOG(FATAL) << "Unexpected non-floating-point condition";
        UNREACHABLE();
    }
  } else {
    DCHECK_EQ(type, Primitive::kPrimDouble);
    switch (cond) {
      case kCondEQ:
        __ CmpEqD(dst, lhs, rhs);
        return false;
      case kCondNE:
        __ CmpEqD(dst, lhs, rhs);
        return true;
      case kCondLT:
        if (gt_bias) {
          __ CmpLtD(dst, lhs, rhs);
        } else {
          __ CmpUltD(dst, lhs, rhs);
        }
        return false;
      case kCondLE:
        if (gt_bias) {
          __ CmpLeD(dst, lhs, rhs);
        } else {
          __ CmpUleD(dst, lhs, rhs);
        }
        return false;
      case kCondGT:
        if (gt_bias) {
          __ CmpUltD(dst, rhs, lhs);
        } else {
          __ CmpLtD(dst, rhs, lhs);
        }
        return false;
      case kCondGE:
        if (gt_bias) {
          __ CmpUleD(dst, rhs, lhs);
        } else {
          __ CmpLeD(dst, rhs, lhs);
        }
        return false;
      default:
        LOG(FATAL) << "Unexpected non-floating-point condition";
        UNREACHABLE();
    }
  }
}

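// Emits an FP comparison followed by a branch to `label` when the condition
// holds: Bc1nez/Bc1eqz on the R6 mask register, Bc1t/Bc1f on R2's condition
// code 0.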
void InstructionCodeGeneratorMIPS::GenerateFpCompareAndBranch(IfCondition cond,
                                                              bool gt_bias,
                                                              Primitive::Type type,
                                                              LocationSummary* locations,
                                                              MipsLabel* label) {
  FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
  FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
  if (type == Primitive::kPrimFloat) {
    if (isR6) {
      switch (cond) {
        case kCondEQ:
          __ CmpEqS(FTMP, lhs, rhs);
          __ Bc1nez(FTMP, label);
          break;
        case kCondNE:
          __ CmpEqS(FTMP, lhs, rhs);
          __ Bc1eqz(FTMP, label);
          break;
        case kCondLT:
          if (gt_bias) {
            __ CmpLtS(FTMP, lhs, rhs);
          } else {
            __ CmpUltS(FTMP, lhs, rhs);
          }
          __ Bc1nez(FTMP, label);
          break;
        case kCondLE:
          if (gt_bias) {
            __ CmpLeS(FTMP, lhs, rhs);
          } else {
            __ CmpUleS(FTMP, lhs, rhs);
          }
          __ Bc1nez(FTMP, label);
          break;
        case kCondGT:
          if (gt_bias) {
            __ CmpUltS(FTMP, rhs, lhs);
          } else {
            __ CmpLtS(FTMP, rhs, lhs);
          }
          __ Bc1nez(FTMP, label);
          break;
        case kCondGE:
          if (gt_bias) {
            __ CmpUleS(FTMP, rhs, lhs);
          } else {
            __ CmpLeS(FTMP, rhs, lhs);
          }
          __ Bc1nez(FTMP, label);
          break;
        default:
          LOG(FATAL) << "Unexpected non-floating-point condition";
          UNREACHABLE();
      }
    } else {
      switch (cond) {
        case kCondEQ:
          __ CeqS(0, lhs, rhs);
          __ Bc1t(0, label);
          break;
        case kCondNE:
          __ CeqS(0, lhs, rhs);
          __ Bc1f(0, label);
          break;
        case kCondLT:
          if (gt_bias) {
            __ ColtS(0, lhs, rhs);
          } else {
            __ CultS(0, lhs, rhs);
          }
          __ Bc1t(0, label);
          break;
        case kCondLE:
          if (gt_bias) {
            __ ColeS(0, lhs, rhs);
          } else {
            __ CuleS(0, lhs, rhs);
          }
          __ Bc1t(0, label);
          break;
        case kCondGT:
          if (gt_bias) {
            __ CultS(0, rhs, lhs);
          } else {
            __ ColtS(0, rhs, lhs);
          }
          __ Bc1t(0, label);
          break;
        case kCondGE:
          if (gt_bias) {
            __ CuleS(0, rhs, lhs);
          } else {
            __ ColeS(0, rhs, lhs);
          }
          __ Bc1t(0, label);
          break;
        default:
          LOG(FATAL) << "Unexpected non-floating-point condition";
          UNREACHABLE();
      }
    }
  } else {
    DCHECK_EQ(type, Primitive::kPrimDouble);
    if (isR6) {
      switch (cond) {
        case kCondEQ:
          __ CmpEqD(FTMP, lhs, rhs);
          __ Bc1nez(FTMP, label);
          break;
        case kCondNE:
          __ CmpEqD(FTMP, lhs, rhs);
          __ Bc1eqz(FTMP, label);
          break;
        case kCondLT:
          if (gt_bias) {
            __ CmpLtD(FTMP, lhs, rhs);
          } else {
            __ CmpUltD(FTMP, lhs, rhs);
          }
          __ Bc1nez(FTMP, label);
          break;
        case kCondLE:
          if (gt_bias) {
            __ CmpLeD(FTMP, lhs, rhs);
          } else {
            __ CmpUleD(FTMP, lhs, rhs);
          }
          __ Bc1nez(FTMP, label);
          break;
        case kCondGT:
          if (gt_bias) {
            __ CmpUltD(FTMP, rhs, lhs);
          } else {
            __ CmpLtD(FTMP, rhs, lhs);
          }
          __ Bc1nez(FTMP, label);
          break;
        case kCondGE:
          if (gt_bias) {
            __ CmpUleD(FTMP, rhs, lhs);
          } else {
            __ CmpLeD(FTMP, rhs, lhs);
          }
          __ Bc1nez(FTMP, label);
          break;
        default:
          LOG(FATAL) << "Unexpected non-floating-point condition";
          UNREACHABLE();
      }
    } else {
      switch (cond) {
        case kCondEQ:
          __ CeqD(0, lhs, rhs);
          __ Bc1t(0, label);
          break;
        case kCondNE:
          __ CeqD(0, lhs, rhs);
          __ Bc1f(0, label);
          break;
        case kCondLT:
          if (gt_bias) {
            __ ColtD(0, lhs, rhs);
          } else {
            __ CultD(0, lhs, rhs);
          }
          __ Bc1t(0, label);
          break;
        case kCondLE:
          if (gt_bias) {
            __ ColeD(0, lhs, rhs);
          } else {
            __ CuleD(0, lhs, rhs);
          }
          __ Bc1t(0, label);
          break;
        case kCondGT:
          if (gt_bias) {
            __ CultD(0, rhs, lhs);
          } else {
            __ ColtD(0, rhs, lhs);
          }
          __ Bc1t(0, label);
          break;
        case kCondGE:
          if (gt_bias) {
            __ CuleD(0, rhs, lhs);
          } else {
            __ ColeD(0, rhs, lhs);
          }
          __ Bc1t(0, label);
          break;
        default:
          LOG(FATAL) << "Unexpected non-floating-point condition";
          UNREACHABLE();
      }
    }
  }
}

void InstructionCodeGeneratorMIPS::GenerateTestAndBranch(HInstruction* instruction,
                                                         size_t condition_input_index,
                                                         MipsLabel* true_target,
                                                         MipsLabel* false_target) {
  HInstruction* cond = instruction->InputAt(condition_input_index);

  if (true_target == nullptr && false_target == nullptr) {
    // Nothing to do. The code always falls through.
    return;
  } else if (cond->IsIntConstant()) {
    // Constant condition, statically compared against "true" (integer value 1).
    if (cond->AsIntConstant()->IsTrue()) {
      if (true_target != nullptr) {
        __ B(true_target);
      }
    } else {
      DCHECK(cond->AsIntConstant()->IsFalse()) << cond->AsIntConstant()->GetValue();
      if (false_target != nullptr) {
        __ B(false_target);
      }
    }
    return;
  }

  // The following code generates these patterns:
  //  (1) true_target == nullptr && false_target != nullptr
  //        - opposite condition true => branch to false_target
  //  (2) true_target != nullptr && false_target == nullptr
  //        - condition true => branch to true_target
  //  (3) true_target != nullptr && false_target != nullptr
  //        - condition true => branch to true_target
  //        - branch to false_target
  if (IsBooleanValueOrMaterializedCondition(cond)) {
    // The condition instruction has been materialized, compare the output to 0.
    Location cond_val = instruction->GetLocations()->InAt(condition_input_index);
    DCHECK(cond_val.IsRegister());
    if (true_target == nullptr) {
      __ Beqz(cond_val.AsRegister<Register>(), false_target);
    } else {
      __ Bnez(cond_val.AsRegister<Register>(), true_target);
    }
  } else {
    // The condition instruction has not been materialized, use its inputs as
    // the comparison and its condition as the branch condition.
    HCondition* condition = cond->AsCondition();
    Primitive::Type type = condition->InputAt(0)->GetType();
    LocationSummary* locations = cond->GetLocations();
    IfCondition if_cond = condition->GetCondition();
    MipsLabel* branch_target = true_target;

    if (true_target == nullptr) {
      if_cond = condition->GetOppositeCondition();
      branch_target = false_target;
    }

    switch (type) {
      default:
        GenerateIntCompareAndBranch(if_cond, locations, branch_target);
        break;
      case Primitive::kPrimLong:
        GenerateLongCompareAndBranch(if_cond, locations, branch_target);
        break;
      case Primitive::kPrimFloat:
      case Primitive::kPrimDouble:
        GenerateFpCompareAndBranch(if_cond, condition->IsGtBias(), type, locations, branch_target);
        break;
    }
  }

  // If neither branch falls through (case 3), the conditional branch to `true_target`
  // was already emitted (case 2) and we need to emit a jump to `false_target`.
  if (true_target != nullptr && false_target != nullptr) {
    __ B(false_target);
  }
}

void LocationsBuilderMIPS::VisitIf(HIf* if_instr) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
  if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorMIPS::VisitIf(HIf* if_instr) {
  HBasicBlock* true_successor = if_instr->IfTrueSuccessor();
  HBasicBlock* false_successor = if_instr->IfFalseSuccessor();
  MipsLabel* true_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ?
      nullptr : codegen_->GetLabelOf(true_successor);
  MipsLabel* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
      nullptr : codegen_->GetLabelOf(false_successor);
  GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
}

void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
  LocationSummary* locations = new (GetGraph()->GetArena())
      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
  InvokeRuntimeCallingConvention calling_convention;
  RegisterSet caller_saves = RegisterSet::Empty();
  caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetCustomSlowPathCallerSaves(caller_saves);
  if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
  SlowPathCodeMIPS* slow_path =
      deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS>(deoptimize);
  GenerateTestAndBranch(deoptimize,
                        /* condition_input_index */ 0,
                        slow_path->GetEntryLabel(),
                        /* false_target */ nullptr);
}

// This function returns true if a conditional move can be generated for HSelect.
// Otherwise it returns false and HSelect must be implemented in terms of conditional
// branches and regular moves.
//
// If `locations_to_set` isn't nullptr, its inputs and outputs are set for HSelect.
//
// While determining feasibility of a conditional move and setting inputs/outputs
// are two distinct tasks, this function does both because they share quite a bit
// of common logic.
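//
// Note: HSelect's inputs are (0) the value to use when the condition is false,
// (1) the value to use when it is true, and (2) the condition itself.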
static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* locations_to_set) {
  bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition());
  HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
  HCondition* condition = cond->AsCondition();

  Primitive::Type cond_type = materialized ? Primitive::kPrimInt : condition->InputAt(0)->GetType();
  Primitive::Type dst_type = select->GetType();

  HConstant* cst_true_value = select->GetTrueValue()->AsConstant();
  HConstant* cst_false_value = select->GetFalseValue()->AsConstant();
  bool is_true_value_zero_constant =
      (cst_true_value != nullptr && cst_true_value->IsZeroBitPattern());
  bool is_false_value_zero_constant =
      (cst_false_value != nullptr && cst_false_value->IsZeroBitPattern());

  bool can_move_conditionally = false;
  bool use_const_for_false_in = false;
  bool use_const_for_true_in = false;

  if (!cond->IsConstant()) {
    switch (cond_type) {
      default:
        switch (dst_type) {
          default:
            // Moving int on int condition.
            if (is_r6) {
              if (is_true_value_zero_constant) {
                // seleqz out_reg, false_reg, cond_reg
                can_move_conditionally = true;
                use_const_for_true_in = true;
              } else if (is_false_value_zero_constant) {
                // selnez out_reg, true_reg, cond_reg
                can_move_conditionally = true;
                use_const_for_false_in = true;
              } else if (materialized) {
                // Not materializing unmaterialized int conditions
                // to keep the instruction count low.
                // selnez AT, true_reg, cond_reg
                // seleqz TMP, false_reg, cond_reg
                // or out_reg, AT, TMP
                can_move_conditionally = true;
              }
            } else {
              // movn out_reg, true_reg/ZERO, cond_reg
              can_move_conditionally = true;
              use_const_for_true_in = is_true_value_zero_constant;
            }
            break;
          case Primitive::kPrimLong:
            // Moving long on int condition.
            if (is_r6) {
              if (is_true_value_zero_constant) {
                // seleqz out_reg_lo, false_reg_lo, cond_reg
                // seleqz out_reg_hi, false_reg_hi, cond_reg
                can_move_conditionally = true;
                use_const_for_true_in = true;
              } else if (is_false_value_zero_constant) {
                // selnez out_reg_lo, true_reg_lo, cond_reg
                // selnez out_reg_hi, true_reg_hi, cond_reg
                can_move_conditionally = true;
                use_const_for_false_in = true;
              }
              // Other long conditional moves would generate 6+ instructions,
              // which is too many.
            } else {
              // movn out_reg_lo, true_reg_lo/ZERO, cond_reg
              // movn out_reg_hi, true_reg_hi/ZERO, cond_reg
              can_move_conditionally = true;
              use_const_for_true_in = is_true_value_zero_constant;
            }
            break;
          case Primitive::kPrimFloat:
          case Primitive::kPrimDouble:
            // Moving float/double on int condition.
            if (is_r6) {
              if (materialized) {
                // Not materializing unmaterialized int conditions
                // to keep the instruction count low.
                can_move_conditionally = true;
                if (is_true_value_zero_constant) {
                  // sltu TMP, ZERO, cond_reg
                  // mtc1 TMP, temp_cond_reg
                  // seleqz.fmt out_reg, false_reg, temp_cond_reg
                  use_const_for_true_in = true;
                } else if (is_false_value_zero_constant) {
                  // sltu TMP, ZERO, cond_reg
                  // mtc1 TMP, temp_cond_reg
                  // selnez.fmt out_reg, true_reg, temp_cond_reg
                  use_const_for_false_in = true;
                } else {
                  // sltu TMP, ZERO, cond_reg
                  // mtc1 TMP, temp_cond_reg
                  // sel.fmt temp_cond_reg, false_reg, true_reg
                  // mov.fmt out_reg, temp_cond_reg
                }
              }
            } else {
              // movn.fmt out_reg, true_reg, cond_reg
              can_move_conditionally = true;
            }
            break;
        }
        break;
      case Primitive::kPrimLong:
        // We don't materialize long comparisons now
        // and use conditional branches instead.
        break;
      case Primitive::kPrimFloat:
      case Primitive::kPrimDouble:
        switch (dst_type) {
          default:
            // Moving int on float/double condition.
            if (is_r6) {
              if (is_true_value_zero_constant) {
                // mfc1 TMP, temp_cond_reg
                // seleqz out_reg, false_reg, TMP
                can_move_conditionally = true;
                use_const_for_true_in = true;
              } else if (is_false_value_zero_constant) {
                // mfc1 TMP, temp_cond_reg
                // selnez out_reg, true_reg, TMP
                can_move_conditionally = true;
                use_const_for_false_in = true;
              } else {
                // mfc1 TMP, temp_cond_reg
                // selnez AT, true_reg, TMP
                // seleqz TMP, false_reg, TMP
                // or out_reg, AT, TMP
                can_move_conditionally = true;
              }
            } else {
              // movt out_reg, true_reg/ZERO, cc
              can_move_conditionally = true;
              use_const_for_true_in = is_true_value_zero_constant;
            }
            break;
          case Primitive::kPrimLong:
            // Moving long on float/double condition.
            if (is_r6) {
              if (is_true_value_zero_constant) {
                // mfc1 TMP, temp_cond_reg
                // seleqz out_reg_lo, false_reg_lo, TMP
                // seleqz out_reg_hi, false_reg_hi, TMP
                can_move_conditionally = true;
                use_const_for_true_in = true;
              } else if (is_false_value_zero_constant) {
                // mfc1 TMP, temp_cond_reg
                // selnez out_reg_lo, true_reg_lo, TMP
                // selnez out_reg_hi, true_reg_hi, TMP
                can_move_conditionally = true;
                use_const_for_false_in = true;
              }
              // Other long conditional moves would generate 6+ instructions,
              // which is too many.
            } else {
              // movt out_reg_lo, true_reg_lo/ZERO, cc
              // movt out_reg_hi, true_reg_hi/ZERO, cc
              can_move_conditionally = true;
              use_const_for_true_in = is_true_value_zero_constant;
            }
            break;
          case Primitive::kPrimFloat:
          case Primitive::kPrimDouble:
            // Moving float/double on float/double condition.
            if (is_r6) {
              can_move_conditionally = true;
              if (is_true_value_zero_constant) {
                // seleqz.fmt out_reg, false_reg, temp_cond_reg
                use_const_for_true_in = true;
              } else if (is_false_value_zero_constant) {
                // selnez.fmt out_reg, true_reg, temp_cond_reg
                use_const_for_false_in = true;
              } else {
                // sel.fmt temp_cond_reg, false_reg, true_reg
                // mov.fmt out_reg, temp_cond_reg
              }
            } else {
              // movt.fmt out_reg, true_reg, cc
              can_move_conditionally = true;
            }
            break;
        }
        break;
    }
  }

  if (can_move_conditionally) {
    DCHECK(!use_const_for_false_in || !use_const_for_true_in);
  } else {
    DCHECK(!use_const_for_false_in);
    DCHECK(!use_const_for_true_in);
  }

  if (locations_to_set != nullptr) {
    if (use_const_for_false_in) {
      locations_to_set->SetInAt(0, Location::ConstantLocation(cst_false_value));
    } else {
      locations_to_set->SetInAt(0,
                                Primitive::IsFloatingPointType(dst_type)
                                    ? Location::RequiresFpuRegister()
                                    : Location::RequiresRegister());
    }
    if (use_const_for_true_in) {
      locations_to_set->SetInAt(1, Location::ConstantLocation(cst_true_value));
    } else {
      locations_to_set->SetInAt(1,
                                Primitive::IsFloatingPointType(dst_type)
                                    ? Location::RequiresFpuRegister()
                                    : Location::RequiresRegister());
    }
    if (materialized) {
      locations_to_set->SetInAt(2, Location::RequiresRegister());
    }
    // On R6 we don't require the output to be the same as the
    // first input for conditional moves unlike on R2.
    bool is_out_same_as_first_in = !can_move_conditionally || !is_r6;
    if (is_out_same_as_first_in) {
      locations_to_set->SetOut(Location::SameAsFirstInput());
    } else {
      locations_to_set->SetOut(Primitive::IsFloatingPointType(dst_type)
                                   ? Location::RequiresFpuRegister()
                                   : Location::RequiresRegister());
    }
  }

  return can_move_conditionally;
}

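// Lowers HSelect to R2 conditional moves: Movz/Movn on an integer condition
// register, Movf/Movt on FP condition code 0. On R2 the output is constrained
// to be the same as the first (false-value) input, so only the true value
// ever needs to be conditionally moved in.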
void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) {
  LocationSummary* locations = select->GetLocations();
  Location dst = locations->Out();
  Location src = locations->InAt(1);
  Register src_reg = ZERO;
  Register src_reg_high = ZERO;
  HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
  Register cond_reg = TMP;
  int cond_cc = 0;
  Primitive::Type cond_type = Primitive::kPrimInt;
  bool cond_inverted = false;
  Primitive::Type dst_type = select->GetType();

  if (IsBooleanValueOrMaterializedCondition(cond)) {
    cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
  } else {
    HCondition* condition = cond->AsCondition();
    LocationSummary* cond_locations = cond->GetLocations();
    IfCondition if_cond = condition->GetCondition();
    cond_type = condition->InputAt(0)->GetType();
    switch (cond_type) {
      default:
        DCHECK_NE(cond_type, Primitive::kPrimLong);
        cond_inverted = MaterializeIntCompare(if_cond, cond_locations, cond_reg);
        break;
      case Primitive::kPrimFloat:
      case Primitive::kPrimDouble:
        cond_inverted = MaterializeFpCompareR2(if_cond,
                                               condition->IsGtBias(),
                                               cond_type,
                                               cond_locations,
                                               cond_cc);
        break;
    }
  }

  DCHECK(dst.Equals(locations->InAt(0)));
  if (src.IsRegister()) {
    src_reg = src.AsRegister<Register>();
  } else if (src.IsRegisterPair()) {
    src_reg = src.AsRegisterPairLow<Register>();
    src_reg_high = src.AsRegisterPairHigh<Register>();
  } else if (src.IsConstant()) {
    DCHECK(src.GetConstant()->IsZeroBitPattern());
  }

  switch (cond_type) {
    default:
      switch (dst_type) {
        default:
          if (cond_inverted) {
            __ Movz(dst.AsRegister<Register>(), src_reg, cond_reg);
          } else {
            __ Movn(dst.AsRegister<Register>(), src_reg, cond_reg);
          }
          break;
        case Primitive::kPrimLong:
          if (cond_inverted) {
            __ Movz(dst.AsRegisterPairLow<Register>(), src_reg, cond_reg);
            __ Movz(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_reg);
          } else {
            __ Movn(dst.AsRegisterPairLow<Register>(), src_reg, cond_reg);
            __ Movn(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_reg);
          }
          break;
        case Primitive::kPrimFloat:
          if (cond_inverted) {
            __ MovzS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
          } else {
            __ MovnS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
          }
          break;
        case Primitive::kPrimDouble:
          if (cond_inverted) {
            __ MovzD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
          } else {
            __ MovnD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
          }
          break;
      }
      break;
    case Primitive::kPrimLong:
      LOG(FATAL) << "Unreachable";
      UNREACHABLE();
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      switch (dst_type) {
        default:
          if (cond_inverted) {
            __ Movf(dst.AsRegister<Register>(), src_reg, cond_cc);
          } else {
            __ Movt(dst.AsRegister<Register>(), src_reg, cond_cc);
          }
          break;
        case Primitive::kPrimLong:
          if (cond_inverted) {
            __ Movf(dst.AsRegisterPairLow<Register>(), src_reg, cond_cc);
            __ Movf(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_cc);
          } else {
            __ Movt(dst.AsRegisterPairLow<Register>(), src_reg, cond_cc);
            __ Movt(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_cc);
          }
          break;
        case Primitive::kPrimFloat:
          if (cond_inverted) {
            __ MovfS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
          } else {
            __ MovtS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
          }
          break;
        case Primitive::kPrimDouble:
          if (cond_inverted) {
            __ MovfD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
          } else {
            __ MovtD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
          }
          break;
      }
      break;
  }
}

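// R6 version: Seleqz/Selnez pick between a register and zero based on a GPR
// condition, while Sel.fmt consumes bit 0 of an FPU register; integer
// conditions are therefore normalized with Sltu/Mtc1 before sel.fmt is used.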
void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) {
  LocationSummary* locations = select->GetLocations();
  Location dst = locations->Out();
  Location false_src = locations->InAt(0);
  Location true_src = locations->InAt(1);
  HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
  Register cond_reg = TMP;
  FRegister fcond_reg = FTMP;
  Primitive::Type cond_type = Primitive::kPrimInt;
  bool cond_inverted = false;
  Primitive::Type dst_type = select->GetType();

  if (IsBooleanValueOrMaterializedCondition(cond)) {
    cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
  } else {
    HCondition* condition = cond->AsCondition();
    LocationSummary* cond_locations = cond->GetLocations();
    IfCondition if_cond = condition->GetCondition();
    cond_type = condition->InputAt(0)->GetType();
    switch (cond_type) {
      default:
        DCHECK_NE(cond_type, Primitive::kPrimLong);
        cond_inverted = MaterializeIntCompare(if_cond, cond_locations, cond_reg);
        break;
      case Primitive::kPrimFloat:
      case Primitive::kPrimDouble:
        cond_inverted = MaterializeFpCompareR6(if_cond,
                                               condition->IsGtBias(),
                                               cond_type,
                                               cond_locations,
                                               fcond_reg);
        break;
    }
  }

  if (true_src.IsConstant()) {
    DCHECK(true_src.GetConstant()->IsZeroBitPattern());
  }
  if (false_src.IsConstant()) {
    DCHECK(false_src.GetConstant()->IsZeroBitPattern());
  }

  switch (dst_type) {
    default:
      if (Primitive::IsFloatingPointType(cond_type)) {
        __ Mfc1(cond_reg, fcond_reg);
      }
      if (true_src.IsConstant()) {
        if (cond_inverted) {
          __ Selnez(dst.AsRegister<Register>(), false_src.AsRegister<Register>(), cond_reg);
        } else {
          __ Seleqz(dst.AsRegister<Register>(), false_src.AsRegister<Register>(), cond_reg);
        }
      } else if (false_src.IsConstant()) {
        if (cond_inverted) {
          __ Seleqz(dst.AsRegister<Register>(), true_src.AsRegister<Register>(), cond_reg);
        } else {
          __ Selnez(dst.AsRegister<Register>(), true_src.AsRegister<Register>(), cond_reg);
        }
      } else {
        DCHECK_NE(cond_reg, AT);
        if (cond_inverted) {
          __ Seleqz(AT, true_src.AsRegister<Register>(), cond_reg);
          __ Selnez(TMP, false_src.AsRegister<Register>(), cond_reg);
        } else {
          __ Selnez(AT, true_src.AsRegister<Register>(), cond_reg);
          __ Seleqz(TMP, false_src.AsRegister<Register>(), cond_reg);
        }
        __ Or(dst.AsRegister<Register>(), AT, TMP);
      }
      break;
    case Primitive::kPrimLong: {
      if (Primitive::IsFloatingPointType(cond_type)) {
        __ Mfc1(cond_reg, fcond_reg);
      }
      Register dst_lo = dst.AsRegisterPairLow<Register>();
      Register dst_hi = dst.AsRegisterPairHigh<Register>();
      if (true_src.IsConstant()) {
        Register src_lo = false_src.AsRegisterPairLow<Register>();
        Register src_hi = false_src.AsRegisterPairHigh<Register>();
        if (cond_inverted) {
          __ Selnez(dst_lo, src_lo, cond_reg);
          __ Selnez(dst_hi, src_hi, cond_reg);
        } else {
          __ Seleqz(dst_lo, src_lo, cond_reg);
          __ Seleqz(dst_hi, src_hi, cond_reg);
        }
      } else {
        DCHECK(false_src.IsConstant());
        Register src_lo = true_src.AsRegisterPairLow<Register>();
        Register src_hi = true_src.AsRegisterPairHigh<Register>();
        if (cond_inverted) {
          __ Seleqz(dst_lo, src_lo, cond_reg);
          __ Seleqz(dst_hi, src_hi, cond_reg);
        } else {
          __ Selnez(dst_lo, src_lo, cond_reg);
          __ Selnez(dst_hi, src_hi, cond_reg);
        }
      }
      break;
    }
    case Primitive::kPrimFloat: {
      if (!Primitive::IsFloatingPointType(cond_type)) {
        // sel*.fmt tests bit 0 of the condition register, account for that.
        __ Sltu(TMP, ZERO, cond_reg);
        __ Mtc1(TMP, fcond_reg);
      }
      FRegister dst_reg = dst.AsFpuRegister<FRegister>();
      if (true_src.IsConstant()) {
        FRegister src_reg = false_src.AsFpuRegister<FRegister>();
        if (cond_inverted) {
          __ SelnezS(dst_reg, src_reg, fcond_reg);
        } else {
          __ SeleqzS(dst_reg, src_reg, fcond_reg);
        }
      } else if (false_src.IsConstant()) {
        FRegister src_reg = true_src.AsFpuRegister<FRegister>();
        if (cond_inverted) {
          __ SeleqzS(dst_reg, src_reg, fcond_reg);
        } else {
          __ SelnezS(dst_reg, src_reg, fcond_reg);
        }
      } else {
        if (cond_inverted) {
          __ SelS(fcond_reg,
                  true_src.AsFpuRegister<FRegister>(),
                  false_src.AsFpuRegister<FRegister>());
        } else {
          __ SelS(fcond_reg,
                  false_src.AsFpuRegister<FRegister>(),
                  true_src.AsFpuRegister<FRegister>());
        }
        __ MovS(dst_reg, fcond_reg);
      }
      break;
    }
    case Primitive::kPrimDouble: {
      if (!Primitive::IsFloatingPointType(cond_type)) {
        // sel*.fmt tests bit 0 of the condition register, account for that.
        __ Sltu(TMP, ZERO, cond_reg);
        __ Mtc1(TMP, fcond_reg);
      }
      FRegister dst_reg = dst.AsFpuRegister<FRegister>();
      if (true_src.IsConstant()) {
        FRegister src_reg = false_src.AsFpuRegister<FRegister>();
        if (cond_inverted) {
          __ SelnezD(dst_reg, src_reg, fcond_reg);
        } else {
          __ SeleqzD(dst_reg, src_reg, fcond_reg);
        }
      } else if (false_src.IsConstant()) {
        FRegister src_reg = true_src.AsFpuRegister<FRegister>();
        if (cond_inverted) {
          __ SeleqzD(dst_reg, src_reg, fcond_reg);
        } else {
          __ SelnezD(dst_reg, src_reg, fcond_reg);
        }
      } else {
        if (cond_inverted) {
          __ SelD(fcond_reg,
                  true_src.AsFpuRegister<FRegister>(),
                  false_src.AsFpuRegister<FRegister>());
        } else {
          __ SelD(fcond_reg,
                  false_src.AsFpuRegister<FRegister>(),
                  true_src.AsFpuRegister<FRegister>());
        }
        __ MovD(dst_reg, fcond_reg);
      }
      break;
    }
  }
}

void LocationsBuilderMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
  LocationSummary* locations = new (GetGraph()->GetArena())
      LocationSummary(flag, LocationSummary::kNoCall);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
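  // Load the "should deoptimize" flag that the frame entry spilled on the
  // stack; it is set by the runtime when CHA-based optimizations in this
  // method are invalidated.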
  __ LoadFromOffset(kLoadWord,
                    flag->GetLocations()->Out().AsRegister<Register>(),
                    SP,
                    codegen_->GetStackOffsetOfShouldDeoptimizeFlag());
}

void LocationsBuilderMIPS::VisitSelect(HSelect* select) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
  CanMoveConditionally(select, codegen_->GetInstructionSetFeatures().IsR6(), locations);
}

void InstructionCodeGeneratorMIPS::VisitSelect(HSelect* select) {
  bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
  if (CanMoveConditionally(select, is_r6, /* locations_to_set */ nullptr)) {
    if (is_r6) {
      GenConditionalMoveR6(select);
    } else {
      GenConditionalMoveR2(select);
    }
  } else {
    LocationSummary* locations = select->GetLocations();
    MipsLabel false_target;
    GenerateTestAndBranch(select,
                          /* condition_input_index */ 2,
                          /* true_target */ nullptr,
                          &false_target);
    codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
    __ Bind(&false_target);
  }
}

void LocationsBuilderMIPS::VisitNativeDebugInfo(HNativeDebugInfo* info) {
  new (GetGraph()->GetArena()) LocationSummary(info);
}

void InstructionCodeGeneratorMIPS::VisitNativeDebugInfo(HNativeDebugInfo*) {
  // MaybeRecordNativeDebugInfo is already called implicitly in CodeGenerator::Compile.
}

void CodeGeneratorMIPS::GenerateNop() {
  __ Nop();
}

void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
  Primitive::Type field_type = field_info.GetFieldType();
  bool is_wide = (field_type == Primitive::kPrimLong) || (field_type == Primitive::kPrimDouble);
  bool generate_volatile = field_info.IsVolatile() && is_wide;
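  // 64-bit volatile accesses cannot be performed atomically with plain MIPS32
  // load pairs, so they are routed through the kQuickA64Load runtime
  // entrypoint (see the generator below), hence kCallOnMainOnly.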
   6056   bool object_field_get_with_read_barrier =
   6057       kEmitCompilerReadBarrier && (field_type == Primitive::kPrimNot);
   6058   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
   6059       instruction,
   6060       generate_volatile
   6061           ? LocationSummary::kCallOnMainOnly
   6062           : (object_field_get_with_read_barrier
   6063               ? LocationSummary::kCallOnSlowPath
   6064               : LocationSummary::kNoCall));
   6065 
   6066   if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
   6067     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
   6068   }
   6069   locations->SetInAt(0, Location::RequiresRegister());
   6070   if (generate_volatile) {
   6071     InvokeRuntimeCallingConvention calling_convention;
   6072     // need A0 to hold base + offset
   6073     locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   6074     if (field_type == Primitive::kPrimLong) {
   6075       locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimLong));
   6076     } else {
   6077       // Use Location::Any() to prevent situations when running out of available fp registers.
   6078       locations->SetOut(Location::Any());
   6079       // Need some temp core regs since FP results are returned in core registers
   6080       Location reg = calling_convention.GetReturnLocation(Primitive::kPrimLong);
   6081       locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairLow<Register>()));
   6082       locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairHigh<Register>()));
   6083     }
   6084   } else {
   6085     if (Primitive::IsFloatingPointType(instruction->GetType())) {
   6086       locations->SetOut(Location::RequiresFpuRegister());
   6087     } else {
   6088       // The output overlaps in the case of an object field get with
   6089       // read barriers enabled: we do not want the move to overwrite the
   6090       // object's location, as we need it to emit the read barrier.
   6091       locations->SetOut(Location::RequiresRegister(),
   6092                         object_field_get_with_read_barrier
   6093                             ? Location::kOutputOverlap
   6094                             : Location::kNoOutputOverlap);
   6095     }
   6096     if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
   6097       // We need a temporary register for the read barrier marking slow
   6098       // path in CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier.
   6099       locations->AddTemp(Location::RequiresRegister());
   6100     }
   6101   }
   6102 }
   6103 
   6104 void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
   6105                                                   const FieldInfo& field_info,
   6106                                                   uint32_t dex_pc) {
   6107   Primitive::Type type = field_info.GetFieldType();
   6108   LocationSummary* locations = instruction->GetLocations();
   6109   Location obj_loc = locations->InAt(0);
   6110   Register obj = obj_loc.AsRegister<Register>();
   6111   Location dst_loc = locations->Out();
   6112   LoadOperandType load_type = kLoadUnsignedByte;
   6113   bool is_volatile = field_info.IsVolatile();
   6114   uint32_t offset = field_info.GetFieldOffset().Uint32Value();
   6115   auto null_checker = GetImplicitNullChecker(instruction, codegen_);
   6116 
   6117   switch (type) {
   6118     case Primitive::kPrimBoolean:
   6119       load_type = kLoadUnsignedByte;
   6120       break;
   6121     case Primitive::kPrimByte:
   6122       load_type = kLoadSignedByte;
   6123       break;
   6124     case Primitive::kPrimShort:
   6125       load_type = kLoadSignedHalfword;
   6126       break;
   6127     case Primitive::kPrimChar:
   6128       load_type = kLoadUnsignedHalfword;
   6129       break;
   6130     case Primitive::kPrimInt:
   6131     case Primitive::kPrimFloat:
   6132     case Primitive::kPrimNot:
   6133       load_type = kLoadWord;
   6134       break;
   6135     case Primitive::kPrimLong:
   6136     case Primitive::kPrimDouble:
   6137       load_type = kLoadDoubleword;
   6138       break;
   6139     case Primitive::kPrimVoid:
   6140       LOG(FATAL) << "Unreachable type " << type;
   6141       UNREACHABLE();
   6142   }
   6143 
   6144   if (is_volatile && load_type == kLoadDoubleword) {
   6145     InvokeRuntimeCallingConvention calling_convention;
   6146     __ Addiu32(locations->GetTemp(0).AsRegister<Register>(), obj, offset);
   6147     // Do implicit Null check
   6148     __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
   6149     codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
   6150     codegen_->InvokeRuntime(kQuickA64Load, instruction, dex_pc);
   6151     CheckEntrypointTypes<kQuickA64Load, int64_t, volatile const int64_t*>();
   6152     if (type == Primitive::kPrimDouble) {
   6153       // FP results are returned in core registers. Need to move them.
   6154       if (dst_loc.IsFpuRegister()) {
   6155         __ Mtc1(locations->GetTemp(1).AsRegister<Register>(), dst_loc.AsFpuRegister<FRegister>());
   6156         __ MoveToFpuHigh(locations->GetTemp(2).AsRegister<Register>(),
   6157                          dst_loc.AsFpuRegister<FRegister>());
   6158       } else {
   6159         DCHECK(dst_loc.IsDoubleStackSlot());
   6160         __ StoreToOffset(kStoreWord,
   6161                          locations->GetTemp(1).AsRegister<Register>(),
   6162                          SP,
   6163                          dst_loc.GetStackIndex());
   6164         __ StoreToOffset(kStoreWord,
   6165                          locations->GetTemp(2).AsRegister<Register>(),
   6166                          SP,
   6167                          dst_loc.GetStackIndex() + 4);
   6168       }
   6169     }
   6170   } else {
   6171     if (type == Primitive::kPrimNot) {
   6172       // /* HeapReference<Object> */ dst = *(obj + offset)
   6173       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
   6174         Location temp_loc = locations->GetTemp(0);
   6175         // Note that a potential implicit null check is handled in this
   6176         // CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier call.
   6177         codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
   6178                                                         dst_loc,
   6179                                                         obj,
   6180                                                         offset,
   6181                                                         temp_loc,
   6182                                                         /* needs_null_check */ true);
   6183         if (is_volatile) {
   6184           GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
   6185         }
   6186       } else {
   6187         __ LoadFromOffset(kLoadWord, dst_loc.AsRegister<Register>(), obj, offset, null_checker);
   6188         if (is_volatile) {
   6189           GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
   6190         }
   6191         // If read barriers are enabled, emit read barriers other than
   6192         // Baker's using a slow path (and also unpoison the loaded
   6193         // reference, if heap poisoning is enabled).
   6194         codegen_->MaybeGenerateReadBarrierSlow(instruction, dst_loc, dst_loc, obj_loc, offset);
   6195       }
   6196     } else if (!Primitive::IsFloatingPointType(type)) {
   6197       Register dst;
   6198       if (type == Primitive::kPrimLong) {
   6199         DCHECK(dst_loc.IsRegisterPair());
   6200         dst = dst_loc.AsRegisterPairLow<Register>();
   6201       } else {
   6202         DCHECK(dst_loc.IsRegister());
   6203         dst = dst_loc.AsRegister<Register>();
   6204       }
   6205       __ LoadFromOffset(load_type, dst, obj, offset, null_checker);
   6206     } else {
   6207       DCHECK(dst_loc.IsFpuRegister());
   6208       FRegister dst = dst_loc.AsFpuRegister<FRegister>();
   6209       if (type == Primitive::kPrimFloat) {
   6210         __ LoadSFromOffset(dst, obj, offset, null_checker);
   6211       } else {
   6212         __ LoadDFromOffset(dst, obj, offset, null_checker);
   6213       }
   6214     }
   6215   }
   6216 
   6217   // Memory barriers for reference loads are emitted above, in the
   6218   // kPrimNot branch.
   6219   if (is_volatile && (type != Primitive::kPrimNot)) {
   6220     GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
   6221   }
   6222 }
   6223 
   6224 void LocationsBuilderMIPS::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
   6225   Primitive::Type field_type = field_info.GetFieldType();
   6226   bool is_wide = (field_type == Primitive::kPrimLong) || (field_type == Primitive::kPrimDouble);
   6227   bool generate_volatile = field_info.IsVolatile() && is_wide;
   6228   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
   6229       instruction, generate_volatile ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall);
   6230 
   6231   locations->SetInAt(0, Location::RequiresRegister());
   6232   if (generate_volatile) {
   6233     InvokeRuntimeCallingConvention calling_convention;
   6234     // Need A0 to hold base + offset.
   6235     locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   6236     if (field_type == Primitive::kPrimLong) {
   6237       locations->SetInAt(1, Location::RegisterPairLocation(
   6238           calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
   6239     } else {
   6240       // Use Location::Any() to avoid running out of available FP registers.
   6241       locations->SetInAt(1, Location::Any());
   6242       // Pass FP parameters in core registers.
   6243       locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
   6244       locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
   6245     }
   6246   } else {
   6247     if (Primitive::IsFloatingPointType(field_type)) {
   6248       locations->SetInAt(1, FpuRegisterOrConstantForStore(instruction->InputAt(1)));
   6249     } else {
   6250       locations->SetInAt(1, RegisterOrZeroConstant(instruction->InputAt(1)));
   6251     }
   6252   }
   6253 }
   6254 
   6255 void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
   6256                                                   const FieldInfo& field_info,
   6257                                                   uint32_t dex_pc,
   6258                                                   bool value_can_be_null) {
   6259   Primitive::Type type = field_info.GetFieldType();
   6260   LocationSummary* locations = instruction->GetLocations();
   6261   Register obj = locations->InAt(0).AsRegister<Register>();
   6262   Location value_location = locations->InAt(1);
   6263   StoreOperandType store_type = kStoreByte;
   6264   bool is_volatile = field_info.IsVolatile();
   6265   uint32_t offset = field_info.GetFieldOffset().Uint32Value();
   6266   bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1));
   6267   auto null_checker = GetImplicitNullChecker(instruction, codegen_);
   6268 
   6269   switch (type) {
   6270     case Primitive::kPrimBoolean:
   6271     case Primitive::kPrimByte:
   6272       store_type = kStoreByte;
   6273       break;
   6274     case Primitive::kPrimShort:
   6275     case Primitive::kPrimChar:
   6276       store_type = kStoreHalfword;
   6277       break;
   6278     case Primitive::kPrimInt:
   6279     case Primitive::kPrimFloat:
   6280     case Primitive::kPrimNot:
   6281       store_type = kStoreWord;
   6282       break;
   6283     case Primitive::kPrimLong:
   6284     case Primitive::kPrimDouble:
   6285       store_type = kStoreDoubleword;
   6286       break;
   6287     case Primitive::kPrimVoid:
   6288       LOG(FATAL) << "Unreachable type " << type;
   6289       UNREACHABLE();
   6290   }
   6291 
   6292   if (is_volatile) {
   6293     GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
   6294   }
   6295 
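           // As with volatile 64-bit loads, the store must be atomic, so it is
           // delegated to the kQuickA64Store runtime entrypoint.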
   6296   if (is_volatile && store_type == kStoreDoubleword) {
   6297     InvokeRuntimeCallingConvention calling_convention;
   6298     __ Addiu32(locations->GetTemp(0).AsRegister<Register>(), obj, offset);
   6299     // Do an implicit null check.
   6300     __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
   6301     codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
   6302     if (type == Primitive::kPrimDouble) {
   6303       // Pass FP parameters in core registers.
   6304       if (value_location.IsFpuRegister()) {
   6305         __ Mfc1(locations->GetTemp(1).AsRegister<Register>(),
   6306                 value_location.AsFpuRegister<FRegister>());
   6307         __ MoveFromFpuHigh(locations->GetTemp(2).AsRegister<Register>(),
   6308                            value_location.AsFpuRegister<FRegister>());
   6309       } else if (value_location.IsDoubleStackSlot()) {
   6310         __ LoadFromOffset(kLoadWord,
   6311                           locations->GetTemp(1).AsRegister<Register>(),
   6312                           SP,
   6313                           value_location.GetStackIndex());
   6314         __ LoadFromOffset(kLoadWord,
   6315                           locations->GetTemp(2).AsRegister<Register>(),
   6316                           SP,
   6317                           value_location.GetStackIndex() + 4);
   6318       } else {
   6319         DCHECK(value_location.IsConstant());
   6320         DCHECK(value_location.GetConstant()->IsDoubleConstant());
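                 // Materialize the double constant's 64-bit bit pattern directly
                 // into the two argument core registers.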
   6321         int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
   6322         __ LoadConst64(locations->GetTemp(2).AsRegister<Register>(),
   6323                        locations->GetTemp(1).AsRegister<Register>(),
   6324                        value);
   6325       }
   6326     }
   6327     codegen_->InvokeRuntime(kQuickA64Store, instruction, dex_pc);
   6328     CheckEntrypointTypes<kQuickA64Store, void, volatile int64_t*, int64_t>();
   6329   } else {
   6330     if (value_location.IsConstant()) {
   6331       int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
   6332       __ StoreConstToOffset(store_type, value, obj, offset, TMP, null_checker);
   6333     } else if (!Primitive::IsFloatingPointType(type)) {
   6334       Register src;
   6335       if (type == Primitive::kPrimLong) {
   6336         src = value_location.AsRegisterPairLow<Register>();
   6337       } else {
   6338         src = value_location.AsRegister<Register>();
   6339       }
   6340       if (kPoisonHeapReferences && needs_write_barrier) {
   6341         // Note that in the case where `value` is a null reference,
   6342         // we do not enter this block, as a null reference does not
   6343         // need poisoning.
   6344         DCHECK_EQ(type, Primitive::kPrimNot);
   6345         __ PoisonHeapReference(TMP, src);
   6346         __ StoreToOffset(store_type, TMP, obj, offset, null_checker);
   6347       } else {
   6348         __ StoreToOffset(store_type, src, obj, offset, null_checker);
   6349       }
   6350     } else {
   6351       FRegister src = value_location.AsFpuRegister<FRegister>();
   6352       if (type == Primitive::kPrimFloat) {
   6353         __ StoreSToOffset(src, obj, offset, null_checker);
   6354       } else {
   6355         __ StoreDToOffset(src, obj, offset, null_checker);
   6356       }
   6357     }
   6358   }
   6359 
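           // Write barrier: mark the card covering `obj` so the GC notices the
           // updated reference field.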
   6360   if (needs_write_barrier) {
   6361     Register src = value_location.AsRegister<Register>();
   6362     codegen_->MarkGCCard(obj, src, value_can_be_null);
   6363   }
   6364 
   6365   if (is_volatile) {
   6366     GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
   6367   }
   6368 }
   6369 
   6370 void LocationsBuilderMIPS::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
   6371   HandleFieldGet(instruction, instruction->GetFieldInfo());
   6372 }
   6373 
   6374 void InstructionCodeGeneratorMIPS::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
   6375   HandleFieldGet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
   6376 }
   6377 
   6378 void LocationsBuilderMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
   6379   HandleFieldSet(instruction, instruction->GetFieldInfo());
   6380 }
   6381 
   6382 void InstructionCodeGeneratorMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
   6383   HandleFieldSet(instruction,
   6384                  instruction->GetFieldInfo(),
   6385                  instruction->GetDexPc(),
   6386                  instruction->GetValueCanBeNull());
   6387 }
   6388 
   6389 void InstructionCodeGeneratorMIPS::GenerateReferenceLoadOneRegister(
   6390     HInstruction* instruction,
   6391     Location out,
   6392     uint32_t offset,
   6393     Location maybe_temp,
   6394     ReadBarrierOption read_barrier_option) {
   6395   Register out_reg = out.AsRegister<Register>();
   6396   if (read_barrier_option == kWithReadBarrier) {
   6397     CHECK(kEmitCompilerReadBarrier);
   6398     DCHECK(maybe_temp.IsRegister()) << maybe_temp;
   6399     if (kUseBakerReadBarrier) {
   6400       // Load with fast path based Baker's read barrier.
   6401       // /* HeapReference<Object> */ out = *(out + offset)
   6402       codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
   6403                                                       out,
   6404                                                       out_reg,
   6405                                                       offset,
   6406                                                       maybe_temp,
   6407                                                       /* needs_null_check */ false);
   6408     } else {
   6409       // Load with slow path based read barrier.
   6410       // Save the value of `out` into `maybe_temp` before overwriting it
   6411       // in the following move operation, as we will need it for the
   6412       // read barrier below.
   6413       __ Move(maybe_temp.AsRegister<Register>(), out_reg);
   6414       // /* HeapReference<Object> */ out = *(out + offset)
   6415       __ LoadFromOffset(kLoadWord, out_reg, out_reg, offset);
   6416       codegen_->GenerateReadBarrierSlow(instruction, out, out, maybe_temp, offset);
   6417     }
   6418   } else {
   6419     // Plain load with no read barrier.
   6420     // /* HeapReference<Object> */ out = *(out + offset)
   6421     __ LoadFromOffset(kLoadWord, out_reg, out_reg, offset);
   6422     __ MaybeUnpoisonHeapReference(out_reg);
   6423   }
   6424 }
   6425 
   6426 void InstructionCodeGeneratorMIPS::GenerateReferenceLoadTwoRegisters(
   6427     HInstruction* instruction,
   6428     Location out,
   6429     Location obj,
   6430     uint32_t offset,
   6431     Location maybe_temp,
   6432     ReadBarrierOption read_barrier_option) {
   6433   Register out_reg = out.AsRegister<Register>();
   6434   Register obj_reg = obj.AsRegister<Register>();
   6435   if (read_barrier_option == kWithReadBarrier) {
   6436     CHECK(kEmitCompilerReadBarrier);
   6437     if (kUseBakerReadBarrier) {
   6438       DCHECK(maybe_temp.IsRegister()) << maybe_temp;
   6439       // Load with fast path based Baker's read barrier.
   6440       // /* HeapReference<Object> */ out = *(obj + offset)
   6441       codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
   6442                                                       out,
   6443                                                       obj_reg,
   6444                                                       offset,
   6445                                                       maybe_temp,
   6446                                                       /* needs_null_check */ false);
   6447     } else {
   6448       // Load with slow path based read barrier.
   6449       // /* HeapReference<Object> */ out = *(obj + offset)
   6450       __ LoadFromOffset(kLoadWord, out_reg, obj_reg, offset);
   6451       codegen_->GenerateReadBarrierSlow(instruction, out, out, obj, offset);
   6452     }
   6453   } else {
   6454     // Plain load with no read barrier.
   6455     // /* HeapReference<Object> */ out = *(obj + offset)
   6456     __ LoadFromOffset(kLoadWord, out_reg, obj_reg, offset);
   6457     __ MaybeUnpoisonHeapReference(out_reg);
   6458   }
   6459 }
   6460 
   6461 void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(HInstruction* instruction,
   6462                                                            Location root,
   6463                                                            Register obj,
   6464                                                            uint32_t offset,
   6465                                                            ReadBarrierOption read_barrier_option) {
   6466   Register root_reg = root.AsRegister<Register>();
   6467   if (read_barrier_option == kWithReadBarrier) {
   6468     DCHECK(kEmitCompilerReadBarrier);
   6469     if (kUseBakerReadBarrier) {
   6470       // Fast path implementation of art::ReadBarrier::BarrierForRoot when
   6471       // Baker's read barriers are used:
   6472       //
   6473       //   root = obj.field;
   6474       //   temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
   6475       //   if (temp != null) {
   6476       //     root = temp(root)
   6477       //   }
   6478 
   6479       // /* GcRoot<mirror::Object> */ root = *(obj + offset)
   6480       __ LoadFromOffset(kLoadWord, root_reg, obj, offset);
   6481       static_assert(
   6482           sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
   6483           "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
   6484           "have different sizes.");
   6485       static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
   6486                     "art::mirror::CompressedReference<mirror::Object> and int32_t "
   6487                     "have different sizes.");
   6488 
   6489       // Slow path marking the GC root `root`.
   6490       Location temp = Location::RegisterLocation(T9);
   6491       SlowPathCodeMIPS* slow_path =
   6492           new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS(
   6493               instruction,
   6494               root,
   6495               /*entrypoint*/ temp);
   6496       codegen_->AddSlowPath(slow_path);
   6497 
   6498       // temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
   6499       const int32_t entry_point_offset =
   6500           Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(root.reg() - 1);
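               // The `- 1` accounts for the mark entrypoints being indexed from
               // register 1; the ZERO register can never hold a GC root.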
   6501       // Loading the entrypoint does not require a load acquire since it is only changed when
   6502       // threads are suspended or running a checkpoint.
   6503       __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), TR, entry_point_offset);
   6504       // The entrypoint is null when the GC is not marking; this saves one load
   6505       // compared to checking GetIsGcMarking.
   6506       __ Bnez(temp.AsRegister<Register>(), slow_path->GetEntryLabel());
   6507       __ Bind(slow_path->GetExitLabel());
   6508     } else {
   6509       // GC root loaded through a slow path for read barriers other
   6510       // than Baker's.
   6511       // /* GcRoot<mirror::Object>* */ root = obj + offset
   6512       __ Addiu32(root_reg, obj, offset);
   6513       // /* mirror::Object* */ root = root->Read()
   6514       codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
   6515     }
   6516   } else {
   6517     // Plain GC root load with no read barrier.
   6518     // /* GcRoot<mirror::Object> */ root = *(obj + offset)
   6519     __ LoadFromOffset(kLoadWord, root_reg, obj, offset);
   6520     // Note that GC roots are not affected by heap poisoning, thus we
   6521     // do not have to unpoison `root_reg` here.
   6522   }
   6523 }
   6524 
   6525 void CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
   6526                                                               Location ref,
   6527                                                               Register obj,
   6528                                                               uint32_t offset,
   6529                                                               Location temp,
   6530                                                               bool needs_null_check) {
   6531   DCHECK(kEmitCompilerReadBarrier);
   6532   DCHECK(kUseBakerReadBarrier);
   6533 
   6534   // /* HeapReference<Object> */ ref = *(obj + offset)
   6535   Location no_index = Location::NoLocation();
   6536   ScaleFactor no_scale_factor = TIMES_1;
   6537   GenerateReferenceLoadWithBakerReadBarrier(instruction,
   6538                                             ref,
   6539                                             obj,
   6540                                             offset,
   6541                                             no_index,
   6542                                             no_scale_factor,
   6543                                             temp,
   6544                                             needs_null_check);
   6545 }
   6546 
   6547 void CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
   6548                                                               Location ref,
   6549                                                               Register obj,
   6550                                                               uint32_t data_offset,
   6551                                                               Location index,
   6552                                                               Location temp,
   6553                                                               bool needs_null_check) {
   6554   DCHECK(kEmitCompilerReadBarrier);
   6555   DCHECK(kUseBakerReadBarrier);
   6556 
   6557   static_assert(
   6558       sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
   6559       "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
   6560   // /* HeapReference<Object> */ ref =
   6561   //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
   6562   ScaleFactor scale_factor = TIMES_4;
   6563   GenerateReferenceLoadWithBakerReadBarrier(instruction,
   6564                                             ref,
   6565                                             obj,
   6566                                             data_offset,
   6567                                             index,
   6568                                             scale_factor,
   6569                                             temp,
   6570                                             needs_null_check);
   6571 }
   6572 
   6573 void CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
   6574                                                                   Location ref,
   6575                                                                   Register obj,
   6576                                                                   uint32_t offset,
   6577                                                                   Location index,
   6578                                                                   ScaleFactor scale_factor,
   6579                                                                   Location temp,
   6580                                                                   bool needs_null_check,
   6581                                                                   bool always_update_field) {
   6582   DCHECK(kEmitCompilerReadBarrier);
   6583   DCHECK(kUseBakerReadBarrier);
   6584 
   6585   // In slow path based read barriers, the read barrier call is
   6586   // inserted after the original load. However, in fast path based
   6587   // Baker's read barriers, we need to perform the load of
   6588   // mirror::Object::monitor_ *before* the original reference load.
   6589   // This load-load ordering is required by the read barrier.
   6590   // The fast path/slow path (for Baker's algorithm) should look like:
   6591   //
   6592   //   uint32_t rb_state = LockWord(obj->monitor_).ReadBarrierState();
   6593   //   lfence;  // Load fence or artificial data dependency to prevent load-load reordering
   6594   //   HeapReference<Object> ref = *src;  // Original reference load.
   6595   //   bool is_gray = (rb_state == ReadBarrier::GrayState());
   6596   //   if (is_gray) {
   6597   //     ref = ReadBarrier::Mark(ref);  // Performed by runtime entrypoint slow path.
   6598   //   }
   6599   //
   6600   // Note: the original implementation in ReadBarrier::Barrier is
   6601   // slightly more complex as it performs additional checks that we do
   6602   // not do here for performance reasons.
   6603 
   6604   Register ref_reg = ref.AsRegister<Register>();
   6605   Register temp_reg = temp.AsRegister<Register>();
   6606   uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
   6607 
   6608   // /* int32_t */ monitor = obj->monitor_
   6609   __ LoadFromOffset(kLoadWord, temp_reg, obj, monitor_offset);
   6610   if (needs_null_check) {
   6611     MaybeRecordImplicitNullCheck(instruction);
   6612   }
   6613   // /* LockWord */ lock_word = LockWord(monitor)
   6614   static_assert(sizeof(LockWord) == sizeof(int32_t),
   6615                 "art::LockWord and int32_t have different sizes.");
   6616 
   6617   __ Sync(0);  // Barrier to prevent load-load reordering.
   6618 
   6619   // The actual reference load.
   6620   if (index.IsValid()) {
   6621     // Load types involving an "index": ArrayGet,
   6622     // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
   6623     // intrinsics.
   6624     // /* HeapReference<Object> */ ref = *(obj + offset + (index << scale_factor))
   6625     if (index.IsConstant()) {
   6626       size_t computed_offset =
   6627           (index.GetConstant()->AsIntConstant()->GetValue() << scale_factor) + offset;
   6628       __ LoadFromOffset(kLoadWord, ref_reg, obj, computed_offset);
   6629     } else {
   6630       // Handle the special case of the
   6631       // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
   6632       // intrinsics, which use a register pair as index ("long
   6633       // offset"), of which only the low part contains data.
   6634       Register index_reg = index.IsRegisterPair()
   6635           ? index.AsRegisterPairLow<Register>()
   6636           : index.AsRegister<Register>();
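               // TMP = obj + (index << scale_factor); `offset` is then applied
               // by the load below.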
   6637       __ ShiftAndAdd(TMP, index_reg, obj, scale_factor, TMP);
   6638       __ LoadFromOffset(kLoadWord, ref_reg, TMP, offset);
   6639     }
   6640   } else {
   6641     // /* HeapReference<Object> */ ref = *(obj + offset)
   6642     __ LoadFromOffset(kLoadWord, ref_reg, obj, offset);
   6643   }
   6644 
   6645   // Object* ref = ref_addr->AsMirrorPtr()
   6646   __ MaybeUnpoisonHeapReference(ref_reg);
   6647 
   6648   // Slow path marking the object `ref` when it is gray.
   6649   SlowPathCodeMIPS* slow_path;
   6650   if (always_update_field) {
   6651     // ReadBarrierMarkAndUpdateFieldSlowPathMIPS only supports address
   6652     // of the form `obj + field_offset`, where `obj` is a register and
   6653     // `field_offset` is a register pair (of which only the lower half
   6654     // is used). Thus `offset` is expected to be zero and `scale_factor`
   6655     // to be TIMES_1 (i.e. no scaling) in this code path.
   6656     DCHECK_EQ(offset, 0u);
   6657     DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
   6658     slow_path = new (GetGraph()->GetArena())
   6659         ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction,
   6660                                                   ref,
   6661                                                   obj,
   6662                                                   /* field_offset */ index,
   6663                                                   temp_reg);
   6664   } else {
   6665     slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
   6666   }
   6667   AddSlowPath(slow_path);
   6668 
   6669   // if (rb_state == ReadBarrier::GrayState())
   6670   //   ref = ReadBarrier::Mark(ref);
   6671   // Given the numeric representation, it's enough to check the low bit of the
   6672   // rb_state. We do that by shifting the bit into the sign bit (31) and
   6673   // performing a branch on less than zero.
   6674   static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
   6675   static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
   6676   static_assert(LockWord::kReadBarrierStateSize == 1, "Expecting 1-bit read barrier state size");
   6677   __ Sll(temp_reg, temp_reg, 31 - LockWord::kReadBarrierStateShift);
   6678   __ Bltz(temp_reg, slow_path->GetEntryLabel());
   6679   __ Bind(slow_path->GetExitLabel());
   6680 }
   6681 
   6682 void CodeGeneratorMIPS::GenerateReadBarrierSlow(HInstruction* instruction,
   6683                                                 Location out,
   6684                                                 Location ref,
   6685                                                 Location obj,
   6686                                                 uint32_t offset,
   6687                                                 Location index) {
   6688   DCHECK(kEmitCompilerReadBarrier);
   6689 
   6690   // Insert a slow path based read barrier *after* the reference load.
   6691   //
   6692   // If heap poisoning is enabled, the unpoisoning of the loaded
   6693   // reference will be carried out by the runtime within the slow
   6694   // path.
   6695   //
   6696   // Note that `ref` currently does not get unpoisoned (when heap
   6697   // poisoning is enabled), which is alright as the `ref` argument is
   6698   // not used by the artReadBarrierSlow entry point.
   6699   //
   6700   // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
   6701   SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena())
   6702       ReadBarrierForHeapReferenceSlowPathMIPS(instruction, out, ref, obj, offset, index);
   6703   AddSlowPath(slow_path);
   6704 
   6705   __ B(slow_path->GetEntryLabel());
   6706   __ Bind(slow_path->GetExitLabel());
   6707 }
   6708 
   6709 void CodeGeneratorMIPS::MaybeGenerateReadBarrierSlow(HInstruction* instruction,
   6710                                                      Location out,
   6711                                                      Location ref,
   6712                                                      Location obj,
   6713                                                      uint32_t offset,
   6714                                                      Location index) {
   6715   if (kEmitCompilerReadBarrier) {
   6716     // Baker's read barriers shall be handled by the fast path
   6717     // (CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier).
   6718     DCHECK(!kUseBakerReadBarrier);
   6719     // If heap poisoning is enabled, unpoisoning will be taken care of
   6720     // by the runtime within the slow path.
   6721     GenerateReadBarrierSlow(instruction, out, ref, obj, offset, index);
   6722   } else if (kPoisonHeapReferences) {
   6723     __ UnpoisonHeapReference(out.AsRegister<Register>());
   6724   }
   6725 }
   6726 
   6727 void CodeGeneratorMIPS::GenerateReadBarrierForRootSlow(HInstruction* instruction,
   6728                                                        Location out,
   6729                                                        Location root) {
   6730   DCHECK(kEmitCompilerReadBarrier);
   6731 
   6732   // Insert a slow path based read barrier *after* the GC root load.
   6733   //
   6734   // Note that GC roots are not affected by heap poisoning, so we do
   6735   // not need to do anything special for this here.
   6736   SlowPathCodeMIPS* slow_path =
   6737       new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
   6738   AddSlowPath(slow_path);
   6739 
   6740   __ B(slow_path->GetEntryLabel());
   6741   __ Bind(slow_path->GetExitLabel());
   6742 }
   6743 
   6744 void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
   6745   LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
   6746   TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
   6747   bool baker_read_barrier_slow_path = false;
   6748   switch (type_check_kind) {
   6749     case TypeCheckKind::kExactCheck:
   6750     case TypeCheckKind::kAbstractClassCheck:
   6751     case TypeCheckKind::kClassHierarchyCheck:
   6752     case TypeCheckKind::kArrayObjectCheck:
   6753       call_kind =
   6754           kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
   6755       baker_read_barrier_slow_path = kUseBakerReadBarrier;
   6756       break;
   6757     case TypeCheckKind::kArrayCheck:
   6758     case TypeCheckKind::kUnresolvedCheck:
   6759     case TypeCheckKind::kInterfaceCheck:
   6760       call_kind = LocationSummary::kCallOnSlowPath;
   6761       break;
   6762   }
   6763 
   6764   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
   6765   if (baker_read_barrier_slow_path) {
   6766     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
   6767   }
   6768   locations->SetInAt(0, Location::RequiresRegister());
   6769   locations->SetInAt(1, Location::RequiresRegister());
   6770   // The output does overlap inputs.
   6771   // Note that TypeCheckSlowPathMIPS uses this register too.
   6772   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
   6773   locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
   6774 }
   6775 
   6776 void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
   6777   TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
   6778   LocationSummary* locations = instruction->GetLocations();
   6779   Location obj_loc = locations->InAt(0);
   6780   Register obj = obj_loc.AsRegister<Register>();
   6781   Register cls = locations->InAt(1).AsRegister<Register>();
   6782   Location out_loc = locations->Out();
   6783   Register out = out_loc.AsRegister<Register>();
   6784   const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
   6785   DCHECK_LE(num_temps, 1u);
   6786   Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
   6787   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   6788   uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
   6789   uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
   6790   uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
   6791   MipsLabel done;
   6792   SlowPathCodeMIPS* slow_path = nullptr;
   6793 
   6794   // Return 0 if `obj` is null.
   6795   // Avoid this check if we know `obj` is not null.
   6796   if (instruction->MustDoNullCheck()) {
   6797     __ Move(out, ZERO);
   6798     __ Beqz(obj, &done);
   6799   }
   6800 
   6801   switch (type_check_kind) {
   6802     case TypeCheckKind::kExactCheck: {
   6803       // /* HeapReference<Class> */ out = obj->klass_
   6804       GenerateReferenceLoadTwoRegisters(instruction,
   6805                                         out_loc,
   6806                                         obj_loc,
   6807                                         class_offset,
   6808                                         maybe_temp_loc,
   6809                                         kCompilerReadBarrierOption);
   6810       // Classes must be equal for the instanceof to succeed.
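               // out = ((out ^ cls) < 1), i.e. 1 if the classes are identical
               // and 0 otherwise.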
   6811       __ Xor(out, out, cls);
   6812       __ Sltiu(out, out, 1);
   6813       break;
   6814     }
   6815 
   6816     case TypeCheckKind::kAbstractClassCheck: {
   6817       // /* HeapReference<Class> */ out = obj->klass_
   6818       GenerateReferenceLoadTwoRegisters(instruction,
   6819                                         out_loc,
   6820                                         obj_loc,
   6821                                         class_offset,
   6822                                         maybe_temp_loc,
   6823                                         kCompilerReadBarrierOption);
   6824       // If the class is abstract, we eagerly fetch the super class of the
   6825       // object to avoid doing a comparison we know will fail.
   6826       MipsLabel loop;
   6827       __ Bind(&loop);
   6828       // /* HeapReference<Class> */ out = out->super_class_
   6829       GenerateReferenceLoadOneRegister(instruction,
   6830                                        out_loc,
   6831                                        super_offset,
   6832                                        maybe_temp_loc,
   6833                                        kCompilerReadBarrierOption);
   6834       // If `out` is null, we use it for the result, and jump to `done`.
   6835       __ Beqz(out, &done);
   6836       __ Bne(out, cls, &loop);
   6837       __ LoadConst32(out, 1);
   6838       break;
   6839     }
   6840 
   6841     case TypeCheckKind::kClassHierarchyCheck: {
   6842       // /* HeapReference<Class> */ out = obj->klass_
   6843       GenerateReferenceLoadTwoRegisters(instruction,
   6844                                         out_loc,
   6845                                         obj_loc,
   6846                                         class_offset,
   6847                                         maybe_temp_loc,
   6848                                         kCompilerReadBarrierOption);
   6849       // Walk over the class hierarchy to find a match.
   6850       MipsLabel loop, success;
   6851       __ Bind(&loop);
   6852       __ Beq(out, cls, &success);
   6853       // /* HeapReference<Class> */ out = out->super_class_
   6854       GenerateReferenceLoadOneRegister(instruction,
   6855                                        out_loc,
   6856                                        super_offset,
   6857                                        maybe_temp_loc,
   6858                                        kCompilerReadBarrierOption);
   6859       __ Bnez(out, &loop);
   6860       // If `out` is null, we use it for the result, and jump to `done`.
   6861       __ B(&done);
   6862       __ Bind(&success);
   6863       __ LoadConst32(out, 1);
   6864       break;
   6865     }
   6866 
   6867     case TypeCheckKind::kArrayObjectCheck: {
   6868       // /* HeapReference<Class> */ out = obj->klass_
   6869       GenerateReferenceLoadTwoRegisters(instruction,
   6870                                         out_loc,
   6871                                         obj_loc,
   6872                                         class_offset,
   6873                                         maybe_temp_loc,
   6874                                         kCompilerReadBarrierOption);
   6875       // Do an exact check.
   6876       MipsLabel success;
   6877       __ Beq(out, cls, &success);
   6878       // Otherwise, we need to check that the object's class is a non-primitive array.
   6879       // /* HeapReference<Class> */ out = out->component_type_
   6880       GenerateReferenceLoadOneRegister(instruction,
   6881                                        out_loc,
   6882                                        component_offset,
   6883                                        maybe_temp_loc,
   6884                                        kCompilerReadBarrierOption);
   6885       // If `out` is null, we use it for the result, and jump to `done`.
   6886       __ Beqz(out, &done);
   6887       __ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
   6888       static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
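               // out = (primitive_type < 1): 1 iff the component type is a
               // reference type, i.e. `obj` is a non-primitive array.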
   6889       __ Sltiu(out, out, 1);
   6890       __ B(&done);
   6891       __ Bind(&success);
   6892       __ LoadConst32(out, 1);
   6893       break;
   6894     }
   6895 
   6896     case TypeCheckKind::kArrayCheck: {
   6897       // No read barrier since the slow path will retry upon failure.
   6898       // /* HeapReference<Class> */ out = obj->klass_
   6899       GenerateReferenceLoadTwoRegisters(instruction,
   6900                                         out_loc,
   6901                                         obj_loc,
   6902                                         class_offset,
   6903                                         maybe_temp_loc,
   6904                                         kWithoutReadBarrier);
   6905       DCHECK(locations->OnlyCallsOnSlowPath());
   6906       slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
   6907                                                                      /* is_fatal */ false);
   6908       codegen_->AddSlowPath(slow_path);
   6909       __ Bne(out, cls, slow_path->GetEntryLabel());
   6910       __ LoadConst32(out, 1);
   6911       break;
   6912     }
   6913 
   6914     case TypeCheckKind::kUnresolvedCheck:
   6915     case TypeCheckKind::kInterfaceCheck: {
   6916       // Note that we indeed only call on slow path, but we always go
   6917       // into the slow path for the unresolved and interface check
   6918       // cases.
   6919       //
   6920       // We cannot directly call the InstanceofNonTrivial runtime
   6921       // entry point without resorting to a type checking slow path
   6922       // here (i.e. by calling InvokeRuntime directly), as it would
   6923       // require to assign fixed registers for the inputs of this
   6924       // HInstanceOf instruction (following the runtime calling
   6925       // convention), which might be cluttered by the potential first
   6926       // read barrier emission at the beginning of this method.
   6927       //
   6928       // TODO: Introduce a new runtime entry point taking the object
   6929       // to test (instead of its class) as argument, and let it deal
   6930       // with the read barrier issues. This will let us refactor this
   6931       // case of the `switch` code as it was previously (with a direct
   6932       // call to the runtime not using a type checking slow path).
   6933       // This should also be beneficial for the other cases above.
   6934       DCHECK(locations->OnlyCallsOnSlowPath());
   6935       slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
   6936                                                                      /* is_fatal */ false);
   6937       codegen_->AddSlowPath(slow_path);
   6938       __ B(slow_path->GetEntryLabel());
   6939       break;
   6940     }
   6941   }
   6942 
   6943   __ Bind(&done);
   6944 
   6945   if (slow_path != nullptr) {
   6946     __ Bind(slow_path->GetExitLabel());
   6947   }
   6948 }
   6949 
   6950 void LocationsBuilderMIPS::VisitIntConstant(HIntConstant* constant) {
   6951   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
   6952   locations->SetOut(Location::ConstantLocation(constant));
   6953 }
   6954 
   6955 void InstructionCodeGeneratorMIPS::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
   6956   // Will be generated at use site.
   6957 }
   6958 
   6959 void LocationsBuilderMIPS::VisitNullConstant(HNullConstant* constant) {
   6960   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
   6961   locations->SetOut(Location::ConstantLocation(constant));
   6962 }
   6963 
   6964 void InstructionCodeGeneratorMIPS::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
   6965   // Will be generated at use site.
   6966 }
   6967 
   6968 void LocationsBuilderMIPS::HandleInvoke(HInvoke* invoke) {
   6969   InvokeDexCallingConventionVisitorMIPS calling_convention_visitor;
   6970   CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
   6971 }
   6972 
   6973 void LocationsBuilderMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
   6974   HandleInvoke(invoke);
   6975   // art_quick_imt_conflict_trampoline expects the hidden argument in
   6976   // register T7, so reserve T7 as a temporary for it.
   6977   invoke->GetLocations()->AddTemp(Location::RegisterLocation(T7));
   6978 }
   6979 
   6980 void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
   6981   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   6982   Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
   6983   Location receiver = invoke->GetLocations()->InAt(0);
   6984   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   6985   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
   6986 
   6987   // Set the hidden argument to the dex method index.
   6988   __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
   6989                  invoke->GetDexMethodIndex());
   6990 
   6991   // temp = object->GetClass();
   6992   if (receiver.IsStackSlot()) {
   6993     __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
   6994     __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
   6995   } else {
   6996     __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
   6997   }
   6998   codegen_->MaybeRecordImplicitNullCheck(invoke);
   6999   // Instead of simply (possibly) unpoisoning `temp` here, we should
   7000   // emit a read barrier for the previous class reference load.
   7001   // However this is not required in practice, as this is an
   7002   // intermediate/temporary reference and because the current
   7003   // concurrent copying collector keeps the from-space memory
   7004   // intact/accessible until the end of the marking phase (though it
   7005   // may not do so in the future).
   7006   __ MaybeUnpoisonHeapReference(temp);
   7007   __ LoadFromOffset(kLoadWord, temp, temp,
   7008       mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
   7009   uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
   7010       invoke->GetImtIndex(), kMipsPointerSize));
   7011   // temp = temp->GetImtEntryAt(method_offset);
   7012   __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   7013   // T9 = temp->GetEntryPoint();
   7014   __ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
   7015   // T9();
   7016   __ Jalr(T9);
   7017   __ NopIfNoReordering();
   7018   DCHECK(!codegen_->IsLeafMethod());
   7019   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
   7020 }
   7021 
   7022 void LocationsBuilderMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   7023   IntrinsicLocationsBuilderMIPS intrinsic(codegen_);
   7024   if (intrinsic.TryDispatch(invoke)) {
   7025     return;
   7026   }
   7027 
   7028   HandleInvoke(invoke);
   7029 }
   7030 
   7031 void LocationsBuilderMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
   7032   // Explicit clinit checks triggered by static invokes must have been pruned by
   7033   // art::PrepareForRegisterAllocation.
   7034   DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
   7035 
   7036   bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
   7037   bool has_extra_input = invoke->HasPcRelativeMethodLoadKind() && !is_r6;
   7038 
   7039   IntrinsicLocationsBuilderMIPS intrinsic(codegen_);
   7040   if (intrinsic.TryDispatch(invoke)) {
   7041     if (invoke->GetLocations()->CanCall() && has_extra_input) {
   7042       invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::Any());
   7043     }
   7044     return;
   7045   }
   7046 
   7047   HandleInvoke(invoke);
   7048 
   7049   // Add the extra input register if either the dex cache array base register
   7050   // or the PC-relative base register for accessing literals is needed.
   7051   if (has_extra_input) {
   7052     invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::RequiresRegister());
   7053   }
   7054 }
   7055 
   7056 void LocationsBuilderMIPS::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
   7057   HandleInvoke(invoke);
   7058 }
   7059 
   7060 void InstructionCodeGeneratorMIPS::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
   7061   codegen_->GenerateInvokePolymorphicCall(invoke);
   7062 }
   7063 
   7064 static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS* codegen) {
   7065   if (invoke->GetLocations()->Intrinsified()) {
   7066     IntrinsicCodeGeneratorMIPS intrinsic(codegen);
   7067     intrinsic.Dispatch(invoke);
   7068     return true;
   7069   }
   7070   return false;
   7071 }
   7072 
   7073 HLoadString::LoadKind CodeGeneratorMIPS::GetSupportedLoadStringKind(
   7074     HLoadString::LoadKind desired_string_load_kind) {
   7075   // We disable PC-relative load on pre-R6 when there is an irreducible loop, as the optimization
   7076   // is incompatible with it.
   7077   // TODO: Create as many HMipsComputeBaseMethodAddress instructions as needed for methods
   7078   // with irreducible loops.
   7079   bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
   7080   bool is_r6 = GetInstructionSetFeatures().IsR6();
   7081   bool fallback_load = has_irreducible_loops && !is_r6;
   7082   switch (desired_string_load_kind) {
   7083     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
   7084     case HLoadString::LoadKind::kBssEntry:
   7085       DCHECK(!Runtime::Current()->UseJitCompilation());
   7086       break;
   7087     case HLoadString::LoadKind::kBootImageAddress:
   7088       break;
   7089     case HLoadString::LoadKind::kJitTableAddress:
   7090       DCHECK(Runtime::Current()->UseJitCompilation());
   7091       fallback_load = false;
   7092       break;
   7093     case HLoadString::LoadKind::kRuntimeCall:
   7094       fallback_load = false;
   7095       break;
   7096   }
   7097   if (fallback_load) {
   7098     desired_string_load_kind = HLoadString::LoadKind::kRuntimeCall;
   7099   }
   7100   return desired_string_load_kind;
   7101 }
   7102 
   7103 HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
   7104     HLoadClass::LoadKind desired_class_load_kind) {
   7105   // We disable PC-relative load on pre-R6 when there is an irreducible loop, as the optimization
   7106   // is incompatible with it.
   7107   // TODO: Create as many HMipsComputeBaseMethodAddress instructions as needed for methods
   7108   // with irreducible loops.
   7109   bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
   7110   bool is_r6 = GetInstructionSetFeatures().IsR6();
   7111   bool fallback_load = has_irreducible_loops && !is_r6;
   7112   switch (desired_class_load_kind) {
   7113     case HLoadClass::LoadKind::kInvalid:
   7114       LOG(FATAL) << "UNREACHABLE";
   7115       UNREACHABLE();
   7116     case HLoadClass::LoadKind::kReferrersClass:
   7117       fallback_load = false;
   7118       break;
   7119     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
   7120     case HLoadClass::LoadKind::kBssEntry:
   7121       DCHECK(!Runtime::Current()->UseJitCompilation());
   7122       break;
   7123     case HLoadClass::LoadKind::kBootImageAddress:
   7124       break;
   7125     case HLoadClass::LoadKind::kJitTableAddress:
   7126       DCHECK(Runtime::Current()->UseJitCompilation());
   7127       fallback_load = false;
   7128       break;
   7129     case HLoadClass::LoadKind::kRuntimeCall:
   7130       fallback_load = false;
   7131       break;
   7132   }
   7133   if (fallback_load) {
   7134     desired_class_load_kind = HLoadClass::LoadKind::kRuntimeCall;
   7135   }
   7136   return desired_class_load_kind;
   7137 }
   7138 
   7139 Register CodeGeneratorMIPS::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
   7140                                                                   Register temp) {
   7141   CHECK(!GetInstructionSetFeatures().IsR6());
   7142   CHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
   7143   Location location = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
   7144   if (!invoke->GetLocations()->Intrinsified()) {
   7145     return location.AsRegister<Register>();
   7146   }
   7147   // For intrinsics we allow any location, so it may be on the stack.
   7148   if (!location.IsRegister()) {
   7149     __ LoadFromOffset(kLoadWord, temp, SP, location.GetStackIndex());
   7150     return temp;
   7151   }
   7152   // For register locations, check if the register was saved. If so, get it from the stack.
   7153   // Note: There is a chance that the register was saved but not overwritten, so we could
   7154   // save one load. However, since this is just an intrinsic slow path we prefer this
   7155   // simple and more robust approach rather than trying to determine if that's the case.
   7156   SlowPathCode* slow_path = GetCurrentSlowPath();
   7157   DCHECK(slow_path != nullptr);  // For intrinsified invokes the call is emitted on the slow path.
   7158   if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
   7159     int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
   7160     __ LoadFromOffset(kLoadWord, temp, SP, stack_offset);
   7161     return temp;
   7162   }
   7163   return location.AsRegister<Register>();
   7164 }
   7165 
   7166 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS::GetSupportedInvokeStaticOrDirectDispatch(
   7167       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
   7168       HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
   7169   HInvokeStaticOrDirect::DispatchInfo dispatch_info = desired_dispatch_info;
   7170   // We disable PC-relative load on pre-R6 when there is an irreducible loop, as the optimization
   7171   // is incompatible with it.
   7172   // TODO: Create as many HMipsComputeBaseMethodAddress instructions as needed for methods
   7173   // with irreducible loops.
   7174   bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
   7175   bool is_r6 = GetInstructionSetFeatures().IsR6();
   7176   bool fallback_load = has_irreducible_loops && !is_r6;
   7177   switch (dispatch_info.method_load_kind) {
   7178     case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative:
   7179     case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry:
   7180       break;
   7181     default:
   7182       fallback_load = false;
   7183       break;
   7184   }
   7185   if (fallback_load) {
   7186     dispatch_info.method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall;
   7187     dispatch_info.method_load_data = 0;
   7188   }
   7189   return dispatch_info;
   7190 }
   7191 
   7192 void CodeGeneratorMIPS::GenerateStaticOrDirectCall(
   7193     HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
   7194   // All registers are assumed to be correctly set up per the calling convention.
   7195   Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
   7196   HInvokeStaticOrDirect::MethodLoadKind method_load_kind = invoke->GetMethodLoadKind();
   7197   HInvokeStaticOrDirect::CodePtrLocation code_ptr_location = invoke->GetCodePtrLocation();
   7198   bool is_r6 = GetInstructionSetFeatures().IsR6();
   7199   Register base_reg = (invoke->HasPcRelativeMethodLoadKind() && !is_r6)
   7200       ? GetInvokeStaticOrDirectExtraParameter(invoke, temp.AsRegister<Register>())
   7201       : ZERO;
   7202 
   7203   switch (method_load_kind) {
   7204     case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
   7205       // temp = thread->string_init_entrypoint
   7206       uint32_t offset =
   7207           GetThreadOffset<kMipsPointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
   7208       __ LoadFromOffset(kLoadWord,
   7209                         temp.AsRegister<Register>(),
   7210                         TR,
   7211                         offset);
   7212       break;
   7213     }
   7214     case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
   7215       callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
   7216       break;
   7217     case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: {
   7218       DCHECK(GetCompilerOptions().IsBootImage());
   7219       PcRelativePatchInfo* info_high = NewPcRelativeMethodPatch(invoke->GetTargetMethod());
   7220       PcRelativePatchInfo* info_low =
   7221           NewPcRelativeMethodPatch(invoke->GetTargetMethod(), info_high);
   7222       bool reordering = __ SetReorder(false);
   7223       Register temp_reg = temp.AsRegister<Register>();
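               // temp = target method address, formed from the PC-relative high
               // half plus the addiu below; both immediates are patched at link
               // time (0x5678 is only a placeholder).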
   7224       EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg, info_low);
   7225       __ Addiu(temp_reg, TMP, /* placeholder */ 0x5678);
   7226       __ SetReorder(reordering);
   7227       break;
   7228     }
   7229     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
   7230       __ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
   7231       break;
   7232     case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
   7233       PcRelativePatchInfo* info_high = NewMethodBssEntryPatch(
   7234           MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()));
   7235       PcRelativePatchInfo* info_low = NewMethodBssEntryPatch(
   7236           MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()), info_high);
   7237       Register temp_reg = temp.AsRegister<Register>();
   7238       bool reordering = __ SetReorder(false);
   7239       EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg, info_low);
   7240       __ Lw(temp_reg, TMP, /* placeholder */ 0x5678);
   7241       __ SetReorder(reordering);
   7242       break;
   7243     }
   7244     case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: {
   7245       GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
   7246       return;  // No code pointer retrieval; the runtime performs the call directly.
   7247     }
   7248   }
   7249 
   7250   switch (code_ptr_location) {
   7251     case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
   7252       __ Bal(&frame_entry_label_);
   7253       break;
   7254     case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
   7255       // T9 = callee_method->entry_point_from_quick_compiled_code_;
   7256       __ LoadFromOffset(kLoadWord,
   7257                         T9,
   7258                         callee_method.AsRegister<Register>(),
   7259                         ArtMethod::EntryPointFromQuickCompiledCodeOffset(
   7260                             kMipsPointerSize).Int32Value());
   7261       // T9()
   7262       __ Jalr(T9);
   7263       __ NopIfNoReordering();
   7264       break;
   7265   }
   7266   RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
   7267 
   7268   DCHECK(!IsLeafMethod());
   7269 }
   7270 
   7271 void InstructionCodeGeneratorMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
   7272   // Explicit clinit checks triggered by static invokes must have been pruned by
   7273   // art::PrepareForRegisterAllocation.
   7274   DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
   7275 
   7276   if (TryGenerateIntrinsicCode(invoke, codegen_)) {
   7277     return;
   7278   }
   7279 
   7280   LocationSummary* locations = invoke->GetLocations();
   7281   codegen_->GenerateStaticOrDirectCall(invoke,
   7282                                        locations->HasTemps()
   7283                                            ? locations->GetTemp(0)
   7284                                            : Location::NoLocation());
   7285 }
   7286 
   7287 void CodeGeneratorMIPS::GenerateVirtualCall(
   7288     HInvokeVirtual* invoke, Location temp_location, SlowPathCode* slow_path) {
   7289   // Use the calling convention instead of the location of the receiver, as
   7290   // intrinsics may have put the receiver in a different register. In the intrinsics
   7291   // slow path, the arguments have been moved to the right place, so here we are
   7292   // guaranteed that the receiver is the first register of the calling convention.
   7293   InvokeDexCallingConvention calling_convention;
   7294   Register receiver = calling_convention.GetRegisterAt(0);
   7295 
   7296   Register temp = temp_location.AsRegister<Register>();
   7297   size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
   7298       invoke->GetVTableIndex(), kMipsPointerSize).SizeValue();
   7299   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   7300   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
   7301 
   7302   // temp = object->GetClass();
   7303   __ LoadFromOffset(kLoadWord, temp, receiver, class_offset);
   7304   MaybeRecordImplicitNullCheck(invoke);
   7305   // Instead of simply (possibly) unpoisoning `temp` here, we should
   7306   // emit a read barrier for the previous class reference load.
    7307   // However, this is not required in practice, as this is an
    7308   // intermediate/temporary reference, and because the current
    7309   // concurrent copying collector keeps the from-space memory
    7310   // intact/accessible until the end of the marking phase (though
    7311   // future collectors may not).
   7312   __ MaybeUnpoisonHeapReference(temp);
   7313   // temp = temp->GetMethodAt(method_offset);
   7314   __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   7315   // T9 = temp->GetEntryPoint();
   7316   __ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
   7317   // T9();
   7318   __ Jalr(T9);
   7319   __ NopIfNoReordering();
   7320   RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
   7321 }
   7322 
   7323 void InstructionCodeGeneratorMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   7324   if (TryGenerateIntrinsicCode(invoke, codegen_)) {
   7325     return;
   7326   }
   7327 
   7328   codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
   7329   DCHECK(!codegen_->IsLeafMethod());
   7330 }
   7331 
   7332 void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
   7333   HLoadClass::LoadKind load_kind = cls->GetLoadKind();
   7334   if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
   7335     InvokeRuntimeCallingConvention calling_convention;
   7336     Location loc = Location::RegisterLocation(calling_convention.GetRegisterAt(0));
   7337     CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(cls, loc, loc);
   7338     return;
   7339   }
   7340   DCHECK(!cls->NeedsAccessCheck());
   7341   const bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   7342   const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
   7343   LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
   7344       ? LocationSummary::kCallOnSlowPath
   7345       : LocationSummary::kNoCall;
   7346   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
   7347   if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
   7348     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
   7349   }
   7350   switch (load_kind) {
   7351     // We need an extra register for PC-relative literals on R2.
   7352     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
   7353     case HLoadClass::LoadKind::kBootImageAddress:
   7354     case HLoadClass::LoadKind::kBssEntry:
   7355       if (isR6) {
   7356         break;
   7357       }
   7358       FALLTHROUGH_INTENDED;
   7359     case HLoadClass::LoadKind::kReferrersClass:
   7360       locations->SetInAt(0, Location::RequiresRegister());
   7361       break;
   7362     default:
   7363       break;
   7364   }
   7365   locations->SetOut(Location::RequiresRegister());
   7366   if (load_kind == HLoadClass::LoadKind::kBssEntry) {
   7367     if (!kUseReadBarrier || kUseBakerReadBarrier) {
   7368       // Rely on the type resolution or initialization and marking to save everything we need.
   7369       // Request a temp to hold the BSS entry location for the slow path.
   7370       locations->AddTemp(Location::RequiresRegister());
   7371       RegisterSet caller_saves = RegisterSet::Empty();
   7372       InvokeRuntimeCallingConvention calling_convention;
   7373       caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   7374       locations->SetCustomSlowPathCallerSaves(caller_saves);
   7375     } else {
   7376       // For non-Baker read barriers we have a temp-clobbering call.
   7377     }
   7378   }
   7379 }
   7380 
   7381 // NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
   7382 // move.
   7383 void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
   7384   HLoadClass::LoadKind load_kind = cls->GetLoadKind();
   7385   if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
   7386     codegen_->GenerateLoadClassRuntimeCall(cls);
   7387     return;
   7388   }
   7389   DCHECK(!cls->NeedsAccessCheck());
   7390 
   7391   LocationSummary* locations = cls->GetLocations();
   7392   Location out_loc = locations->Out();
   7393   Register out = out_loc.AsRegister<Register>();
   7394   Register base_or_current_method_reg;
   7395   bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   7396   switch (load_kind) {
   7397     // We need an extra register for PC-relative literals on R2.
   7398     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
   7399     case HLoadClass::LoadKind::kBootImageAddress:
   7400     case HLoadClass::LoadKind::kBssEntry:
   7401       base_or_current_method_reg = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
   7402       break;
   7403     case HLoadClass::LoadKind::kReferrersClass:
   7404     case HLoadClass::LoadKind::kRuntimeCall:
   7405       base_or_current_method_reg = locations->InAt(0).AsRegister<Register>();
   7406       break;
   7407     default:
   7408       base_or_current_method_reg = ZERO;
   7409       break;
   7410   }
   7411 
   7412   const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
   7413       ? kWithoutReadBarrier
   7414       : kCompilerReadBarrierOption;
   7415   bool generate_null_check = false;
   7416   CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high = nullptr;
   7417   switch (load_kind) {
   7418     case HLoadClass::LoadKind::kReferrersClass: {
   7419       DCHECK(!cls->CanCallRuntime());
   7420       DCHECK(!cls->MustGenerateClinitCheck());
   7421       // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
   7422       GenerateGcRootFieldLoad(cls,
   7423                               out_loc,
   7424                               base_or_current_method_reg,
   7425                               ArtMethod::DeclaringClassOffset().Int32Value(),
   7426                               read_barrier_option);
   7427       break;
   7428     }
   7429     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
   7430       DCHECK(codegen_->GetCompilerOptions().IsBootImage());
   7431       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
   7432       CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
   7433           codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
   7434       CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
   7435           codegen_->NewPcRelativeTypePatch(cls->GetDexFile(), cls->GetTypeIndex(), info_high);
   7436       bool reordering = __ SetReorder(false);
   7437       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
   7438                                                      out,
   7439                                                      base_or_current_method_reg,
   7440                                                      info_low);
   7441       __ Addiu(out, out, /* placeholder */ 0x5678);
   7442       __ SetReorder(reordering);
   7443       break;
   7444     }
   7445     case HLoadClass::LoadKind::kBootImageAddress: {
   7446       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
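               // The Class object lives in the boot image, which does not move, so its
               // 32-bit address can be embedded directly as a deduplicated literal.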
   7447       uint32_t address = dchecked_integral_cast<uint32_t>(
   7448           reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
   7449       DCHECK_NE(address, 0u);
   7450       __ LoadLiteral(out,
   7451                      base_or_current_method_reg,
   7452                      codegen_->DeduplicateBootImageAddressLiteral(address));
   7453       break;
   7454     }
   7455     case HLoadClass::LoadKind::kBssEntry: {
   7456       bss_info_high = codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
   7457       CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
   7458           codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex(), bss_info_high);
   7459       constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
   7460       Register temp = non_baker_read_barrier ? out : locations->GetTemp(0).AsRegister<Register>();
   7461       bool reordering = __ SetReorder(false);
   7462       codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high,
   7463                                                      temp,
   7464                                                      base_or_current_method_reg,
   7465                                                      info_low);
   7466       GenerateGcRootFieldLoad(cls, out_loc, temp, /* placeholder */ 0x5678, read_barrier_option);
   7467       __ SetReorder(reordering);
   7468       generate_null_check = true;
   7469       break;
   7470     }
   7471     case HLoadClass::LoadKind::kJitTableAddress: {
   7472       CodeGeneratorMIPS::JitPatchInfo* info = codegen_->NewJitRootClassPatch(cls->GetDexFile(),
   7473                                                                              cls->GetTypeIndex(),
   7474                                                                              cls->GetClass());
   7475       bool reordering = __ SetReorder(false);
   7476       __ Bind(&info->high_label);
   7477       __ Lui(out, /* placeholder */ 0x1234);
   7478       GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678, read_barrier_option);
   7479       __ SetReorder(reordering);
   7480       break;
   7481     }
   7482     case HLoadClass::LoadKind::kRuntimeCall:
   7483     case HLoadClass::LoadKind::kInvalid:
   7484       LOG(FATAL) << "UNREACHABLE";
   7485       UNREACHABLE();
   7486   }
   7487 
   7488   if (generate_null_check || cls->MustGenerateClinitCheck()) {
   7489     DCHECK(cls->CanCallRuntime());
   7490     SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
   7491         cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
   7492     codegen_->AddSlowPath(slow_path);
   7493     if (generate_null_check) {
   7494       __ Beqz(out, slow_path->GetEntryLabel());
   7495     }
   7496     if (cls->MustGenerateClinitCheck()) {
   7497       GenerateClassInitializationCheck(slow_path, out);
   7498     } else {
   7499       __ Bind(slow_path->GetExitLabel());
   7500     }
   7501   }
   7502 }
   7503 
   7504 static int32_t GetExceptionTlsOffset() {
   7505   return Thread::ExceptionOffset<kMipsPointerSize>().Int32Value();
   7506 }
   7507 
   7508 void LocationsBuilderMIPS::VisitLoadException(HLoadException* load) {
   7509   LocationSummary* locations =
   7510       new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
   7511   locations->SetOut(Location::RequiresRegister());
   7512 }
   7513 
   7514 void InstructionCodeGeneratorMIPS::VisitLoadException(HLoadException* load) {
   7515   Register out = load->GetLocations()->Out().AsRegister<Register>();
   7516   __ LoadFromOffset(kLoadWord, out, TR, GetExceptionTlsOffset());
   7517 }
   7518 
   7519 void LocationsBuilderMIPS::VisitClearException(HClearException* clear) {
   7520   new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
   7521 }
   7522 
   7523 void InstructionCodeGeneratorMIPS::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
   7524   __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
   7525 }
   7526 
   7527 void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
   7528   LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
   7529   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
   7530   HLoadString::LoadKind load_kind = load->GetLoadKind();
   7531   const bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   7532   switch (load_kind) {
   7533     // We need an extra register for PC-relative literals on R2.
   7534     case HLoadString::LoadKind::kBootImageAddress:
   7535     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
   7536     case HLoadString::LoadKind::kBssEntry:
   7537       if (isR6) {
   7538         break;
   7539       }
   7540       FALLTHROUGH_INTENDED;
   7541     // We need an extra register for PC-relative dex cache accesses.
   7542     case HLoadString::LoadKind::kRuntimeCall:
   7543       locations->SetInAt(0, Location::RequiresRegister());
   7544       break;
   7545     default:
   7546       break;
   7547   }
   7548   if (load_kind == HLoadString::LoadKind::kRuntimeCall) {
   7549     InvokeRuntimeCallingConvention calling_convention;
   7550     locations->SetOut(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   7551   } else {
   7552     locations->SetOut(Location::RequiresRegister());
   7553     if (load_kind == HLoadString::LoadKind::kBssEntry) {
   7554       if (!kUseReadBarrier || kUseBakerReadBarrier) {
   7555         // Rely on the pResolveString and marking to save everything we need.
   7556         // Request a temp to hold the BSS entry location for the slow path.
   7557         locations->AddTemp(Location::RequiresRegister());
   7558         RegisterSet caller_saves = RegisterSet::Empty();
   7559         InvokeRuntimeCallingConvention calling_convention;
   7560         caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   7561         locations->SetCustomSlowPathCallerSaves(caller_saves);
   7562       } else {
   7563         // For non-Baker read barriers we have a temp-clobbering call.
   7564       }
   7565     }
   7566   }
   7567 }
   7568 
   7569 // NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
   7570 // move.
   7571 void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
   7572   HLoadString::LoadKind load_kind = load->GetLoadKind();
   7573   LocationSummary* locations = load->GetLocations();
   7574   Location out_loc = locations->Out();
   7575   Register out = out_loc.AsRegister<Register>();
   7576   Register base_or_current_method_reg;
   7577   bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   7578   switch (load_kind) {
   7579     // We need an extra register for PC-relative literals on R2.
   7580     case HLoadString::LoadKind::kBootImageAddress:
   7581     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
   7582     case HLoadString::LoadKind::kBssEntry:
   7583       base_or_current_method_reg = isR6 ? ZERO : locations->InAt(0).AsRegister<Register>();
   7584       break;
   7585     default:
   7586       base_or_current_method_reg = ZERO;
   7587       break;
   7588   }
   7589 
   7590   switch (load_kind) {
   7591     case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
   7592       DCHECK(codegen_->GetCompilerOptions().IsBootImage());
   7593       CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
   7594           codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
   7595       CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
   7596           codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
   7597       bool reordering = __ SetReorder(false);
   7598       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
   7599                                                      out,
   7600                                                      base_or_current_method_reg,
   7601                                                      info_low);
   7602       __ Addiu(out, out, /* placeholder */ 0x5678);
   7603       __ SetReorder(reordering);
   7604       return;  // No dex cache slow path.
   7605     }
   7606     case HLoadString::LoadKind::kBootImageAddress: {
   7607       uint32_t address = dchecked_integral_cast<uint32_t>(
   7608           reinterpret_cast<uintptr_t>(load->GetString().Get()));
   7609       DCHECK_NE(address, 0u);
   7610       __ LoadLiteral(out,
   7611                      base_or_current_method_reg,
   7612                      codegen_->DeduplicateBootImageAddressLiteral(address));
   7613       return;  // No dex cache slow path.
   7614     }
   7615     case HLoadString::LoadKind::kBssEntry: {
   7616       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
   7617       CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
   7618           codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex());
   7619       CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
   7620           codegen_->NewPcRelativeStringPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
   7621       constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
   7622       Register temp = non_baker_read_barrier ? out : locations->GetTemp(0).AsRegister<Register>();
   7623       bool reordering = __ SetReorder(false);
   7624       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
   7625                                                      temp,
   7626                                                      base_or_current_method_reg,
   7627                                                      info_low);
   7628       GenerateGcRootFieldLoad(load,
   7629                               out_loc,
   7630                               temp,
   7631                               /* placeholder */ 0x5678,
   7632                               kCompilerReadBarrierOption);
   7633       __ SetReorder(reordering);
   7634       SlowPathCodeMIPS* slow_path =
   7635           new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load, info_high);
   7636       codegen_->AddSlowPath(slow_path);
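               // A null BSS entry means the String is not resolved yet: take the slow
               // path, which calls pResolveString and stores the result back into the
               // entry (located via info_high) so later executions stay on the fast path.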
   7637       __ Beqz(out, slow_path->GetEntryLabel());
   7638       __ Bind(slow_path->GetExitLabel());
   7639       return;
   7640     }
   7641     case HLoadString::LoadKind::kJitTableAddress: {
   7642       CodeGeneratorMIPS::JitPatchInfo* info =
   7643           codegen_->NewJitRootStringPatch(load->GetDexFile(),
   7644                                           load->GetStringIndex(),
   7645                                           load->GetString());
   7646       bool reordering = __ SetReorder(false);
   7647       __ Bind(&info->high_label);
   7648       __ Lui(out, /* placeholder */ 0x1234);
   7649       GenerateGcRootFieldLoad(load,
   7650                               out_loc,
   7651                               out,
   7652                               /* placeholder */ 0x5678,
   7653                               kCompilerReadBarrierOption);
   7654       __ SetReorder(reordering);
   7655       return;
   7656     }
   7657     default:
   7658       break;
   7659   }
   7660 
    7661   // TODO: Re-add the compiler code to do the string dex cache lookup.
   7662   DCHECK(load_kind == HLoadString::LoadKind::kRuntimeCall);
   7663   InvokeRuntimeCallingConvention calling_convention;
   7664   DCHECK_EQ(calling_convention.GetRegisterAt(0), out);
   7665   __ LoadConst32(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
   7666   codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
   7667   CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
   7668 }
   7669 
   7670 void LocationsBuilderMIPS::VisitLongConstant(HLongConstant* constant) {
   7671   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
   7672   locations->SetOut(Location::ConstantLocation(constant));
   7673 }
   7674 
   7675 void InstructionCodeGeneratorMIPS::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
   7676   // Will be generated at use site.
   7677 }
   7678 
   7679 void LocationsBuilderMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
   7680   LocationSummary* locations =
   7681       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
   7682   InvokeRuntimeCallingConvention calling_convention;
   7683   locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   7684 }
   7685 
   7686 void InstructionCodeGeneratorMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
   7687   if (instruction->IsEnter()) {
   7688     codegen_->InvokeRuntime(kQuickLockObject, instruction, instruction->GetDexPc());
   7689     CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
    7690   } else {
    7691     codegen_->InvokeRuntime(kQuickUnlockObject, instruction, instruction->GetDexPc());
    7692     CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
    7693   }
   7694 }
   7695 
   7696 void LocationsBuilderMIPS::VisitMul(HMul* mul) {
   7697   LocationSummary* locations =
   7698       new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
   7699   switch (mul->GetResultType()) {
   7700     case Primitive::kPrimInt:
   7701     case Primitive::kPrimLong:
   7702       locations->SetInAt(0, Location::RequiresRegister());
   7703       locations->SetInAt(1, Location::RequiresRegister());
   7704       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   7705       break;
   7706 
   7707     case Primitive::kPrimFloat:
   7708     case Primitive::kPrimDouble:
   7709       locations->SetInAt(0, Location::RequiresFpuRegister());
   7710       locations->SetInAt(1, Location::RequiresFpuRegister());
   7711       locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
   7712       break;
   7713 
   7714     default:
   7715       LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
   7716   }
   7717 }
   7718 
   7719 void InstructionCodeGeneratorMIPS::VisitMul(HMul* instruction) {
   7720   Primitive::Type type = instruction->GetType();
   7721   LocationSummary* locations = instruction->GetLocations();
   7722   bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   7723 
   7724   switch (type) {
   7725     case Primitive::kPrimInt: {
   7726       Register dst = locations->Out().AsRegister<Register>();
   7727       Register lhs = locations->InAt(0).AsRegister<Register>();
   7728       Register rhs = locations->InAt(1).AsRegister<Register>();
   7729 
   7730       if (isR6) {
   7731         __ MulR6(dst, lhs, rhs);
   7732       } else {
   7733         __ MulR2(dst, lhs, rhs);
   7734       }
   7735       break;
   7736     }
   7737     case Primitive::kPrimLong: {
   7738       Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
   7739       Register dst_low = locations->Out().AsRegisterPairLow<Register>();
   7740       Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
   7741       Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
   7742       Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
   7743       Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>();
   7744 
    7745       // Extra checks needed because of the existence of the A1_A2 register pair.
    7746       // The algorithm is wrong if dst_high is either lhs_low or rhs_low
    7747       // (e.g. lhs=a0_a1, rhs=a2_a3 and dst=a1_a2).
   7748       DCHECK_NE(dst_high, lhs_low);
   7749       DCHECK_NE(dst_high, rhs_low);
   7750 
   7751       // A_B * C_D
   7752       // dst_hi:  [ low(A*D) + low(B*C) + hi(B*D) ]
   7753       // dst_lo:  [ low(B*D) ]
    7754       // Note: R2 and R6 MUL produce the low 32 bits of the multiplication result.
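               // Worked example: writing lhs = A*2^32 + B and rhs = C*2^32 + D, the full
               // product is A*C*2^64 + (A*D + B*C)*2^32 + B*D. Modulo 2^64 the A*C term
               // vanishes, leaving
               //   dst = ((low(A*D) + low(B*C) + hi(B*D)) mod 2^32)*2^32 + low(B*D),
               // which is exactly what both code paths below compute.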
   7755 
   7756       if (isR6) {
   7757         __ MulR6(TMP, lhs_high, rhs_low);
   7758         __ MulR6(dst_high, lhs_low, rhs_high);
   7759         __ Addu(dst_high, dst_high, TMP);
   7760         __ MuhuR6(TMP, lhs_low, rhs_low);
   7761         __ Addu(dst_high, dst_high, TMP);
   7762         __ MulR6(dst_low, lhs_low, rhs_low);
   7763       } else {
   7764         __ MulR2(TMP, lhs_high, rhs_low);
   7765         __ MulR2(dst_high, lhs_low, rhs_high);
   7766         __ Addu(dst_high, dst_high, TMP);
   7767         __ MultuR2(lhs_low, rhs_low);
   7768         __ Mfhi(TMP);
   7769         __ Addu(dst_high, dst_high, TMP);
   7770         __ Mflo(dst_low);
   7771       }
   7772       break;
   7773     }
   7774     case Primitive::kPrimFloat:
   7775     case Primitive::kPrimDouble: {
   7776       FRegister dst = locations->Out().AsFpuRegister<FRegister>();
   7777       FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
   7778       FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
   7779       if (type == Primitive::kPrimFloat) {
   7780         __ MulS(dst, lhs, rhs);
   7781       } else {
   7782         __ MulD(dst, lhs, rhs);
   7783       }
   7784       break;
   7785     }
   7786     default:
   7787       LOG(FATAL) << "Unexpected mul type " << type;
   7788   }
   7789 }
   7790 
   7791 void LocationsBuilderMIPS::VisitNeg(HNeg* neg) {
   7792   LocationSummary* locations =
   7793       new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
   7794   switch (neg->GetResultType()) {
   7795     case Primitive::kPrimInt:
   7796     case Primitive::kPrimLong:
   7797       locations->SetInAt(0, Location::RequiresRegister());
   7798       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   7799       break;
   7800 
   7801     case Primitive::kPrimFloat:
   7802     case Primitive::kPrimDouble:
   7803       locations->SetInAt(0, Location::RequiresFpuRegister());
   7804       locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
   7805       break;
   7806 
   7807     default:
   7808       LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
   7809   }
   7810 }
   7811 
   7812 void InstructionCodeGeneratorMIPS::VisitNeg(HNeg* instruction) {
   7813   Primitive::Type type = instruction->GetType();
   7814   LocationSummary* locations = instruction->GetLocations();
   7815 
   7816   switch (type) {
   7817     case Primitive::kPrimInt: {
   7818       Register dst = locations->Out().AsRegister<Register>();
   7819       Register src = locations->InAt(0).AsRegister<Register>();
   7820       __ Subu(dst, ZERO, src);
   7821       break;
   7822     }
   7823     case Primitive::kPrimLong: {
   7824       Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
   7825       Register dst_low = locations->Out().AsRegisterPairLow<Register>();
   7826       Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
   7827       Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
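               // Two's-complement negation with borrow: dst_low = 0 - src_low; the
               // borrow into the high word is 1 iff src_low != 0, which Sltu computes
               // as (0 < dst_low); finally dst_high = 0 - src_high - borrow.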
   7828       __ Subu(dst_low, ZERO, src_low);
   7829       __ Sltu(TMP, ZERO, dst_low);
   7830       __ Subu(dst_high, ZERO, src_high);
   7831       __ Subu(dst_high, dst_high, TMP);
   7832       break;
   7833     }
   7834     case Primitive::kPrimFloat:
   7835     case Primitive::kPrimDouble: {
   7836       FRegister dst = locations->Out().AsFpuRegister<FRegister>();
   7837       FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
   7838       if (type == Primitive::kPrimFloat) {
   7839         __ NegS(dst, src);
   7840       } else {
   7841         __ NegD(dst, src);
   7842       }
   7843       break;
   7844     }
   7845     default:
   7846       LOG(FATAL) << "Unexpected neg type " << type;
   7847   }
   7848 }
   7849 
   7850 void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) {
   7851   LocationSummary* locations =
   7852       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
   7853   InvokeRuntimeCallingConvention calling_convention;
   7854   locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
   7855   locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   7856   locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
   7857 }
   7858 
   7859 void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
   7860   // Note: if heap poisoning is enabled, the entry point takes care
   7861   // of poisoning the reference.
   7862   QuickEntrypointEnum entrypoint =
   7863       CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
   7864   codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
   7865   CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
   7866   DCHECK(!codegen_->IsLeafMethod());
   7867 }
   7868 
   7869 void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
   7870   LocationSummary* locations =
   7871       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
   7872   InvokeRuntimeCallingConvention calling_convention;
   7873   if (instruction->IsStringAlloc()) {
   7874     locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
   7875   } else {
   7876     locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   7877   }
   7878   locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
   7879 }
   7880 
   7881 void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) {
   7882   // Note: if heap poisoning is enabled, the entry point takes care
   7883   // of poisoning the reference.
   7884   if (instruction->IsStringAlloc()) {
   7885     // String is allocated through StringFactory. Call NewEmptyString entry point.
   7886     Register temp = instruction->GetLocations()->GetTemp(0).AsRegister<Register>();
   7887     MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
   7888     __ LoadFromOffset(kLoadWord, temp, TR, QUICK_ENTRY_POINT(pNewEmptyString));
   7889     __ LoadFromOffset(kLoadWord, T9, temp, code_offset.Int32Value());
   7890     __ Jalr(T9);
   7891     __ NopIfNoReordering();
   7892     codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
   7893   } else {
   7894     codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
   7895     CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
   7896   }
   7897 }
   7898 
   7899 void LocationsBuilderMIPS::VisitNot(HNot* instruction) {
   7900   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
   7901   locations->SetInAt(0, Location::RequiresRegister());
   7902   locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   7903 }
   7904 
   7905 void InstructionCodeGeneratorMIPS::VisitNot(HNot* instruction) {
   7906   Primitive::Type type = instruction->GetType();
   7907   LocationSummary* locations = instruction->GetLocations();
   7908 
   7909   switch (type) {
   7910     case Primitive::kPrimInt: {
   7911       Register dst = locations->Out().AsRegister<Register>();
   7912       Register src = locations->InAt(0).AsRegister<Register>();
   7913       __ Nor(dst, src, ZERO);
   7914       break;
   7915     }
   7916 
   7917     case Primitive::kPrimLong: {
   7918       Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
   7919       Register dst_low = locations->Out().AsRegisterPairLow<Register>();
   7920       Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
   7921       Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
   7922       __ Nor(dst_high, src_high, ZERO);
   7923       __ Nor(dst_low, src_low, ZERO);
   7924       break;
   7925     }
   7926 
   7927     default:
   7928       LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
   7929   }
   7930 }
   7931 
   7932 void LocationsBuilderMIPS::VisitBooleanNot(HBooleanNot* instruction) {
   7933   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
   7934   locations->SetInAt(0, Location::RequiresRegister());
   7935   locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   7936 }
   7937 
   7938 void InstructionCodeGeneratorMIPS::VisitBooleanNot(HBooleanNot* instruction) {
   7939   LocationSummary* locations = instruction->GetLocations();
   7940   __ Xori(locations->Out().AsRegister<Register>(),
   7941           locations->InAt(0).AsRegister<Register>(),
   7942           1);
   7943 }
   7944 
   7945 void LocationsBuilderMIPS::VisitNullCheck(HNullCheck* instruction) {
   7946   LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
   7947   locations->SetInAt(0, Location::RequiresRegister());
   7948 }
   7949 
   7950 void CodeGeneratorMIPS::GenerateImplicitNullCheck(HNullCheck* instruction) {
   7951   if (CanMoveNullCheckToUser(instruction)) {
   7952     return;
   7953   }
   7954   Location obj = instruction->GetLocations()->InAt(0);
   7955 
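           // Loading into ZERO discards the value but still faults if `obj` is null;
           // the fault handler uses the PC info recorded below to turn the fault into
           // a NullPointerException.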
   7956   __ Lw(ZERO, obj.AsRegister<Register>(), 0);
   7957   RecordPcInfo(instruction, instruction->GetDexPc());
   7958 }
   7959 
   7960 void CodeGeneratorMIPS::GenerateExplicitNullCheck(HNullCheck* instruction) {
   7961   SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS(instruction);
   7962   AddSlowPath(slow_path);
   7963 
   7964   Location obj = instruction->GetLocations()->InAt(0);
   7965 
   7966   __ Beqz(obj.AsRegister<Register>(), slow_path->GetEntryLabel());
   7967 }
   7968 
   7969 void InstructionCodeGeneratorMIPS::VisitNullCheck(HNullCheck* instruction) {
   7970   codegen_->GenerateNullCheck(instruction);
   7971 }
   7972 
   7973 void LocationsBuilderMIPS::VisitOr(HOr* instruction) {
   7974   HandleBinaryOp(instruction);
   7975 }
   7976 
   7977 void InstructionCodeGeneratorMIPS::VisitOr(HOr* instruction) {
   7978   HandleBinaryOp(instruction);
   7979 }
   7980 
   7981 void LocationsBuilderMIPS::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
   7982   LOG(FATAL) << "Unreachable";
   7983 }
   7984 
   7985 void InstructionCodeGeneratorMIPS::VisitParallelMove(HParallelMove* instruction) {
   7986   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
   7987 }
   7988 
   7989 void LocationsBuilderMIPS::VisitParameterValue(HParameterValue* instruction) {
   7990   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
   7991   Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
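           // Incoming stack arguments live in the caller's frame, so rebase the slot by
           // this method's frame size to make it addressable from the callee's SP.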
   7992   if (location.IsStackSlot()) {
   7993     location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
   7994   } else if (location.IsDoubleStackSlot()) {
   7995     location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
   7996   }
   7997   locations->SetOut(location);
   7998 }
   7999 
   8000 void InstructionCodeGeneratorMIPS::VisitParameterValue(HParameterValue* instruction
   8001                                                          ATTRIBUTE_UNUSED) {
   8002   // Nothing to do, the parameter is already at its location.
   8003 }
   8004 
   8005 void LocationsBuilderMIPS::VisitCurrentMethod(HCurrentMethod* instruction) {
   8006   LocationSummary* locations =
   8007       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
   8008   locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
   8009 }
   8010 
   8011 void InstructionCodeGeneratorMIPS::VisitCurrentMethod(HCurrentMethod* instruction
   8012                                                         ATTRIBUTE_UNUSED) {
   8013   // Nothing to do, the method is already at its location.
   8014 }
   8015 
   8016 void LocationsBuilderMIPS::VisitPhi(HPhi* instruction) {
   8017   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
   8018   for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
   8019     locations->SetInAt(i, Location::Any());
   8020   }
   8021   locations->SetOut(Location::Any());
   8022 }
   8023 
   8024 void InstructionCodeGeneratorMIPS::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
   8025   LOG(FATAL) << "Unreachable";
   8026 }
   8027 
   8028 void LocationsBuilderMIPS::VisitRem(HRem* rem) {
   8029   Primitive::Type type = rem->GetResultType();
   8030   LocationSummary::CallKind call_kind =
   8031       (type == Primitive::kPrimInt) ? LocationSummary::kNoCall : LocationSummary::kCallOnMainOnly;
   8032   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
   8033 
   8034   switch (type) {
   8035     case Primitive::kPrimInt:
   8036       locations->SetInAt(0, Location::RequiresRegister());
   8037       locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
   8038       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   8039       break;
   8040 
   8041     case Primitive::kPrimLong: {
   8042       InvokeRuntimeCallingConvention calling_convention;
   8043       locations->SetInAt(0, Location::RegisterPairLocation(
   8044           calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
   8045       locations->SetInAt(1, Location::RegisterPairLocation(
   8046           calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
   8047       locations->SetOut(calling_convention.GetReturnLocation(type));
   8048       break;
   8049     }
   8050 
   8051     case Primitive::kPrimFloat:
   8052     case Primitive::kPrimDouble: {
   8053       InvokeRuntimeCallingConvention calling_convention;
   8054       locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
   8055       locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
   8056       locations->SetOut(calling_convention.GetReturnLocation(type));
   8057       break;
   8058     }
   8059 
   8060     default:
   8061       LOG(FATAL) << "Unexpected rem type " << type;
   8062   }
   8063 }
   8064 
   8065 void InstructionCodeGeneratorMIPS::VisitRem(HRem* instruction) {
   8066   Primitive::Type type = instruction->GetType();
   8067 
   8068   switch (type) {
   8069     case Primitive::kPrimInt:
   8070       GenerateDivRemIntegral(instruction);
   8071       break;
   8072     case Primitive::kPrimLong: {
   8073       codegen_->InvokeRuntime(kQuickLmod, instruction, instruction->GetDexPc());
   8074       CheckEntrypointTypes<kQuickLmod, int64_t, int64_t, int64_t>();
   8075       break;
   8076     }
   8077     case Primitive::kPrimFloat: {
   8078       codegen_->InvokeRuntime(kQuickFmodf, instruction, instruction->GetDexPc());
   8079       CheckEntrypointTypes<kQuickFmodf, float, float, float>();
   8080       break;
   8081     }
   8082     case Primitive::kPrimDouble: {
   8083       codegen_->InvokeRuntime(kQuickFmod, instruction, instruction->GetDexPc());
   8084       CheckEntrypointTypes<kQuickFmod, double, double, double>();
   8085       break;
   8086     }
   8087     default:
   8088       LOG(FATAL) << "Unexpected rem type " << type;
   8089   }
   8090 }
   8091 
   8092 void LocationsBuilderMIPS::VisitConstructorFence(HConstructorFence* constructor_fence) {
   8093   constructor_fence->SetLocations(nullptr);
   8094 }
   8095 
   8096 void InstructionCodeGeneratorMIPS::VisitConstructorFence(
   8097     HConstructorFence* constructor_fence ATTRIBUTE_UNUSED) {
   8098   GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
   8099 }
   8100 
   8101 void LocationsBuilderMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
   8102   memory_barrier->SetLocations(nullptr);
   8103 }
   8104 
   8105 void InstructionCodeGeneratorMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
   8106   GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
   8107 }
   8108 
   8109 void LocationsBuilderMIPS::VisitReturn(HReturn* ret) {
   8110   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
   8111   Primitive::Type return_type = ret->InputAt(0)->GetType();
   8112   locations->SetInAt(0, MipsReturnLocation(return_type));
   8113 }
   8114 
   8115 void InstructionCodeGeneratorMIPS::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
   8116   codegen_->GenerateFrameExit();
   8117 }
   8118 
   8119 void LocationsBuilderMIPS::VisitReturnVoid(HReturnVoid* ret) {
   8120   ret->SetLocations(nullptr);
   8121 }
   8122 
   8123 void InstructionCodeGeneratorMIPS::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
   8124   codegen_->GenerateFrameExit();
   8125 }
   8126 
   8127 void LocationsBuilderMIPS::VisitRor(HRor* ror) {
   8128   HandleShift(ror);
   8129 }
   8130 
   8131 void InstructionCodeGeneratorMIPS::VisitRor(HRor* ror) {
   8132   HandleShift(ror);
   8133 }
   8134 
   8135 void LocationsBuilderMIPS::VisitShl(HShl* shl) {
   8136   HandleShift(shl);
   8137 }
   8138 
   8139 void InstructionCodeGeneratorMIPS::VisitShl(HShl* shl) {
   8140   HandleShift(shl);
   8141 }
   8142 
   8143 void LocationsBuilderMIPS::VisitShr(HShr* shr) {
   8144   HandleShift(shr);
   8145 }
   8146 
   8147 void InstructionCodeGeneratorMIPS::VisitShr(HShr* shr) {
   8148   HandleShift(shr);
   8149 }
   8150 
   8151 void LocationsBuilderMIPS::VisitSub(HSub* instruction) {
   8152   HandleBinaryOp(instruction);
   8153 }
   8154 
   8155 void InstructionCodeGeneratorMIPS::VisitSub(HSub* instruction) {
   8156   HandleBinaryOp(instruction);
   8157 }
   8158 
   8159 void LocationsBuilderMIPS::VisitStaticFieldGet(HStaticFieldGet* instruction) {
   8160   HandleFieldGet(instruction, instruction->GetFieldInfo());
   8161 }
   8162 
   8163 void InstructionCodeGeneratorMIPS::VisitStaticFieldGet(HStaticFieldGet* instruction) {
   8164   HandleFieldGet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
   8165 }
   8166 
   8167 void LocationsBuilderMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
   8168   HandleFieldSet(instruction, instruction->GetFieldInfo());
   8169 }
   8170 
   8171 void InstructionCodeGeneratorMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
   8172   HandleFieldSet(instruction,
   8173                  instruction->GetFieldInfo(),
   8174                  instruction->GetDexPc(),
   8175                  instruction->GetValueCanBeNull());
   8176 }
   8177 
   8178 void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldGet(
   8179     HUnresolvedInstanceFieldGet* instruction) {
   8180   FieldAccessCallingConventionMIPS calling_convention;
   8181   codegen_->CreateUnresolvedFieldLocationSummary(instruction,
   8182                                                  instruction->GetFieldType(),
   8183                                                  calling_convention);
   8184 }
   8185 
   8186 void InstructionCodeGeneratorMIPS::VisitUnresolvedInstanceFieldGet(
   8187     HUnresolvedInstanceFieldGet* instruction) {
   8188   FieldAccessCallingConventionMIPS calling_convention;
   8189   codegen_->GenerateUnresolvedFieldAccess(instruction,
   8190                                           instruction->GetFieldType(),
   8191                                           instruction->GetFieldIndex(),
   8192                                           instruction->GetDexPc(),
   8193                                           calling_convention);
   8194 }
   8195 
   8196 void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldSet(
   8197     HUnresolvedInstanceFieldSet* instruction) {
   8198   FieldAccessCallingConventionMIPS calling_convention;
   8199   codegen_->CreateUnresolvedFieldLocationSummary(instruction,
   8200                                                  instruction->GetFieldType(),
   8201                                                  calling_convention);
   8202 }
   8203 
   8204 void InstructionCodeGeneratorMIPS::VisitUnresolvedInstanceFieldSet(
   8205     HUnresolvedInstanceFieldSet* instruction) {
   8206   FieldAccessCallingConventionMIPS calling_convention;
   8207   codegen_->GenerateUnresolvedFieldAccess(instruction,
   8208                                           instruction->GetFieldType(),
   8209                                           instruction->GetFieldIndex(),
   8210                                           instruction->GetDexPc(),
   8211                                           calling_convention);
   8212 }
   8213 
   8214 void LocationsBuilderMIPS::VisitUnresolvedStaticFieldGet(
   8215     HUnresolvedStaticFieldGet* instruction) {
   8216   FieldAccessCallingConventionMIPS calling_convention;
   8217   codegen_->CreateUnresolvedFieldLocationSummary(instruction,
   8218                                                  instruction->GetFieldType(),
   8219                                                  calling_convention);
   8220 }
   8221 
   8222 void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldGet(
   8223     HUnresolvedStaticFieldGet* instruction) {
   8224   FieldAccessCallingConventionMIPS calling_convention;
   8225   codegen_->GenerateUnresolvedFieldAccess(instruction,
   8226                                           instruction->GetFieldType(),
   8227                                           instruction->GetFieldIndex(),
   8228                                           instruction->GetDexPc(),
   8229                                           calling_convention);
   8230 }
   8231 
   8232 void LocationsBuilderMIPS::VisitUnresolvedStaticFieldSet(
   8233     HUnresolvedStaticFieldSet* instruction) {
   8234   FieldAccessCallingConventionMIPS calling_convention;
   8235   codegen_->CreateUnresolvedFieldLocationSummary(instruction,
   8236                                                  instruction->GetFieldType(),
   8237                                                  calling_convention);
   8238 }
   8239 
   8240 void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldSet(
   8241     HUnresolvedStaticFieldSet* instruction) {
   8242   FieldAccessCallingConventionMIPS calling_convention;
   8243   codegen_->GenerateUnresolvedFieldAccess(instruction,
   8244                                           instruction->GetFieldType(),
   8245                                           instruction->GetFieldIndex(),
   8246                                           instruction->GetDexPc(),
   8247                                           calling_convention);
   8248 }
   8249 
   8250 void LocationsBuilderMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
   8251   LocationSummary* locations =
   8252       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
    8253   // In the suspend check slow path, usually there are no caller-save registers at all.
    8254   // If SIMD instructions are present, however, we force spilling all live SIMD
    8255   // registers in full width (since the runtime only saves/restores the lower part).
   8256   locations->SetCustomSlowPathCallerSaves(
   8257       GetGraph()->HasSIMD() ? RegisterSet::AllFpu() : RegisterSet::Empty());
   8258 }
   8259 
   8260 void InstructionCodeGeneratorMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
   8261   HBasicBlock* block = instruction->GetBlock();
   8262   if (block->GetLoopInformation() != nullptr) {
   8263     DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
   8264     // The back edge will generate the suspend check.
   8265     return;
   8266   }
   8267   if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
   8268     // The goto will generate the suspend check.
   8269     return;
   8270   }
   8271   GenerateSuspendCheck(instruction, nullptr);
   8272 }
   8273 
   8274 void LocationsBuilderMIPS::VisitThrow(HThrow* instruction) {
   8275   LocationSummary* locations =
   8276       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
   8277   InvokeRuntimeCallingConvention calling_convention;
   8278   locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   8279 }
   8280 
   8281 void InstructionCodeGeneratorMIPS::VisitThrow(HThrow* instruction) {
   8282   codegen_->InvokeRuntime(kQuickDeliverException, instruction, instruction->GetDexPc());
   8283   CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
   8284 }
   8285 
   8286 void LocationsBuilderMIPS::VisitTypeConversion(HTypeConversion* conversion) {
   8287   Primitive::Type input_type = conversion->GetInputType();
   8288   Primitive::Type result_type = conversion->GetResultType();
   8289   DCHECK_NE(input_type, result_type);
   8290   bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   8291 
   8292   if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
   8293       (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
   8294     LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
   8295   }
   8296 
   8297   LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
   8298   if (!isR6 &&
   8299       ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
   8300        (result_type == Primitive::kPrimLong && Primitive::IsFloatingPointType(input_type)))) {
   8301     call_kind = LocationSummary::kCallOnMainOnly;
   8302   }
   8303 
   8304   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
   8305 
   8306   if (call_kind == LocationSummary::kNoCall) {
   8307     if (Primitive::IsFloatingPointType(input_type)) {
   8308       locations->SetInAt(0, Location::RequiresFpuRegister());
   8309     } else {
   8310       locations->SetInAt(0, Location::RequiresRegister());
   8311     }
   8312 
   8313     if (Primitive::IsFloatingPointType(result_type)) {
   8314       locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
   8315     } else {
   8316       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   8317     }
   8318   } else {
   8319     InvokeRuntimeCallingConvention calling_convention;
   8320 
   8321     if (Primitive::IsFloatingPointType(input_type)) {
   8322       locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
   8323     } else {
   8324       DCHECK_EQ(input_type, Primitive::kPrimLong);
   8325       locations->SetInAt(0, Location::RegisterPairLocation(
   8326                  calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
   8327     }
   8328 
   8329     locations->SetOut(calling_convention.GetReturnLocation(result_type));
   8330   }
   8331 }
   8332 
   8333 void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversion) {
   8334   LocationSummary* locations = conversion->GetLocations();
   8335   Primitive::Type result_type = conversion->GetResultType();
   8336   Primitive::Type input_type = conversion->GetInputType();
   8337   bool has_sign_extension = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
   8338   bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   8339 
   8340   DCHECK_NE(input_type, result_type);
   8341 
   8342   if (result_type == Primitive::kPrimLong && Primitive::IsIntegralType(input_type)) {
   8343     Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
   8344     Register dst_low = locations->Out().AsRegisterPairLow<Register>();
   8345     Register src = locations->InAt(0).AsRegister<Register>();
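             // Widening int -> long: copy the low word, then arithmetic-shift the source
             // right by 31 so dst_high becomes 0 or -1 according to the sign bit.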
   8346 
   8347     if (dst_low != src) {
   8348       __ Move(dst_low, src);
   8349     }
   8350     __ Sra(dst_high, src, 31);
   8351   } else if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
   8352     Register dst = locations->Out().AsRegister<Register>();
   8353     Register src = (input_type == Primitive::kPrimLong)
   8354         ? locations->InAt(0).AsRegisterPairLow<Register>()
   8355         : locations->InAt(0).AsRegister<Register>();
   8356 
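             // Narrowing integral conversions keep only the low bits of `src`: char is
             // zero-extended 16-bit (andi), while byte/short are sign-extended, either
             // with seb/seh (R2+) or with a shift-left/arithmetic-shift-right pair.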
   8357     switch (result_type) {
   8358       case Primitive::kPrimChar:
   8359         __ Andi(dst, src, 0xFFFF);
   8360         break;
   8361       case Primitive::kPrimByte:
   8362         if (has_sign_extension) {
   8363           __ Seb(dst, src);
   8364         } else {
          __ Sll(dst, src, 24);
          __ Sra(dst, dst, 24);
        }
        break;
      case Primitive::kPrimShort:
        if (has_sign_extension) {
          __ Seh(dst, src);
        } else {
          __ Sll(dst, src, 16);
          __ Sra(dst, dst, 16);
        }
        break;
      case Primitive::kPrimInt:
        if (dst != src) {
          __ Move(dst, src);
        }
        break;

      default:
        LOG(FATAL) << "Unexpected type conversion from " << input_type
                   << " to " << result_type;
    }
  } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
    if (input_type == Primitive::kPrimLong) {
      if (isR6) {
        // cvt.s.l/cvt.d.l requires MIPSR2+ with FR=1. MIPS32R6 is implemented as a secondary
        // architecture on top of MIPS64R6, which has FR=1, and therefore can use the instruction.
        Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
        Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
        FRegister dst = locations->Out().AsFpuRegister<FRegister>();
        __ Mtc1(src_low, FTMP);
        __ Mthc1(src_high, FTMP);
        if (result_type == Primitive::kPrimFloat) {
          __ Cvtsl(dst, FTMP);
        } else {
          __ Cvtdl(dst, FTMP);
        }
      } else {
        QuickEntrypointEnum entrypoint = (result_type == Primitive::kPrimFloat) ? kQuickL2f
                                                                                : kQuickL2d;
        codegen_->InvokeRuntime(entrypoint, conversion, conversion->GetDexPc());
        if (result_type == Primitive::kPrimFloat) {
          CheckEntrypointTypes<kQuickL2f, float, int64_t>();
        } else {
          CheckEntrypointTypes<kQuickL2d, double, int64_t>();
        }
      }
    } else {
      Register src = locations->InAt(0).AsRegister<Register>();
      FRegister dst = locations->Out().AsFpuRegister<FRegister>();
      __ Mtc1(src, FTMP);
      if (result_type == Primitive::kPrimFloat) {
        __ Cvtsw(dst, FTMP);
      } else {
        __ Cvtdw(dst, FTMP);
      }
    }
  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);

    // When NAN2008=1 (R6), the truncate instruction caps the output at the minimum/maximum
    // value of the output type if the input is out of range after the truncation, and
    // produces 0 when the input is a NaN. In other words, the three special cases produce
    // three distinct results. This matches the desired float/double-to-int/long conversion
    // exactly.
    //
    // When NAN2008=0 (R2 and before), the truncate instruction produces the maximum positive
    // value when the input is either a NaN or is outside of the range of the output type
    // after the truncation. In other words, the three special cases (NaN, too small, too big)
    // produce the same result.
    //
    // The code takes care of the different behaviors by first comparing the input to the
    // minimum output value (-2**63 for truncating to long, -2**31 for truncating to int).
    // If the input is greater than or equal to the minimum, it proceeds to the truncate
    // instruction, which will handle such an input the same way irrespective of NAN2008.
    // Otherwise the input is compared to itself to determine whether it is a NaN or not
    // in order to return either zero or the minimum value.
    if (result_type == Primitive::kPrimLong) {
      if (isR6) {
        // trunc.l.s/trunc.l.d requires MIPSR2+ with FR=1. MIPS32R6 is implemented as a secondary
        // architecture on top of MIPS64R6, which has FR=1, and therefore can use the instruction.
        FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
        Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
        Register dst_low = locations->Out().AsRegisterPairLow<Register>();

        if (input_type == Primitive::kPrimFloat) {
          __ TruncLS(FTMP, src);
        } else {
          __ TruncLD(FTMP, src);
        }
        __ Mfc1(dst_low, FTMP);
        __ Mfhc1(dst_high, FTMP);
      } else {
        QuickEntrypointEnum entrypoint = (input_type == Primitive::kPrimFloat) ? kQuickF2l
                                                                               : kQuickD2l;
        codegen_->InvokeRuntime(entrypoint, conversion, conversion->GetDexPc());
        if (input_type == Primitive::kPrimFloat) {
          CheckEntrypointTypes<kQuickF2l, int64_t, float>();
        } else {
          CheckEntrypointTypes<kQuickD2l, int64_t, double>();
        }
      }
    } else {
      FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
      Register dst = locations->Out().AsRegister<Register>();
      MipsLabel truncate;
      MipsLabel done;

      if (!isR6) {
        if (input_type == Primitive::kPrimFloat) {
          uint32_t min_val = bit_cast<uint32_t, float>(std::numeric_limits<int32_t>::min());
          __ LoadConst32(TMP, min_val);
          __ Mtc1(TMP, FTMP);
        } else {
          uint64_t min_val = bit_cast<uint64_t, double>(std::numeric_limits<int32_t>::min());
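          // The low word of the double representation of INT32_MIN is all zeros, so only
          // the high word needs a loaded constant.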
          __ LoadConst32(TMP, High32Bits(min_val));
          __ Mtc1(ZERO, FTMP);
          __ MoveToFpuHigh(TMP, FTMP);
        }

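        // Set FP condition code 0 to (min <= src). A NaN input makes the comparison
        // unordered and leaves the code false, so NaNs do not take the branch below.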
        if (input_type == Primitive::kPrimFloat) {
          __ ColeS(0, FTMP, src);
        } else {
          __ ColeD(0, FTMP, src);
        }
        __ Bc1t(0, &truncate);

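        // The input is below the minimum or is a NaN. c.eq sets condition code 0 only
        // for non-NaN inputs; movf then overwrites dst with zero when the code is
        // false, yielding 0 for a NaN and INT32_MIN for an ordinary too-small value.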
        if (input_type == Primitive::kPrimFloat) {
          __ CeqS(0, src, src);
        } else {
          __ CeqD(0, src, src);
        }
        __ LoadConst32(dst, std::numeric_limits<int32_t>::min());
        __ Movf(dst, ZERO, 0);

        __ B(&done);

        __ Bind(&truncate);
      }

      if (input_type == Primitive::kPrimFloat) {
        __ TruncWS(FTMP, src);
      } else {
        __ TruncWD(FTMP, src);
      }
      __ Mfc1(dst, FTMP);

      if (!isR6) {
        __ Bind(&done);
      }
    }
  } else if (Primitive::IsFloatingPointType(result_type) &&
             Primitive::IsFloatingPointType(input_type)) {
    FRegister dst = locations->Out().AsFpuRegister<FRegister>();
    FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
    if (result_type == Primitive::kPrimFloat) {
      __ Cvtsd(dst, src);
    } else {
      __ Cvtds(dst, src);
    }
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}

void LocationsBuilderMIPS::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorMIPS::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderMIPS::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do: HBoundType is removed by the PrepareForRegisterAllocation pass.
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do: HBoundType is removed by the PrepareForRegisterAllocation pass.
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS::VisitEqual(HEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitEqual(HEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitNotEqual(HNotEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitNotEqual(HNotEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitLessThan(HLessThan* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitLessThan(HLessThan* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitGreaterThan(HGreaterThan* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitGreaterThan(HGreaterThan* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitBelow(HBelow* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitBelow(HBelow* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitAbove(HAbove* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitAbove(HAbove* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS::GenPackedSwitchWithCompares(Register value_reg,
                                                               int32_t lower_bound,
                                                               uint32_t num_entries,
                                                               HBasicBlock* switch_block,
                                                               HBasicBlock* default_block) {
  // Create a set of compare/jumps.
  Register temp_reg = TMP;
  __ Addiu32(temp_reg, value_reg, -lower_bound);
  // Jump to the default block if the index is negative.
  // Note: we don't separately check the case where the index is positive while
  // value < lower_bound (the subtraction above wrapped around), because then
  // index >= num_entries must hold and the compare chain below falls through to
  // the default block anyway. This saves one branch instruction.
  __ Bltz(temp_reg, codegen_->GetLabelOf(default_block));

  const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
  // Jump to successors[0] if value == lower_bound.
  __ Beqz(temp_reg, codegen_->GetLabelOf(successors[0]));
  int32_t last_index = 0;
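  // Each loop iteration below peels off two cases; at the top of every iteration,
  // temp_reg holds value - (lower_bound + last_index).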
  for (; num_entries - last_index > 2; last_index += 2) {
    __ Addiu(temp_reg, temp_reg, -2);
    // Jump to successors[last_index + 1] if value < case_value[last_index + 2].
    __ Bltz(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
    // Jump to successors[last_index + 2] if value == case_value[last_index + 2].
    __ Beqz(temp_reg, codegen_->GetLabelOf(successors[last_index + 2]));
  }
  if (num_entries - last_index == 2) {
    // The last missing case_value.
    __ Addiu(temp_reg, temp_reg, -1);
    __ Beqz(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
  }

  // And the default for any other value.
  if (!codegen_->GoesToNextBlock(switch_block, default_block)) {
    __ B(codegen_->GetLabelOf(default_block));
  }
}

void InstructionCodeGeneratorMIPS::GenTableBasedPackedSwitch(Register value_reg,
                                                             Register constant_area,
                                                             int32_t lower_bound,
                                                             uint32_t num_entries,
                                                             HBasicBlock* switch_block,
                                                             HBasicBlock* default_block) {
  // Create a jump table.
  std::vector<MipsLabel*> labels(num_entries);
  const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
  for (uint32_t i = 0; i < num_entries; i++) {
    labels[i] = codegen_->GetLabelOf(successors[i]);
  }
  JumpTable* table = __ CreateJumpTable(std::move(labels));

  // Is the value in range?
  __ Addiu32(TMP, value_reg, -lower_bound);
  if (IsInt<16>(static_cast<int32_t>(num_entries))) {
    __ Sltiu(AT, TMP, num_entries);
    __ Beqz(AT, codegen_->GetLabelOf(default_block));
  } else {
    __ LoadConst32(AT, num_entries);
    __ Bgeu(TMP, AT, codegen_->GetLabelOf(default_block));
  }

  // The value is within the table's range.
  // Load the target address from the jump table, indexing by the value.
  __ LoadLabelAddress(AT, constant_area, table->GetLabel());
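  // Each table entry is a 4-byte offset: scale the index by 4 and add the table base.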
  __ ShiftAndAdd(TMP, TMP, AT, 2, TMP);
  __ Lw(TMP, TMP, 0);
  // Compute the absolute target address by adding the table start address
  // (the table contains offsets to targets relative to its start).
  __ Addu(TMP, TMP, AT);
  // And jump.
  __ Jr(TMP);
  __ NopIfNoReordering();
}

void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
  int32_t lower_bound = switch_instr->GetStartValue();
  uint32_t num_entries = switch_instr->GetNumEntries();
  LocationSummary* locations = switch_instr->GetLocations();
  Register value_reg = locations->InAt(0).AsRegister<Register>();
  HBasicBlock* switch_block = switch_instr->GetBlock();
  HBasicBlock* default_block = switch_instr->GetDefaultBlock();

  if (codegen_->GetInstructionSetFeatures().IsR6() &&
      num_entries > kPackedSwitchJumpTableThreshold) {
    // R6 uses PC-relative addressing to access the jump table.
    // R2, on the other hand, requires an HMipsComputeBaseMethodAddress input to access
    // the jump table; this is implemented by changing HPackedSwitch to
    // HMipsPackedSwitch, which carries the HMipsComputeBaseMethodAddress.
    // See VisitMipsPackedSwitch() for the table-based implementation on R2.
    GenTableBasedPackedSwitch(value_reg,
                              ZERO,
                              lower_bound,
                              num_entries,
                              switch_block,
                              default_block);
  } else {
    GenPackedSwitchWithCompares(value_reg,
                                lower_bound,
                                num_entries,
                                switch_block,
                                default_block);
  }
}

void LocationsBuilderMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  // Constant area pointer (HMipsComputeBaseMethodAddress).
  locations->SetInAt(1, Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) {
  int32_t lower_bound = switch_instr->GetStartValue();
  uint32_t num_entries = switch_instr->GetNumEntries();
  LocationSummary* locations = switch_instr->GetLocations();
  Register value_reg = locations->InAt(0).AsRegister<Register>();
  Register constant_area = locations->InAt(1).AsRegister<Register>();
  HBasicBlock* switch_block = switch_instr->GetBlock();
  HBasicBlock* default_block = switch_instr->GetDefaultBlock();

  // This is an R2-only path. HPackedSwitch has been changed to
  // HMipsPackedSwitch, which carries the HMipsComputeBaseMethodAddress
  // required to address the jump table relative to the PC.
  GenTableBasedPackedSwitch(value_reg,
                            constant_area,
                            lower_bound,
                            num_entries,
                            switch_block,
                            default_block);
}

void LocationsBuilderMIPS::VisitMipsComputeBaseMethodAddress(
    HMipsComputeBaseMethodAddress* insn) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS::VisitMipsComputeBaseMethodAddress(
    HMipsComputeBaseMethodAddress* insn) {
  LocationSummary* locations = insn->GetLocations();
  Register reg = locations->Out().AsRegister<Register>();

  CHECK(!codegen_->GetInstructionSetFeatures().IsR6());

  // Generate a dummy PC-relative call to obtain PC.
  __ Nal();
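  // NAL is a branch-and-link whose branch is never taken: its only effect is to
  // store the address of the second instruction after it into RA. The move below
  // executes in its delay slot.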
  // Grab the return address off RA.
  __ Move(reg, RA);

  // Remember this offset (the obtained PC value) for later use with the constant area.
  __ BindPcRelBaseLabel();
}

void LocationsBuilderMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
  // The trampoline uses the same calling convention as the dex calling convention,
  // except that instead of being loaded with the target Method*, the method
  // argument register contains the method_idx.
  HandleInvoke(invoke);
}

void InstructionCodeGeneratorMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
  codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
}

void LocationsBuilderMIPS::VisitClassTableGet(HClassTableGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS::VisitClassTableGet(HClassTableGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
    uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
        instruction->GetIndex(), kMipsPointerSize).SizeValue();
    __ LoadFromOffset(kLoadWord,
                      locations->Out().AsRegister<Register>(),
                      locations->InAt(0).AsRegister<Register>(),
                      method_offset);
  } else {
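    // IMT lookup: first load the ImTable pointer from the class, then load the
    // method entry at the requested index from that table.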
    uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
        instruction->GetIndex(), kMipsPointerSize));
    __ LoadFromOffset(kLoadWord,
                      locations->Out().AsRegister<Register>(),
                      locations->InAt(0).AsRegister<Register>(),
                      mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
    __ LoadFromOffset(kLoadWord,
                      locations->Out().AsRegister<Register>(),
                      locations->Out().AsRegister<Register>(),
                      method_offset);
  }
}

#undef __
#undef QUICK_ENTRY_POINT

}  // namespace mips
}  // namespace art