/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_mips.h"

#include "arch/mips/asm_support_mips.h"
#include "arch/mips/entrypoints_direct_mips.h"
#include "arch/mips/instruction_set_features_mips.h"
#include "art_method.h"
#include "class_table.h"
#include "code_generator_utils.h"
#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsics_mips.h"
#include "linker/linker_patch.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
#include "stack_map_stream.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/mips/assembler_mips.h"
#include "utils/stack_checks.h"

namespace art {
namespace mips {

static constexpr int kCurrentMethodStackOffset = 0;
static constexpr Register kMethodRegisterArgument = A0;

// Flags controlling the use of thunks for Baker read barriers.
constexpr bool kBakerReadBarrierThunksEnableForFields = true;
constexpr bool kBakerReadBarrierThunksEnableForArrays = true;
constexpr bool kBakerReadBarrierThunksEnableForGcRoots = true;

Location MipsReturnLocation(DataType::Type return_type) {
  switch (return_type) {
    case DataType::Type::kReference:
    case DataType::Type::kBool:
    case DataType::Type::kUint8:
    case DataType::Type::kInt8:
    case DataType::Type::kUint16:
    case DataType::Type::kInt16:
    case DataType::Type::kUint32:
    case DataType::Type::kInt32:
      return Location::RegisterLocation(V0);

    case DataType::Type::kUint64:
    case DataType::Type::kInt64:
      return Location::RegisterPairLocation(V0, V1);

    case DataType::Type::kFloat32:
    case DataType::Type::kFloat64:
      return Location::FpuRegisterLocation(F0);

    case DataType::Type::kVoid:
      return Location();
  }
  UNREACHABLE();
}
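
// For example (per the switch above): an Int32 or reference result comes back
// in V0, an Int64 result in the V0/V1 pair, and a Float32/Float64 result in F0.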

Location InvokeDexCallingConventionVisitorMIPS::GetReturnLocation(DataType::Type type) const {
  return MipsReturnLocation(type);
}

Location InvokeDexCallingConventionVisitorMIPS::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}

Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(DataType::Type type) {
  Location next_location;

  switch (type) {
    case DataType::Type::kReference:
    case DataType::Type::kBool:
    case DataType::Type::kUint8:
    case DataType::Type::kInt8:
    case DataType::Type::kUint16:
    case DataType::Type::kInt16:
    case DataType::Type::kInt32: {
      uint32_t gp_index = gp_index_++;
      if (gp_index < calling_convention.GetNumberOfRegisters()) {
        next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index));
      } else {
        size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
        next_location = Location::StackSlot(stack_offset);
      }
      break;
    }

    case DataType::Type::kInt64: {
      uint32_t gp_index = gp_index_;
      gp_index_ += 2;
      if (gp_index + 1 < calling_convention.GetNumberOfRegisters()) {
        Register reg = calling_convention.GetRegisterAt(gp_index);
        if (reg == A1 || reg == A3) {
          gp_index_++;  // Skip A1(A3), and use A2_A3(T0_T1) instead.
          gp_index++;
        }
        Register low_even = calling_convention.GetRegisterAt(gp_index);
        Register high_odd = calling_convention.GetRegisterAt(gp_index + 1);
        DCHECK_EQ(low_even + 1, high_odd);
        next_location = Location::RegisterPairLocation(low_even, high_odd);
      } else {
        size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
        next_location = Location::DoubleStackSlot(stack_offset);
      }
      break;
    }

    // Note: both float and double types are stored in even FPU registers. On a 32-bit FPU, a
    // double takes up an even/odd register pair, while floats are stored in even registers only.
    // On a 64-bit FPU, both doubles and floats are stored in even registers only.
    case DataType::Type::kFloat32:
    case DataType::Type::kFloat64: {
      uint32_t float_index = float_index_++;
      if (float_index < calling_convention.GetNumberOfFpuRegisters()) {
        next_location = Location::FpuRegisterLocation(
            calling_convention.GetFpuRegisterAt(float_index));
      } else {
        size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
        next_location = DataType::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                    : Location::StackSlot(stack_offset);
      }
      break;
    }

    case DataType::Type::kUint32:
    case DataType::Type::kUint64:
    case DataType::Type::kVoid:
      LOG(FATAL) << "Unexpected parameter type " << type;
      break;
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += DataType::Is64BitType(type) ? 2 : 1;

  return next_location;
}
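
// Worked example, assuming the core argument registers {A1, A2, A3, T0, T1}
// implied by the "Skip A1(A3), and use A2_A3(T0_T1)" comment above: for a
// (int, long, int) signature, the first int lands in A1, the long in the
// even/odd pair A2/A3, and the trailing int in T0. A long whose first free
// register would be A1 or A3 skips one register so that the pair starts on
// an even register; the skipped register is simply left unused.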

Location InvokeRuntimeCallingConvention::GetReturnLocation(DataType::Type type) {
  return MipsReturnLocation(type);
}

// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()->  // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()

class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit BoundsCheckSlowPathMIPS(HBoundsCheck* instruction) : SlowPathCodeMIPS(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(0),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               DataType::Type::kInt32,
                               locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               DataType::Type::kInt32);
    QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt()
        ? kQuickThrowStringBounds
        : kQuickThrowArrayBounds;
    mips_codegen->InvokeRuntime(entrypoint, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS);
};
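
// The slow paths that follow share the shape illustrated above: bind the entry
// label, save live registers when execution can continue afterwards (or the
// exception can be caught), marshal arguments into the runtime calling
// convention (using a parallel move when inputs may overlap), call the Quick
// entrypoint, and then either fall through (fatal paths) or restore registers
// and branch to the exit label.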

class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit DivZeroCheckSlowPathMIPS(HDivZeroCheck* instruction) : SlowPathCodeMIPS(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    __ Bind(GetEntryLabel());
    mips_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS);
};

class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  LoadClassSlowPathMIPS(HLoadClass* cls,
                        HInstruction* at,
                        uint32_t dex_pc,
                        bool do_clinit)
      : SlowPathCodeMIPS(at),
        cls_(cls),
        dex_pc_(dex_pc),
        do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Location out = locations->Out();
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    InvokeRuntimeCallingConvention calling_convention;
    DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    dex::TypeIndex type_index = cls_->GetTypeIndex();
    __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
    QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
                                                : kQuickInitializeType;
    mips_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
    }

    // Move the class to the desired location.
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      DataType::Type type = instruction_->GetType();
      mips_codegen->MoveLocation(out,
                                 Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                 type);
    }
    RestoreLiveRegisters(codegen, locations);

    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The dex PC of `at`, the instruction to which this slow path is attached.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS);
};

class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit LoadStringSlowPathMIPS(HLoadString* instruction)
      : SlowPathCodeMIPS(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    DCHECK(instruction_->IsLoadString());
    DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    InvokeRuntimeCallingConvention calling_convention;
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    __ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
    mips_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();

    DataType::Type type = instruction_->GetType();
    mips_codegen->MoveLocation(locations->Out(),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               type);
    RestoreLiveRegisters(codegen, locations);

    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
};

class NullCheckSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit NullCheckSlowPathMIPS(HNullCheck* instr) : SlowPathCodeMIPS(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    mips_codegen->InvokeRuntime(kQuickThrowNullPointer,
                                instruction_,
                                instruction_->GetDexPc(),
                                this);
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS);
};

class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  SuspendCheckSlowPathMIPS(HSuspendCheck* instruction, HBasicBlock* successor)
      : SlowPathCodeMIPS(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);     // Only saves live vector registers for SIMD.
    mips_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    RestoreLiveRegisters(codegen, locations);  // Only restores live vector registers for SIMD.
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(mips_codegen->GetLabelOf(successor_));
    }
  }

  MipsLabel* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS"; }

  HBasicBlock* GetSuccessor() const {
    return successor_;
  }

 private:
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  MipsLabel return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS);
};

class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit TypeCheckSlowPathMIPS(HInstruction* instruction, bool is_fatal)
      : SlowPathCodeMIPS(instruction), is_fatal_(is_fatal) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    uint32_t dex_pc = instruction_->GetDexPc();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);

    __ Bind(GetEntryLabel());
    if (!is_fatal_ || instruction_->CanThrowIntoCatchBlock()) {
      SaveLiveRegisters(codegen, locations);
    }

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(0),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               DataType::Type::kReference,
                               locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               DataType::Type::kReference);
    if (instruction_->IsInstanceOf()) {
      mips_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
      DataType::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
    } else {
      DCHECK(instruction_->IsCheckCast());
      mips_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
      CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
    }

    if (!is_fatal_) {
      RestoreLiveRegisters(codegen, locations);
      __ B(GetExitLabel());
    }
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS"; }

  bool IsFatal() const OVERRIDE { return is_fatal_; }

 private:
  const bool is_fatal_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS);
};

class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit DeoptimizationSlowPathMIPS(HDeoptimize* instruction)
    : SlowPathCodeMIPS(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    __ Bind(GetEntryLabel());
    LocationSummary* locations = instruction_->GetLocations();
    SaveLiveRegisters(codegen, locations);
    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0),
                   static_cast<uint32_t>(instruction_->AsDeoptimize()->GetDeoptimizationKind()));
    mips_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
};

class ArraySetSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit ArraySetSlowPathMIPS(HInstruction* instruction) : SlowPathCodeMIPS(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
    parallel_move.AddMove(
        locations->InAt(0),
        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
        DataType::Type::kReference,
        nullptr);
    parallel_move.AddMove(
        locations->InAt(1),
        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
        DataType::Type::kInt32,
        nullptr);
    parallel_move.AddMove(
        locations->InAt(2),
        Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
        DataType::Type::kReference,
        nullptr);
    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);

    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    mips_codegen->InvokeRuntime(kQuickAputObject, instruction_, instruction_->GetDexPc(), this);
    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathMIPS"; }

 private:
  DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS);
};

// Slow path marking an object reference `ref` during a read
// barrier. The field `obj.field` in the object `obj` holding this
// reference does not get updated by this slow path after marking (see
// ReadBarrierMarkAndUpdateFieldSlowPathMIPS below for that).
//
// This means that after the execution of this slow path, `ref` will
// always be up-to-date, but `obj.field` may not; i.e., after the
// flip, `ref` will be a to-space reference, but `obj.field` will
// probably still be a from-space reference (unless it gets updated by
// another thread, or if another thread installed another object
// reference (different from `ref`) in `obj.field`).
//
// If `entrypoint` is a valid location it is assumed to already be
// holding the entrypoint. The case where the entrypoint is passed in
// is for the GcRoot read barrier.
class ReadBarrierMarkSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  ReadBarrierMarkSlowPathMIPS(HInstruction* instruction,
                              Location ref,
                              Location entrypoint = Location::NoLocation())
      : SlowPathCodeMIPS(instruction), ref_(ref), entrypoint_(entrypoint) {
    DCHECK(kEmitCompilerReadBarrier);
  }

  const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathMIPS"; }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Register ref_reg = ref_.AsRegister<Register>();
    DCHECK(locations->CanCall());
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
    DCHECK(instruction_->IsInstanceFieldGet() ||
           instruction_->IsStaticFieldGet() ||
           instruction_->IsArrayGet() ||
           instruction_->IsArraySet() ||
           instruction_->IsLoadClass() ||
           instruction_->IsLoadString() ||
           instruction_->IsInstanceOf() ||
           instruction_->IsCheckCast() ||
           (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()) ||
           (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
        << "Unexpected instruction in read barrier marking slow path: "
        << instruction_->DebugName();

    __ Bind(GetEntryLabel());
    // No need to save live registers; it's taken care of by the
    // entrypoint. Also, there is no need to update the stack mask,
    // as this runtime call will not trigger a garbage collection.
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    DCHECK((V0 <= ref_reg && ref_reg <= T7) ||
           (S2 <= ref_reg && ref_reg <= S7) ||
           (ref_reg == FP)) << ref_reg;
    // "Compact" slow path, saving two moves.
    //
    // Instead of using the standard runtime calling convention (input
    // and output in A0 and V0 respectively):
    //
    //   A0 <- ref
    //   V0 <- ReadBarrierMark(A0)
    //   ref <- V0
    //
    // we just use rX (the register containing `ref`) as input and output
    // of a dedicated entrypoint:
    //
    //   rX <- ReadBarrierMarkRegX(rX)
    //
    if (entrypoint_.IsValid()) {
      mips_codegen->ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction_, this);
      DCHECK_EQ(entrypoint_.AsRegister<Register>(), T9);
      __ Jalr(entrypoint_.AsRegister<Register>());
      __ NopIfNoReordering();
    } else {
      int32_t entry_point_offset =
          Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(ref_reg - 1);
      // This runtime call does not require a stack map.
      mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
                                                        instruction_,
                                                        this,
                                                        /* direct */ false);
    }
    __ B(GetExitLabel());
  }

 private:
  // The location (register) of the marked object reference.
  const Location ref_;

  // The location of the entrypoint if already loaded.
  const Location entrypoint_;

  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathMIPS);
};

// Slow path marking an object reference `ref` during a read barrier,
// and if needed, atomically updating the field `obj.field` in the
// object `obj` holding this reference after marking (contrary to
// ReadBarrierMarkSlowPathMIPS above, which never tries to update
// `obj.field`).
//
// This means that after the execution of this slow path, both `ref`
// and `obj.field` will be up-to-date; i.e., after the flip, both will
// hold the same to-space reference (unless another thread installed
// another object reference (different from `ref`) in `obj.field`).
class ReadBarrierMarkAndUpdateFieldSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  ReadBarrierMarkAndUpdateFieldSlowPathMIPS(HInstruction* instruction,
                                            Location ref,
                                            Register obj,
                                            Location field_offset,
                                            Register temp1)
      : SlowPathCodeMIPS(instruction),
        ref_(ref),
        obj_(obj),
        field_offset_(field_offset),
        temp1_(temp1) {
    DCHECK(kEmitCompilerReadBarrier);
  }

  const char* GetDescription() const OVERRIDE {
    return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS";
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Register ref_reg = ref_.AsRegister<Register>();
    DCHECK(locations->CanCall());
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
    // This slow path is only used by the UnsafeCASObject intrinsic.
    DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
        << "Unexpected instruction in read barrier marking and field updating slow path: "
        << instruction_->DebugName();
    DCHECK(instruction_->GetLocations()->Intrinsified());
    DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
    DCHECK(field_offset_.IsRegisterPair()) << field_offset_;

    __ Bind(GetEntryLabel());

    // Save the old reference.
    // Note that we cannot use AT or TMP to save the old reference, as those
    // are used by the code that follows, but we need the old reference after
    // the call to the ReadBarrierMarkRegX entry point.
    DCHECK_NE(temp1_, AT);
    DCHECK_NE(temp1_, TMP);
    __ Move(temp1_, ref_reg);

    // No need to save live registers; it's taken care of by the
    // entrypoint. Also, there is no need to update the stack mask,
    // as this runtime call will not trigger a garbage collection.
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    DCHECK((V0 <= ref_reg && ref_reg <= T7) ||
           (S2 <= ref_reg && ref_reg <= S7) ||
           (ref_reg == FP)) << ref_reg;
    // "Compact" slow path, saving two moves.
    //
    // Instead of using the standard runtime calling convention (input
    // and output in A0 and V0 respectively):
    //
    //   A0 <- ref
    //   V0 <- ReadBarrierMark(A0)
    //   ref <- V0
    //
    // we just use rX (the register containing `ref`) as input and output
    // of a dedicated entrypoint:
    //
    //   rX <- ReadBarrierMarkRegX(rX)
    //
    int32_t entry_point_offset =
        Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(ref_reg - 1);
    // This runtime call does not require a stack map.
    mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
                                                      instruction_,
                                                      this,
                                                      /* direct */ false);

    // If the new reference is different from the old reference,
    // update the field in the holder (`*(obj_ + field_offset_)`).
    //
    // Note that this field could also hold a different object, if
    // another thread had concurrently changed it. In that case, the
    // compare-and-set (CAS) loop below would abort, leaving the
    // field as-is.
    MipsLabel done;
    __ Beq(temp1_, ref_reg, &done);

    // Update the holder's field atomically.  This may fail if the
    // mutator updates the field before us, but that's OK.  This is achieved
    // using a strong compare-and-set (CAS) operation with relaxed
    // memory synchronization ordering, where the expected value is
    // the old reference and the desired value is the new reference.

    // Convenience aliases.
    Register base = obj_;
    // The UnsafeCASObject intrinsic uses a register pair as field
    // offset ("long offset"), of which only the low part contains
    // data.
    Register offset = field_offset_.AsRegisterPairLow<Register>();
    Register expected = temp1_;
    Register value = ref_reg;
    Register tmp_ptr = TMP;      // Pointer to actual memory.
    Register tmp = AT;           // Value in memory.

    __ Addu(tmp_ptr, base, offset);

    if (kPoisonHeapReferences) {
      __ PoisonHeapReference(expected);
      // Do not poison `value` if it is the same register as
      // `expected`, which has just been poisoned.
      if (value != expected) {
        __ PoisonHeapReference(value);
      }
    }

    // do {
    //   tmp = [tmp_ptr];
    //   if (tmp != expected) break;
    // } while (failure([tmp_ptr] <- value));

    bool is_r6 = mips_codegen->GetInstructionSetFeatures().IsR6();
    MipsLabel loop_head, exit_loop;
    __ Bind(&loop_head);
    if (is_r6) {
      __ LlR6(tmp, tmp_ptr);
    } else {
      __ LlR2(tmp, tmp_ptr);
    }
    __ Bne(tmp, expected, &exit_loop);
    __ Move(tmp, value);
    if (is_r6) {
      __ ScR6(tmp, tmp_ptr);
    } else {
      __ ScR2(tmp, tmp_ptr);
    }
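    // Sc writes 1 to `tmp` on success and 0 on failure, so loop back while
    // the store-conditional keeps failing.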
    __ Beqz(tmp, &loop_head);
    __ Bind(&exit_loop);

    if (kPoisonHeapReferences) {
      __ UnpoisonHeapReference(expected);
      // Do not unpoison `value` if it is the same register as
      // `expected`, which has just been unpoisoned.
      if (value != expected) {
        __ UnpoisonHeapReference(value);
      }
    }

    __ Bind(&done);
    __ B(GetExitLabel());
  }

 private:
  // The location (register) of the marked object reference.
  const Location ref_;
  // The register containing the object holding the marked object reference field.
  const Register obj_;
  // The location of the offset of the marked reference field within `obj_`.
  Location field_offset_;

  const Register temp1_;

  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathMIPS);
};

// Slow path generating a read barrier for a heap reference.
class ReadBarrierForHeapReferenceSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  ReadBarrierForHeapReferenceSlowPathMIPS(HInstruction* instruction,
                                          Location out,
                                          Location ref,
                                          Location obj,
                                          uint32_t offset,
                                          Location index)
      : SlowPathCodeMIPS(instruction),
        out_(out),
        ref_(ref),
        obj_(obj),
        offset_(offset),
        index_(index) {
    DCHECK(kEmitCompilerReadBarrier);
    // If `obj` is equal to `out` or `ref`, it means the initial object
    // has been overwritten by (or after) the heap object reference load
    // to be instrumented, e.g.:
    //
    //   __ LoadFromOffset(kLoadWord, out, out, offset);
    //   codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset);
    //
    // In that case, we have lost the information about the original
    // object, and the emitted read barrier cannot work properly.
    DCHECK(!obj.Equals(out)) << "obj=" << obj << " out=" << out;
    DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    LocationSummary* locations = instruction_->GetLocations();
    Register reg_out = out_.AsRegister<Register>();
    DCHECK(locations->CanCall());
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
    DCHECK(instruction_->IsInstanceFieldGet() ||
           instruction_->IsStaticFieldGet() ||
           instruction_->IsArrayGet() ||
           instruction_->IsInstanceOf() ||
           instruction_->IsCheckCast() ||
           (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
        << "Unexpected instruction in read barrier for heap reference slow path: "
        << instruction_->DebugName();

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We may have to change the index's value, but as `index_` is a
    // constant member (like other "inputs" of this slow path), we
    // introduce a copy of it, `index`.
    Location index = index_;
    if (index_.IsValid()) {
      // Handle `index_` for HArrayGet and UnsafeGetObject/UnsafeGetObjectVolatile intrinsics.
      if (instruction_->IsArrayGet()) {
        // Compute the actual memory offset and store it in `index`.
        Register index_reg = index_.AsRegister<Register>();
        DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_reg));
        if (codegen->IsCoreCalleeSaveRegister(index_reg)) {
          // We are about to change the value of `index_reg` (see the
          // calls to art::mips::MipsAssembler::Sll and
          // art::mips::MipsAssembler::Addiu32 below), but it has
          // not been saved by the previous call to
          // art::SlowPathCode::SaveLiveRegisters, as it is a
          // callee-save register --
          // art::SlowPathCode::SaveLiveRegisters does not consider
          // callee-save registers, as it has been designed with the
          // assumption that callee-save registers are supposed to be
          // handled by the called function.  So, as a callee-save
          // register, `index_reg` _would_ eventually be saved onto
          // the stack, but it would be too late: we would have
          // changed its value earlier.  Therefore, we manually save
          // it here into another freely available register,
          // `free_reg`, chosen of course among the caller-save
          // registers (as a callee-save `free_reg` register would
          // exhibit the same problem).
          //
          // Note we could have requested a temporary register from
          // the register allocator instead; but we prefer not to, as
          // this is a slow path, and we know we can find a
          // caller-save register that is available.
          Register free_reg = FindAvailableCallerSaveRegister(codegen);
          __ Move(free_reg, index_reg);
          index_reg = free_reg;
          index = Location::RegisterLocation(index_reg);
        } else {
          // The initial register stored in `index_` has already been
          // saved in the call to art::SlowPathCode::SaveLiveRegisters
          // (as it is not a callee-save register), so we can freely
          // use it.
        }
        // Shifting the index value contained in `index_reg` by the scale
        // factor (2) cannot overflow in practice, as the runtime is
        // unable to allocate object arrays with a size larger than
        // 2^26 - 1 (that is, 2^28 - 4 bytes).
        __ Sll(index_reg, index_reg, TIMES_4);
        static_assert(
            sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
            "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
        __ Addiu32(index_reg, index_reg, offset_);
      } else {
        // In the case of the UnsafeGetObject/UnsafeGetObjectVolatile
        // intrinsics, `index_` is not shifted by a scale factor of 2
        // (as in the case of ArrayGet), as it is actually an offset
        // to an object field within an object.
        DCHECK(instruction_->IsInvoke()) << instruction_->DebugName();
        DCHECK(instruction_->GetLocations()->Intrinsified());
        DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) ||
               (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile))
            << instruction_->AsInvoke()->GetIntrinsic();
        DCHECK_EQ(offset_, 0U);
        DCHECK(index_.IsRegisterPair());
        // UnsafeGet's offset location is a register pair; the low
        // part contains the correct offset.
        index = index_.ToLow();
      }
    }

    // We're moving two or three locations to locations that could
    // overlap, so we need a parallel move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
    parallel_move.AddMove(ref_,
                          Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                          DataType::Type::kReference,
                          nullptr);
    parallel_move.AddMove(obj_,
                          Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                          DataType::Type::kReference,
                          nullptr);
    if (index.IsValid()) {
      parallel_move.AddMove(index,
                            Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
                            DataType::Type::kInt32,
                            nullptr);
      codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
    } else {
      codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
      __ LoadConst32(calling_convention.GetRegisterAt(2), offset_);
    }
    mips_codegen->InvokeRuntime(kQuickReadBarrierSlow,
                                instruction_,
                                instruction_->GetDexPc(),
                                this);
    CheckEntrypointTypes<
        kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>();
    mips_codegen->MoveLocation(out_,
                               calling_convention.GetReturnLocation(DataType::Type::kReference),
                               DataType::Type::kReference);

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathMIPS"; }

 private:
  Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
    size_t ref = static_cast<int>(ref_.AsRegister<Register>());
    size_t obj = static_cast<int>(obj_.AsRegister<Register>());
    for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
      if (i != ref &&
          i != obj &&
          !codegen->IsCoreCalleeSaveRegister(i) &&
          !codegen->IsBlockedCoreRegister(i)) {
        return static_cast<Register>(i);
      }
    }
    // We shall never fail to find a free caller-save register, as
    // there are more than two core caller-save registers on MIPS
    // (meaning it is possible to find one which is different from
    // `ref` and `obj`).
    DCHECK_GT(codegen->GetNumberOfCoreCallerSaveRegisters(), 2u);
    LOG(FATAL) << "Could not find a free caller-save register";
    UNREACHABLE();
  }

  const Location out_;
  const Location ref_;
  const Location obj_;
  const uint32_t offset_;
  // An additional location containing an index to an array.
  // Only used for HArrayGet and the UnsafeGetObject &
  // UnsafeGetObjectVolatile intrinsics.
  const Location index_;

  DISALLOW_COPY_AND_ASSIGN(ReadBarrierForHeapReferenceSlowPathMIPS);
};

// Slow path generating a read barrier for a GC root.
class ReadBarrierForRootSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  ReadBarrierForRootSlowPathMIPS(HInstruction* instruction, Location out, Location root)
      : SlowPathCodeMIPS(instruction), out_(out), root_(root) {
    DCHECK(kEmitCompilerReadBarrier);
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Register reg_out = out_.AsRegister<Register>();
    DCHECK(locations->CanCall());
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
    DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString())
        << "Unexpected instruction in read barrier for GC root slow path: "
        << instruction_->DebugName();

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
    mips_codegen->MoveLocation(Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               root_,
                               DataType::Type::kReference);
    mips_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow,
                                instruction_,
                                instruction_->GetDexPc(),
                                this);
    CheckEntrypointTypes<kQuickReadBarrierForRootSlow, mirror::Object*, GcRoot<mirror::Object>*>();
    mips_codegen->MoveLocation(out_,
                               calling_convention.GetReturnLocation(DataType::Type::kReference),
                               DataType::Type::kReference);

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathMIPS"; }

 private:
  const Location out_;
  const Location root_;

  DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathMIPS);
};

CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
                                     const MipsInstructionSetFeatures& isa_features,
                                     const CompilerOptions& compiler_options,
                                     OptimizingCompilerStats* stats)
    : CodeGenerator(graph,
                    kNumberOfCoreRegisters,
                    kNumberOfFRegisters,
                    kNumberOfRegisterPairs,
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options,
                    stats),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetAllocator(), this),
      assembler_(graph->GetAllocator(), &isa_features),
      isa_features_(isa_features),
      uint32_literals_(std::less<uint32_t>(),
                       graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
      boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
      method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
      boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
      type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
      boot_image_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
      string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
      jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
      jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
      clobbered_ra_(false) {
  // Save RA (containing the return address) to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(RA));
}

#undef __
// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
#define __ down_cast<MipsAssembler*>(GetAssembler())->  // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()

void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
  // Ensure that we fix up branches.
  __ FinalizeCode();
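  // Branch fix-up only ever expands short branches into longer sequences, so
  // the adjusted positions computed below can never move backwards (hence the
  // DCHECK_GE).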

  // Adjust native pc offsets in stack maps.
  StackMapStream* stack_map_stream = GetStackMapStream();
  for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
    uint32_t old_position =
        stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(InstructionSet::kMips);
    uint32_t new_position = __ GetAdjustedPosition(old_position);
    DCHECK_GE(new_position, old_position);
    stack_map_stream->SetStackMapNativePcOffset(i, new_position);
  }

  // Adjust pc offsets for the disassembly information.
  if (disasm_info_ != nullptr) {
    GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
    frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
    frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
    for (auto& it : *disasm_info_->GetInstructionIntervals()) {
      it.second.start = __ GetAdjustedPosition(it.second.start);
      it.second.end = __ GetAdjustedPosition(it.second.end);
    }
    for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
      it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
      it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
    }
  }

  CodeGenerator::Finalize(allocator);
}

MipsAssembler* ParallelMoveResolverMIPS::GetAssembler() const {
  return codegen_->GetAssembler();
}

void ParallelMoveResolverMIPS::EmitMove(size_t index) {
  DCHECK_LT(index, moves_.size());
  MoveOperands* move = moves_[index];
  codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS::EmitSwap(size_t index) {
  DCHECK_LT(index, moves_.size());
  MoveOperands* move = moves_[index];
  DataType::Type type = move->GetType();
  Location loc1 = move->GetDestination();
  Location loc2 = move->GetSource();

  DCHECK(!loc1.IsConstant());
  DCHECK(!loc2.IsConstant());

  if (loc1.Equals(loc2)) {
    return;
  }

  if (loc1.IsRegister() && loc2.IsRegister()) {
    // Swap 2 GPRs.
    Register r1 = loc1.AsRegister<Register>();
    Register r2 = loc2.AsRegister<Register>();
    __ Move(TMP, r2);
    __ Move(r2, r1);
    __ Move(r1, TMP);
  } else if (loc1.IsFpuRegister() && loc2.IsFpuRegister()) {
    if (codegen_->GetGraph()->HasSIMD()) {
      __ MoveV(static_cast<VectorRegister>(FTMP), VectorRegisterFrom(loc1));
      __ MoveV(VectorRegisterFrom(loc1), VectorRegisterFrom(loc2));
      __ MoveV(VectorRegisterFrom(loc2), static_cast<VectorRegister>(FTMP));
    } else {
      FRegister f1 = loc1.AsFpuRegister<FRegister>();
      FRegister f2 = loc2.AsFpuRegister<FRegister>();
      if (type == DataType::Type::kFloat32) {
        __ MovS(FTMP, f2);
        __ MovS(f2, f1);
        __ MovS(f1, FTMP);
      } else {
        DCHECK_EQ(type, DataType::Type::kFloat64);
        __ MovD(FTMP, f2);
        __ MovD(f2, f1);
        __ MovD(f1, FTMP);
      }
    }
  } else if ((loc1.IsRegister() && loc2.IsFpuRegister()) ||
             (loc1.IsFpuRegister() && loc2.IsRegister())) {
    // Swap FPR and GPR.
    DCHECK_EQ(type, DataType::Type::kFloat32);  // Can only swap a float.
    FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
                                        : loc2.AsFpuRegister<FRegister>();
    Register r2 = loc1.IsRegister() ? loc1.AsRegister<Register>() : loc2.AsRegister<Register>();
    __ Move(TMP, r2);
    __ Mfc1(r2, f1);
    __ Mtc1(TMP, f1);
  } else if (loc1.IsRegisterPair() && loc2.IsRegisterPair()) {
    // Swap 2 GPR register pairs.
    Register r1 = loc1.AsRegisterPairLow<Register>();
    Register r2 = loc2.AsRegisterPairLow<Register>();
    __ Move(TMP, r2);
    __ Move(r2, r1);
    __ Move(r1, TMP);
    r1 = loc1.AsRegisterPairHigh<Register>();
    r2 = loc2.AsRegisterPairHigh<Register>();
    __ Move(TMP, r2);
    __ Move(r2, r1);
    __ Move(r1, TMP);
  } else if ((loc1.IsRegisterPair() && loc2.IsFpuRegister()) ||
             (loc1.IsFpuRegister() && loc2.IsRegisterPair())) {
    // Swap FPR and GPR register pair.
    DCHECK_EQ(type, DataType::Type::kFloat64);
    FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
                                        : loc2.AsFpuRegister<FRegister>();
    Register r2_l = loc1.IsRegisterPair() ? loc1.AsRegisterPairLow<Register>()
                                          : loc2.AsRegisterPairLow<Register>();
    Register r2_h = loc1.IsRegisterPair() ? loc1.AsRegisterPairHigh<Register>()
                                          : loc2.AsRegisterPairHigh<Register>();
   1152     // Use 2 temporary registers because we can't first swap the low 32 bits of an FPR and
   1153     // then swap the high 32 bits of the same FPR. mtc1 makes the high 32 bits of an FPR
    1154     // unpredictable and the following mfhc1 will fail.
   1155     __ Mfc1(TMP, f1);
   1156     __ MoveFromFpuHigh(AT, f1);
   1157     __ Mtc1(r2_l, f1);
   1158     __ MoveToFpuHigh(r2_h, f1);
   1159     __ Move(r2_l, TMP);
   1160     __ Move(r2_h, AT);
   1161   } else if (loc1.IsStackSlot() && loc2.IsStackSlot()) {
   1162     Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ false);
   1163   } else if (loc1.IsDoubleStackSlot() && loc2.IsDoubleStackSlot()) {
   1164     Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ true);
   1165   } else if (loc1.IsSIMDStackSlot() && loc2.IsSIMDStackSlot()) {
   1166     ExchangeQuadSlots(loc1.GetStackIndex(), loc2.GetStackIndex());
   1167   } else if ((loc1.IsRegister() && loc2.IsStackSlot()) ||
   1168              (loc1.IsStackSlot() && loc2.IsRegister())) {
   1169     Register reg = loc1.IsRegister() ? loc1.AsRegister<Register>() : loc2.AsRegister<Register>();
   1170     intptr_t offset = loc1.IsStackSlot() ? loc1.GetStackIndex() : loc2.GetStackIndex();
   1171     __ Move(TMP, reg);
   1172     __ LoadFromOffset(kLoadWord, reg, SP, offset);
   1173     __ StoreToOffset(kStoreWord, TMP, SP, offset);
   1174   } else if ((loc1.IsRegisterPair() && loc2.IsDoubleStackSlot()) ||
   1175              (loc1.IsDoubleStackSlot() && loc2.IsRegisterPair())) {
   1176     Register reg_l = loc1.IsRegisterPair() ? loc1.AsRegisterPairLow<Register>()
   1177                                            : loc2.AsRegisterPairLow<Register>();
   1178     Register reg_h = loc1.IsRegisterPair() ? loc1.AsRegisterPairHigh<Register>()
   1179                                            : loc2.AsRegisterPairHigh<Register>();
   1180     intptr_t offset_l = loc1.IsDoubleStackSlot() ? loc1.GetStackIndex() : loc2.GetStackIndex();
   1181     intptr_t offset_h = loc1.IsDoubleStackSlot() ? loc1.GetHighStackIndex(kMipsWordSize)
   1182                                                  : loc2.GetHighStackIndex(kMipsWordSize);
   1183     __ Move(TMP, reg_l);
   1184     __ LoadFromOffset(kLoadWord, reg_l, SP, offset_l);
   1185     __ StoreToOffset(kStoreWord, TMP, SP, offset_l);
   1186     __ Move(TMP, reg_h);
   1187     __ LoadFromOffset(kLoadWord, reg_h, SP, offset_h);
   1188     __ StoreToOffset(kStoreWord, TMP, SP, offset_h);
   1189   } else if ((loc1.IsFpuRegister() && loc2.IsSIMDStackSlot()) ||
   1190              (loc1.IsSIMDStackSlot() && loc2.IsFpuRegister())) {
   1191     Location fp_loc = loc1.IsFpuRegister() ? loc1 : loc2;
   1192     intptr_t offset = loc1.IsFpuRegister() ? loc2.GetStackIndex() : loc1.GetStackIndex();
   1193     __ MoveV(static_cast<VectorRegister>(FTMP), VectorRegisterFrom(fp_loc));
   1194     __ LoadQFromOffset(fp_loc.AsFpuRegister<FRegister>(), SP, offset);
   1195     __ StoreQToOffset(FTMP, SP, offset);
   1196   } else if (loc1.IsFpuRegister() || loc2.IsFpuRegister()) {
   1197     FRegister reg = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
   1198                                          : loc2.AsFpuRegister<FRegister>();
   1199     intptr_t offset = loc1.IsFpuRegister() ? loc2.GetStackIndex() : loc1.GetStackIndex();
   1200     if (type == DataType::Type::kFloat32) {
   1201       __ MovS(FTMP, reg);
   1202       __ LoadSFromOffset(reg, SP, offset);
   1203       __ StoreSToOffset(FTMP, SP, offset);
   1204     } else {
   1205       DCHECK_EQ(type, DataType::Type::kFloat64);
   1206       __ MovD(FTMP, reg);
   1207       __ LoadDFromOffset(reg, SP, offset);
   1208       __ StoreDToOffset(FTMP, SP, offset);
   1209     }
   1210   } else {
   1211     LOG(FATAL) << "Swap between " << loc1 << " and " << loc2 << " is unsupported";
   1212   }
   1213 }
   1214 
   1215 void ParallelMoveResolverMIPS::RestoreScratch(int reg) {
   1216   __ Pop(static_cast<Register>(reg));
   1217 }
   1218 
   1219 void ParallelMoveResolverMIPS::SpillScratch(int reg) {
   1220   __ Push(static_cast<Register>(reg));
   1221 }
   1222 
   1223 void ParallelMoveResolverMIPS::Exchange(int index1, int index2, bool double_slot) {
   1224   // Allocate a scratch register other than TMP, if available.
    1225   // Otherwise, spill V0 (an arbitrary choice) and use it as a scratch register (it will be
   1226   // automatically unspilled when the scratch scope object is destroyed).
   1227   ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
   1228   // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
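           // Note: this assumes SpillScratch's push moves SP down by a full kStackAlignment,
           // hence the fixed adjustment below.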
   1229   int stack_offset = ensure_scratch.IsSpilled() ? kStackAlignment : 0;
   1230   for (int i = 0; i <= (double_slot ? 1 : 0); i++, stack_offset += kMipsWordSize) {
   1231     __ LoadFromOffset(kLoadWord,
   1232                       Register(ensure_scratch.GetRegister()),
   1233                       SP,
   1234                       index1 + stack_offset);
   1235     __ LoadFromOffset(kLoadWord,
   1236                       TMP,
   1237                       SP,
   1238                       index2 + stack_offset);
   1239     __ StoreToOffset(kStoreWord,
   1240                      Register(ensure_scratch.GetRegister()),
   1241                      SP,
   1242                      index2 + stack_offset);
   1243     __ StoreToOffset(kStoreWord, TMP, SP, index1 + stack_offset);
   1244   }
   1245 }
   1246 
   1247 void ParallelMoveResolverMIPS::ExchangeQuadSlots(int index1, int index2) {
   1248   __ LoadQFromOffset(FTMP, SP, index1);
   1249   __ LoadQFromOffset(FTMP2, SP, index2);
   1250   __ StoreQToOffset(FTMP, SP, index2);
   1251   __ StoreQToOffset(FTMP2, SP, index1);
   1252 }
   1253 
   1254 void CodeGeneratorMIPS::ComputeSpillMask() {
   1255   core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
   1256   fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
   1257   DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
    1258   // If there are FPU callee-saved registers and there's an odd number of GPR callee-saved
   1259   // registers, include the ZERO register to force alignment of FPU callee-saved registers
   1260   // within the stack frame.
   1261   if ((fpu_spill_mask_ != 0) && (POPCOUNT(core_spill_mask_) % 2 != 0)) {
   1262     core_spill_mask_ |= (1 << ZERO);
   1263   }
   1264 }
   1265 
   1266 bool CodeGeneratorMIPS::HasAllocatedCalleeSaveRegisters() const {
   1267   // If RA is clobbered by PC-relative operations on R2 and it's the only spilled register
   1268   // (this can happen in leaf methods), force CodeGenerator::InitializeCodeGeneration()
   1269   // into the path that creates a stack frame so that RA can be explicitly saved and restored.
   1270   // RA can't otherwise be saved/restored when it's the only spilled register.
   1271   return CodeGenerator::HasAllocatedCalleeSaveRegisters() || clobbered_ra_;
   1272 }
   1273 
   1274 static dwarf::Reg DWARFReg(Register reg) {
   1275   return dwarf::Reg::MipsCore(static_cast<int>(reg));
   1276 }
   1277 
   1278 // TODO: mapping of floating-point registers to DWARF.
   1279 
   1280 void CodeGeneratorMIPS::GenerateFrameEntry() {
   1281   __ Bind(&frame_entry_label_);
   1282 
   1283   if (GetCompilerOptions().CountHotnessInCompiledCode()) {
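             // Bump the 16-bit hotness counter. There is no saturation handling here, so
             // the counter presumably wraps; the Lhu/Addiu/Sh sequence below is not atomic.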
   1284     __ Lhu(TMP, kMethodRegisterArgument, ArtMethod::HotnessCountOffset().Int32Value());
   1285     __ Addiu(TMP, TMP, 1);
   1286     __ Sh(TMP, kMethodRegisterArgument, ArtMethod::HotnessCountOffset().Int32Value());
   1287   }
   1288 
   1289   bool do_overflow_check =
   1290       FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips) || !IsLeafMethod();
   1291 
   1292   if (do_overflow_check) {
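             // Implicit stack overflow check: load a word from just beyond the reserved
             // region and discard it into ZERO. Touching the guard page faults, and the
             // runtime's fault handler presumably turns that into a StackOverflowError
             // using the stack map recorded below.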
   1293     __ LoadFromOffset(kLoadWord,
   1294                       ZERO,
   1295                       SP,
   1296                       -static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kMips)));
   1297     RecordPcInfo(nullptr, 0);
   1298   }
   1299 
   1300   if (HasEmptyFrame()) {
   1301     CHECK_EQ(fpu_spill_mask_, 0u);
   1302     CHECK_EQ(core_spill_mask_, 1u << RA);
   1303     CHECK(!clobbered_ra_);
   1304     return;
   1305   }
   1306 
   1307   // Make sure the frame size isn't unreasonably large.
   1308   if (GetFrameSize() > GetStackOverflowReservedBytes(InstructionSet::kMips)) {
   1309     LOG(FATAL) << "Stack frame larger than "
   1310         << GetStackOverflowReservedBytes(InstructionSet::kMips) << " bytes";
   1311   }
   1312 
   1313   // Spill callee-saved registers.
   1314 
   1315   uint32_t ofs = GetFrameSize();
   1316   __ IncreaseFrameSize(ofs);
   1317 
   1318   for (uint32_t mask = core_spill_mask_; mask != 0; ) {
   1319     Register reg = static_cast<Register>(MostSignificantBit(mask));
   1320     mask ^= 1u << reg;
   1321     ofs -= kMipsWordSize;
   1322     // The ZERO register is only included for alignment.
   1323     if (reg != ZERO) {
   1324       __ StoreToOffset(kStoreWord, reg, SP, ofs);
   1325       __ cfi().RelOffset(DWARFReg(reg), ofs);
   1326     }
   1327   }
   1328 
   1329   for (uint32_t mask = fpu_spill_mask_; mask != 0; ) {
   1330     FRegister reg = static_cast<FRegister>(MostSignificantBit(mask));
   1331     mask ^= 1u << reg;
   1332     ofs -= kMipsDoublewordSize;
   1333     __ StoreDToOffset(reg, SP, ofs);
   1334     // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
   1335   }
   1336 
   1337   // Save the current method if we need it. Note that we do not
   1338   // do this in HCurrentMethod, as the instruction might have been removed
   1339   // in the SSA graph.
   1340   if (RequiresCurrentMethod()) {
   1341     __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
   1342   }
   1343 
   1344   if (GetGraph()->HasShouldDeoptimizeFlag()) {
   1345     // Initialize should deoptimize flag to 0.
   1346     __ StoreToOffset(kStoreWord, ZERO, SP, GetStackOffsetOfShouldDeoptimizeFlag());
   1347   }
   1348 }
   1349 
   1350 void CodeGeneratorMIPS::GenerateFrameExit() {
   1351   __ cfi().RememberState();
   1352 
   1353   if (!HasEmptyFrame()) {
   1354     // Restore callee-saved registers.
   1355 
   1356     // For better instruction scheduling restore RA before other registers.
   1357     uint32_t ofs = GetFrameSize();
   1358     for (uint32_t mask = core_spill_mask_; mask != 0; ) {
   1359       Register reg = static_cast<Register>(MostSignificantBit(mask));
   1360       mask ^= 1u << reg;
   1361       ofs -= kMipsWordSize;
   1362       // The ZERO register is only included for alignment.
   1363       if (reg != ZERO) {
   1364         __ LoadFromOffset(kLoadWord, reg, SP, ofs);
   1365         __ cfi().Restore(DWARFReg(reg));
   1366       }
   1367     }
   1368 
   1369     for (uint32_t mask = fpu_spill_mask_; mask != 0; ) {
   1370       FRegister reg = static_cast<FRegister>(MostSignificantBit(mask));
   1371       mask ^= 1u << reg;
   1372       ofs -= kMipsDoublewordSize;
   1373       __ LoadDFromOffset(reg, SP, ofs);
   1374       // TODO: __ cfi().Restore(DWARFReg(reg));
   1375     }
   1376 
   1377     size_t frame_size = GetFrameSize();
   1378     // Adjust the stack pointer in the delay slot if doing so doesn't break CFI.
   1379     bool exchange = IsInt<16>(static_cast<int32_t>(frame_size));
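             // DecreaseFrameSize emits a single ADDIU only when the size fits in 16 bits;
             // a larger adjustment expands to several instructions and cannot be placed in
             // the JR's delay slot without breaking the CFI mentioned above.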
   1380     bool reordering = __ SetReorder(false);
   1381     if (exchange) {
   1382       __ Jr(RA);
   1383       __ DecreaseFrameSize(frame_size);  // Single instruction in delay slot.
   1384     } else {
   1385       __ DecreaseFrameSize(frame_size);
   1386       __ Jr(RA);
   1387       __ Nop();  // In delay slot.
   1388     }
   1389     __ SetReorder(reordering);
   1390   } else {
   1391     __ Jr(RA);
   1392     __ NopIfNoReordering();
   1393   }
   1394 
   1395   __ cfi().RestoreState();
   1396   __ cfi().DefCFAOffset(GetFrameSize());
   1397 }
   1398 
   1399 void CodeGeneratorMIPS::Bind(HBasicBlock* block) {
   1400   __ Bind(GetLabelOf(block));
   1401 }
   1402 
   1403 VectorRegister VectorRegisterFrom(Location location) {
   1404   DCHECK(location.IsFpuRegister());
   1405   return static_cast<VectorRegister>(location.AsFpuRegister<FRegister>());
   1406 }
   1407 
   1408 void CodeGeneratorMIPS::MoveLocation(Location destination,
   1409                                      Location source,
   1410                                      DataType::Type dst_type) {
   1411   if (source.Equals(destination)) {
   1412     return;
   1413   }
   1414 
   1415   if (source.IsConstant()) {
   1416     MoveConstant(destination, source.GetConstant());
   1417   } else {
   1418     if (destination.IsRegister()) {
   1419       if (source.IsRegister()) {
   1420         __ Move(destination.AsRegister<Register>(), source.AsRegister<Register>());
   1421       } else if (source.IsFpuRegister()) {
   1422         __ Mfc1(destination.AsRegister<Register>(), source.AsFpuRegister<FRegister>());
   1423       } else {
   1424         DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
    1425         __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
   1426       }
   1427     } else if (destination.IsRegisterPair()) {
   1428       if (source.IsRegisterPair()) {
   1429         __ Move(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
   1430         __ Move(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
   1431       } else if (source.IsFpuRegister()) {
   1432         Register dst_high = destination.AsRegisterPairHigh<Register>();
   1433         Register dst_low =  destination.AsRegisterPairLow<Register>();
   1434         FRegister src = source.AsFpuRegister<FRegister>();
   1435         __ Mfc1(dst_low, src);
   1436         __ MoveFromFpuHigh(dst_high, src);
   1437       } else {
   1438         DCHECK(source.IsDoubleStackSlot())
   1439             << "Cannot move from " << source << " to " << destination;
   1440         int32_t off = source.GetStackIndex();
   1441         Register r = destination.AsRegisterPairLow<Register>();
   1442         __ LoadFromOffset(kLoadDoubleword, r, SP, off);
   1443       }
   1444     } else if (destination.IsFpuRegister()) {
   1445       if (source.IsRegister()) {
   1446         DCHECK(!DataType::Is64BitType(dst_type));
   1447         __ Mtc1(source.AsRegister<Register>(), destination.AsFpuRegister<FRegister>());
   1448       } else if (source.IsRegisterPair()) {
   1449         DCHECK(DataType::Is64BitType(dst_type));
   1450         FRegister dst = destination.AsFpuRegister<FRegister>();
   1451         Register src_high = source.AsRegisterPairHigh<Register>();
   1452         Register src_low = source.AsRegisterPairLow<Register>();
   1453         __ Mtc1(src_low, dst);
   1454         __ MoveToFpuHigh(src_high, dst);
   1455       } else if (source.IsFpuRegister()) {
   1456         if (GetGraph()->HasSIMD()) {
   1457           __ MoveV(VectorRegisterFrom(destination),
   1458                    VectorRegisterFrom(source));
   1459         } else {
   1460           if (DataType::Is64BitType(dst_type)) {
   1461             __ MovD(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
   1462           } else {
   1463             DCHECK_EQ(dst_type, DataType::Type::kFloat32);
   1464             __ MovS(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
   1465           }
   1466         }
   1467       } else if (source.IsSIMDStackSlot()) {
   1468         __ LoadQFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
   1469       } else if (source.IsDoubleStackSlot()) {
   1470         DCHECK(DataType::Is64BitType(dst_type));
   1471         __ LoadDFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
   1472       } else {
   1473         DCHECK(!DataType::Is64BitType(dst_type));
   1474         DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
   1475         __ LoadSFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
   1476       }
   1477     } else if (destination.IsSIMDStackSlot()) {
   1478       if (source.IsFpuRegister()) {
   1479         __ StoreQToOffset(source.AsFpuRegister<FRegister>(), SP, destination.GetStackIndex());
   1480       } else {
   1481         DCHECK(source.IsSIMDStackSlot());
   1482         __ LoadQFromOffset(FTMP, SP, source.GetStackIndex());
   1483         __ StoreQToOffset(FTMP, SP, destination.GetStackIndex());
   1484       }
   1485     } else if (destination.IsDoubleStackSlot()) {
   1486       int32_t dst_offset = destination.GetStackIndex();
   1487       if (source.IsRegisterPair()) {
   1488         __ StoreToOffset(kStoreDoubleword, source.AsRegisterPairLow<Register>(), SP, dst_offset);
   1489       } else if (source.IsFpuRegister()) {
   1490         __ StoreDToOffset(source.AsFpuRegister<FRegister>(), SP, dst_offset);
   1491       } else {
   1492         DCHECK(source.IsDoubleStackSlot())
   1493             << "Cannot move from " << source << " to " << destination;
   1494         __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
   1495         __ StoreToOffset(kStoreWord, TMP, SP, dst_offset);
   1496         __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex() + 4);
   1497         __ StoreToOffset(kStoreWord, TMP, SP, dst_offset + 4);
   1498       }
   1499     } else {
   1500       DCHECK(destination.IsStackSlot()) << destination;
   1501       int32_t dst_offset = destination.GetStackIndex();
   1502       if (source.IsRegister()) {
   1503         __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, dst_offset);
   1504       } else if (source.IsFpuRegister()) {
   1505         __ StoreSToOffset(source.AsFpuRegister<FRegister>(), SP, dst_offset);
   1506       } else {
   1507         DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
   1508         __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
   1509         __ StoreToOffset(kStoreWord, TMP, SP, dst_offset);
   1510       }
   1511     }
   1512   }
   1513 }
   1514 
   1515 void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) {
   1516   if (c->IsIntConstant() || c->IsNullConstant()) {
   1517     // Move 32 bit constant.
   1518     int32_t value = GetInt32ValueOf(c);
   1519     if (destination.IsRegister()) {
   1520       Register dst = destination.AsRegister<Register>();
   1521       __ LoadConst32(dst, value);
   1522     } else {
   1523       DCHECK(destination.IsStackSlot())
   1524           << "Cannot move " << c->DebugName() << " to " << destination;
   1525       __ StoreConstToOffset(kStoreWord, value, SP, destination.GetStackIndex(), TMP);
   1526     }
   1527   } else if (c->IsLongConstant()) {
   1528     // Move 64 bit constant.
   1529     int64_t value = GetInt64ValueOf(c);
   1530     if (destination.IsRegisterPair()) {
   1531       Register r_h = destination.AsRegisterPairHigh<Register>();
   1532       Register r_l = destination.AsRegisterPairLow<Register>();
   1533       __ LoadConst64(r_h, r_l, value);
   1534     } else {
   1535       DCHECK(destination.IsDoubleStackSlot())
   1536           << "Cannot move " << c->DebugName() << " to " << destination;
   1537       __ StoreConstToOffset(kStoreDoubleword, value, SP, destination.GetStackIndex(), TMP);
   1538     }
   1539   } else if (c->IsFloatConstant()) {
   1540     // Move 32 bit float constant.
   1541     int32_t value = GetInt32ValueOf(c);
   1542     if (destination.IsFpuRegister()) {
   1543       __ LoadSConst32(destination.AsFpuRegister<FRegister>(), value, TMP);
   1544     } else {
   1545       DCHECK(destination.IsStackSlot())
   1546           << "Cannot move " << c->DebugName() << " to " << destination;
   1547       __ StoreConstToOffset(kStoreWord, value, SP, destination.GetStackIndex(), TMP);
   1548     }
   1549   } else {
   1550     // Move 64 bit double constant.
   1551     DCHECK(c->IsDoubleConstant()) << c->DebugName();
   1552     int64_t value = GetInt64ValueOf(c);
   1553     if (destination.IsFpuRegister()) {
   1554       FRegister fd = destination.AsFpuRegister<FRegister>();
   1555       __ LoadDConst64(fd, value, TMP);
   1556     } else {
   1557       DCHECK(destination.IsDoubleStackSlot())
   1558           << "Cannot move " << c->DebugName() << " to " << destination;
   1559       __ StoreConstToOffset(kStoreDoubleword, value, SP, destination.GetStackIndex(), TMP);
   1560     }
   1561   }
   1562 }
   1563 
   1564 void CodeGeneratorMIPS::MoveConstant(Location destination, int32_t value) {
   1565   DCHECK(destination.IsRegister());
   1566   Register dst = destination.AsRegister<Register>();
   1567   __ LoadConst32(dst, value);
   1568 }
   1569 
   1570 void CodeGeneratorMIPS::AddLocationAsTemp(Location location, LocationSummary* locations) {
   1571   if (location.IsRegister()) {
   1572     locations->AddTemp(location);
   1573   } else if (location.IsRegisterPair()) {
   1574     locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
   1575     locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
   1576   } else {
   1577     UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
   1578   }
   1579 }
   1580 
   1581 template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
   1582 inline void CodeGeneratorMIPS::EmitPcRelativeLinkerPatches(
   1583     const ArenaDeque<PcRelativePatchInfo>& infos,
   1584     ArenaVector<linker::LinkerPatch>* linker_patches) {
   1585   for (const PcRelativePatchInfo& info : infos) {
   1586     const DexFile* dex_file = info.target_dex_file;
   1587     size_t offset_or_index = info.offset_or_index;
   1588     DCHECK(info.label.IsBound());
   1589     uint32_t literal_offset = __ GetLabelLocation(&info.label);
   1590     // On R2 we use HMipsComputeBaseMethodAddress and patch relative to
   1591     // the assembler's base label used for PC-relative addressing.
   1592     const PcRelativePatchInfo& info_high = info.patch_info_high ? *info.patch_info_high : info;
   1593     uint32_t pc_rel_offset = info_high.pc_rel_label.IsBound()
   1594         ? __ GetLabelLocation(&info_high.pc_rel_label)
   1595         : __ GetPcRelBaseLabelLocation();
   1596     linker_patches->push_back(Factory(literal_offset, dex_file, pc_rel_offset, offset_or_index));
   1597   }
   1598 }
   1599 
   1600 void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) {
   1601   DCHECK(linker_patches->empty());
   1602   size_t size =
   1603       boot_image_method_patches_.size() +
   1604       method_bss_entry_patches_.size() +
   1605       boot_image_type_patches_.size() +
   1606       type_bss_entry_patches_.size() +
   1607       boot_image_string_patches_.size() +
   1608       string_bss_entry_patches_.size();
   1609   linker_patches->reserve(size);
   1610   if (GetCompilerOptions().IsBootImage()) {
   1611     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeMethodPatch>(
   1612         boot_image_method_patches_, linker_patches);
   1613     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeTypePatch>(
   1614         boot_image_type_patches_, linker_patches);
   1615     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
   1616         boot_image_string_patches_, linker_patches);
   1617   } else {
   1618     DCHECK(boot_image_method_patches_.empty());
   1619     EmitPcRelativeLinkerPatches<linker::LinkerPatch::TypeClassTablePatch>(
   1620         boot_image_type_patches_, linker_patches);
   1621     EmitPcRelativeLinkerPatches<linker::LinkerPatch::StringInternTablePatch>(
   1622         boot_image_string_patches_, linker_patches);
   1623   }
   1624   EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
   1625       method_bss_entry_patches_, linker_patches);
   1626   EmitPcRelativeLinkerPatches<linker::LinkerPatch::TypeBssEntryPatch>(
   1627       type_bss_entry_patches_, linker_patches);
   1628   EmitPcRelativeLinkerPatches<linker::LinkerPatch::StringBssEntryPatch>(
   1629       string_bss_entry_patches_, linker_patches);
   1630   DCHECK_EQ(size, linker_patches->size());
   1631 }
   1632 
   1633 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageMethodPatch(
   1634     MethodReference target_method,
   1635     const PcRelativePatchInfo* info_high) {
   1636   return NewPcRelativePatch(
   1637       target_method.dex_file, target_method.index, info_high, &boot_image_method_patches_);
   1638 }
   1639 
   1640 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewMethodBssEntryPatch(
   1641     MethodReference target_method,
   1642     const PcRelativePatchInfo* info_high) {
   1643   return NewPcRelativePatch(
   1644       target_method.dex_file, target_method.index, info_high, &method_bss_entry_patches_);
   1645 }
   1646 
   1647 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageTypePatch(
   1648     const DexFile& dex_file,
   1649     dex::TypeIndex type_index,
   1650     const PcRelativePatchInfo* info_high) {
   1651   return NewPcRelativePatch(&dex_file, type_index.index_, info_high, &boot_image_type_patches_);
   1652 }
   1653 
   1654 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewTypeBssEntryPatch(
   1655     const DexFile& dex_file,
   1656     dex::TypeIndex type_index,
   1657     const PcRelativePatchInfo* info_high) {
   1658   return NewPcRelativePatch(&dex_file, type_index.index_, info_high, &type_bss_entry_patches_);
   1659 }
   1660 
   1661 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageStringPatch(
   1662     const DexFile& dex_file,
   1663     dex::StringIndex string_index,
   1664     const PcRelativePatchInfo* info_high) {
   1665   return NewPcRelativePatch(
   1666       &dex_file, string_index.index_, info_high, &boot_image_string_patches_);
   1667 }
   1668 
   1669 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewStringBssEntryPatch(
   1670     const DexFile& dex_file,
   1671     dex::StringIndex string_index,
   1672     const PcRelativePatchInfo* info_high) {
   1673   return NewPcRelativePatch(&dex_file, string_index.index_, info_high, &string_bss_entry_patches_);
   1674 }
   1675 
   1676 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativePatch(
   1677     const DexFile* dex_file,
   1678     uint32_t offset_or_index,
   1679     const PcRelativePatchInfo* info_high,
   1680     ArenaDeque<PcRelativePatchInfo>* patches) {
   1681   patches->emplace_back(dex_file, offset_or_index, info_high);
   1682   return &patches->back();
   1683 }
   1684 
   1685 Literal* CodeGeneratorMIPS::DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map) {
   1686   return map->GetOrCreate(
   1687       value,
   1688       [this, value]() { return __ NewLiteral<uint32_t>(value); });
   1689 }
   1690 
   1691 Literal* CodeGeneratorMIPS::DeduplicateBootImageAddressLiteral(uint32_t address) {
   1692   return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
   1693 }
   1694 
   1695 void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info_high,
   1696                                                              Register out,
   1697                                                              Register base) {
   1698   DCHECK(!info_high->patch_info_high);
   1699   DCHECK_NE(out, base);
   1700   bool reordering = __ SetReorder(false);
   1701   if (GetInstructionSetFeatures().IsR6()) {
   1702     DCHECK_EQ(base, ZERO);
   1703     __ Bind(&info_high->label);
   1704     __ Bind(&info_high->pc_rel_label);
   1705     // Add the high half of a 32-bit offset to PC.
   1706     __ Auipc(out, /* placeholder */ 0x1234);
   1707     __ SetReorder(reordering);
   1708   } else {
   1709     // If base is ZERO, emit NAL to obtain the actual base.
   1710     if (base == ZERO) {
   1711       // Generate a dummy PC-relative call to obtain PC.
   1712       __ Nal();
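               // NAL is a branch-and-link that never branches; it only deposits the return
               // address (PC + 8) into RA.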
   1713     }
   1714     __ Bind(&info_high->label);
   1715     __ Lui(out, /* placeholder */ 0x1234);
    1716     // If we emitted the NAL, bind the pc_rel_label; otherwise base holds the address
    1717     // computed by HMipsComputeBaseMethodAddress, whose own label is stored in MipsAssembler.
   1718     if (base == ZERO) {
   1719       __ Bind(&info_high->pc_rel_label);
   1720     }
   1721     __ SetReorder(reordering);
   1722     // Add the high half of a 32-bit offset to PC.
   1723     __ Addu(out, out, (base == ZERO) ? RA : base);
   1724   }
   1725   // A following instruction will add the sign-extended low half of the 32-bit
   1726   // offset to `out` (e.g. lw, jialc, addiu).
   1727 }
   1728 
   1729 CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootStringPatch(
   1730     const DexFile& dex_file,
   1731     dex::StringIndex string_index,
   1732     Handle<mirror::String> handle) {
   1733   ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
   1734   jit_string_patches_.emplace_back(dex_file, string_index.index_);
   1735   return &jit_string_patches_.back();
   1736 }
   1737 
   1738 CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootClassPatch(
   1739     const DexFile& dex_file,
   1740     dex::TypeIndex type_index,
   1741     Handle<mirror::Class> handle) {
   1742   ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
   1743   jit_class_patches_.emplace_back(dex_file, type_index.index_);
   1744   return &jit_class_patches_.back();
   1745 }
   1746 
   1747 void CodeGeneratorMIPS::PatchJitRootUse(uint8_t* code,
   1748                                         const uint8_t* roots_data,
   1749                                         const CodeGeneratorMIPS::JitPatchInfo& info,
   1750                                         uint64_t index_in_table) const {
   1751   uint32_t high_literal_offset = GetAssembler().GetLabelLocation(&info.high_label);
   1752   uint32_t low_literal_offset = GetAssembler().GetLabelLocation(&info.low_label);
   1753   uintptr_t address =
   1754       reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
   1755   uint32_t addr32 = dchecked_integral_cast<uint32_t>(address);
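           // The DCHECKs below verify the little-endian bytes of the placeholders emitted
           // earlier: 0x1234 in the lui and 0x5678 in the low-half instruction.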
   1756   // lui reg, addr32_high
   1757   DCHECK_EQ(code[high_literal_offset + 0], 0x34);
   1758   DCHECK_EQ(code[high_literal_offset + 1], 0x12);
   1759   DCHECK_EQ((code[high_literal_offset + 2] & 0xE0), 0x00);
   1760   DCHECK_EQ(code[high_literal_offset + 3], 0x3C);
   1761   // instr reg, reg, addr32_low
   1762   DCHECK_EQ(code[low_literal_offset + 0], 0x78);
   1763   DCHECK_EQ(code[low_literal_offset + 1], 0x56);
   1764   addr32 += (addr32 & 0x8000) << 1;  // Account for sign extension in "instr reg, reg, addr32_low".
   1765   // lui reg, addr32_high
   1766   code[high_literal_offset + 0] = static_cast<uint8_t>(addr32 >> 16);
   1767   code[high_literal_offset + 1] = static_cast<uint8_t>(addr32 >> 24);
   1768   // instr reg, reg, addr32_low
   1769   code[low_literal_offset + 0] = static_cast<uint8_t>(addr32 >> 0);
   1770   code[low_literal_offset + 1] = static_cast<uint8_t>(addr32 >> 8);
   1771 }
   1772 
   1773 void CodeGeneratorMIPS::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
   1774   for (const JitPatchInfo& info : jit_string_patches_) {
   1775     StringReference string_reference(&info.target_dex_file, dex::StringIndex(info.index));
   1776     uint64_t index_in_table = GetJitStringRootIndex(string_reference);
   1777     PatchJitRootUse(code, roots_data, info, index_in_table);
   1778   }
   1779   for (const JitPatchInfo& info : jit_class_patches_) {
   1780     TypeReference type_reference(&info.target_dex_file, dex::TypeIndex(info.index));
   1781     uint64_t index_in_table = GetJitClassRootIndex(type_reference);
   1782     PatchJitRootUse(code, roots_data, info, index_in_table);
   1783   }
   1784 }
   1785 
   1786 void CodeGeneratorMIPS::MarkGCCard(Register object,
   1787                                    Register value,
   1788                                    bool value_can_be_null) {
   1789   MipsLabel done;
   1790   Register card = AT;
   1791   Register temp = TMP;
   1792   if (value_can_be_null) {
   1793     __ Beqz(value, &done);
   1794   }
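           // Load the card table base from the thread. Its byte value doubles as the
           // dirty-card marker stored below, which saves loading a separate constant.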
   1795   __ LoadFromOffset(kLoadWord,
   1796                     card,
   1797                     TR,
   1798                     Thread::CardTableOffset<kMipsPointerSize>().Int32Value());
   1799   __ Srl(temp, object, gc::accounting::CardTable::kCardShift);
   1800   __ Addu(temp, card, temp);
   1801   __ Sb(card, temp, 0);
   1802   if (value_can_be_null) {
   1803     __ Bind(&done);
   1804   }
   1805 }
   1806 
   1807 void CodeGeneratorMIPS::SetupBlockedRegisters() const {
   1808   // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
   1809   blocked_core_registers_[ZERO] = true;
   1810   blocked_core_registers_[K0] = true;
   1811   blocked_core_registers_[K1] = true;
   1812   blocked_core_registers_[GP] = true;
   1813   blocked_core_registers_[SP] = true;
   1814   blocked_core_registers_[RA] = true;
   1815 
   1816   // AT and TMP(T8) are used as temporary/scratch registers
   1817   // (similar to how AT is used by MIPS assemblers).
   1818   blocked_core_registers_[AT] = true;
   1819   blocked_core_registers_[TMP] = true;
   1820   blocked_fpu_registers_[FTMP] = true;
   1821 
   1822   if (GetInstructionSetFeatures().HasMsa()) {
   1823     // To be used just for MSA instructions.
   1824     blocked_fpu_registers_[FTMP2] = true;
   1825   }
   1826 
   1827   // Reserve suspend and thread registers.
   1828   blocked_core_registers_[S0] = true;
   1829   blocked_core_registers_[TR] = true;
   1830 
    1831   // Reserve T9 for function calls.
   1832   blocked_core_registers_[T9] = true;
   1833 
   1834   // Reserve odd-numbered FPU registers.
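           // On MIPS32 a double occupies an even/odd register pair, so allocating only
           // even registers lets floats and doubles share a single allocation unit.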
   1835   for (size_t i = 1; i < kNumberOfFRegisters; i += 2) {
   1836     blocked_fpu_registers_[i] = true;
   1837   }
   1838 
   1839   if (GetGraph()->IsDebuggable()) {
   1840     // Stubs do not save callee-save floating point registers. If the graph
   1841     // is debuggable, we need to deal with these registers differently. For
   1842     // now, just block them.
   1843     for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
   1844       blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
   1845     }
   1846   }
   1847 }
   1848 
   1849 size_t CodeGeneratorMIPS::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
   1850   __ StoreToOffset(kStoreWord, Register(reg_id), SP, stack_index);
   1851   return kMipsWordSize;
   1852 }
   1853 
   1854 size_t CodeGeneratorMIPS::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
   1855   __ LoadFromOffset(kLoadWord, Register(reg_id), SP, stack_index);
   1856   return kMipsWordSize;
   1857 }
   1858 
   1859 size_t CodeGeneratorMIPS::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
   1860   if (GetGraph()->HasSIMD()) {
   1861     __ StoreQToOffset(FRegister(reg_id), SP, stack_index);
   1862   } else {
   1863     __ StoreDToOffset(FRegister(reg_id), SP, stack_index);
   1864   }
   1865   return GetFloatingPointSpillSlotSize();
   1866 }
   1867 
   1868 size_t CodeGeneratorMIPS::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
   1869   if (GetGraph()->HasSIMD()) {
   1870     __ LoadQFromOffset(FRegister(reg_id), SP, stack_index);
   1871   } else {
   1872     __ LoadDFromOffset(FRegister(reg_id), SP, stack_index);
   1873   }
   1874   return GetFloatingPointSpillSlotSize();
   1875 }
   1876 
   1877 void CodeGeneratorMIPS::DumpCoreRegister(std::ostream& stream, int reg) const {
   1878   stream << Register(reg);
   1879 }
   1880 
   1881 void CodeGeneratorMIPS::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
   1882   stream << FRegister(reg);
   1883 }
   1884 
   1885 constexpr size_t kMipsDirectEntrypointRuntimeOffset = 16;
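         // 16 bytes of home space for $a0-$a3: the o32 convention allows a native callee
         // to spill its incoming argument registers into the caller's frame, so this much
         // is reserved around direct entrypoint calls (see GenerateInvokeRuntime below).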
   1886 
   1887 void CodeGeneratorMIPS::InvokeRuntime(QuickEntrypointEnum entrypoint,
   1888                                       HInstruction* instruction,
   1889                                       uint32_t dex_pc,
   1890                                       SlowPathCode* slow_path) {
   1891   ValidateInvokeRuntime(entrypoint, instruction, slow_path);
   1892   GenerateInvokeRuntime(GetThreadOffset<kMipsPointerSize>(entrypoint).Int32Value(),
   1893                         IsDirectEntrypoint(entrypoint));
   1894   if (EntrypointRequiresStackMap(entrypoint)) {
   1895     RecordPcInfo(instruction, dex_pc, slow_path);
   1896   }
   1897 }
   1898 
   1899 void CodeGeneratorMIPS::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
   1900                                                             HInstruction* instruction,
   1901                                                             SlowPathCode* slow_path,
   1902                                                             bool direct) {
   1903   ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
   1904   GenerateInvokeRuntime(entry_point_offset, direct);
   1905 }
   1906 
   1907 void CodeGeneratorMIPS::GenerateInvokeRuntime(int32_t entry_point_offset, bool direct) {
   1908   bool reordering = __ SetReorder(false);
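           // The MIPS PIC convention expects the callee's address in T9, so the entrypoint
           // is loaded there before the JALR.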
   1909   __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
   1910   __ Jalr(T9);
   1911   if (direct) {
    1912     // Reserve stack space for the argument registers ($a0-$a3) of entrypoints
    1913     // that directly reference native implementations. The called function may
    1914     // use this space to spill $a0-$a3.
   1915     __ IncreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);  // Single instruction in delay slot.
   1916     __ DecreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);
   1917   } else {
   1918     __ Nop();  // In delay slot.
   1919   }
   1920   __ SetReorder(reordering);
   1921 }
   1922 
   1923 void InstructionCodeGeneratorMIPS::GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path,
   1924                                                                     Register class_reg) {
   1925   constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
   1926   const size_t status_byte_offset =
   1927       mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
   1928   constexpr uint32_t shifted_initialized_value =
   1929       enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
   1930 
   1931   __ LoadFromOffset(kLoadUnsignedByte, TMP, class_reg, status_byte_offset);
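           // TMP becomes 1 iff the status byte is below the (shifted) kInitialized value;
           // in that case branch to the slow path to perform the initialization.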
   1932   __ Sltiu(TMP, TMP, shifted_initialized_value);
   1933   __ Bnez(TMP, slow_path->GetEntryLabel());
   1934   // Even if the initialized flag is set, we need to ensure consistent memory ordering.
   1935   __ Sync(0);
   1936   __ Bind(slow_path->GetExitLabel());
   1937 }
   1938 
   1939 void InstructionCodeGeneratorMIPS::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
   1940   __ Sync(0);  // Only stype 0 is supported.
   1941 }
   1942 
   1943 void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction,
   1944                                                         HBasicBlock* successor) {
   1945   SuspendCheckSlowPathMIPS* slow_path =
   1946       down_cast<SuspendCheckSlowPathMIPS*>(instruction->GetSlowPath());
   1947 
   1948   if (slow_path == nullptr) {
   1949     slow_path =
   1950         new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS(instruction, successor);
   1951     instruction->SetSlowPath(slow_path);
   1952     codegen_->AddSlowPath(slow_path);
   1953     if (successor != nullptr) {
   1954       DCHECK(successor->IsLoopHeader());
   1955     }
   1956   } else {
   1957     DCHECK_EQ(slow_path->GetSuccessor(), successor);
   1958   }
   1959 
   1960   __ LoadFromOffset(kLoadUnsignedHalfword,
   1961                     TMP,
   1962                     TR,
   1963                     Thread::ThreadFlagsOffset<kMipsPointerSize>().Int32Value());
   1964   if (successor == nullptr) {
   1965     __ Bnez(TMP, slow_path->GetEntryLabel());
   1966     __ Bind(slow_path->GetReturnLabel());
   1967   } else {
   1968     __ Beqz(TMP, codegen_->GetLabelOf(successor));
   1969     __ B(slow_path->GetEntryLabel());
   1970     // slow_path will return to GetLabelOf(successor).
   1971   }
   1972 }
   1973 
   1974 InstructionCodeGeneratorMIPS::InstructionCodeGeneratorMIPS(HGraph* graph,
   1975                                                            CodeGeneratorMIPS* codegen)
   1976       : InstructionCodeGenerator(graph, codegen),
   1977         assembler_(codegen->GetAssembler()),
   1978         codegen_(codegen) {}
   1979 
   1980 void LocationsBuilderMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
   1981   DCHECK_EQ(instruction->InputCount(), 2U);
   1982   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
   1983   DataType::Type type = instruction->GetResultType();
   1984   bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   1985   switch (type) {
   1986     case DataType::Type::kInt32: {
   1987       locations->SetInAt(0, Location::RequiresRegister());
   1988       HInstruction* right = instruction->InputAt(1);
   1989       bool can_use_imm = false;
   1990       if (right->IsConstant()) {
   1991         int32_t imm = CodeGenerator::GetInt32ValueOf(right->AsConstant());
   1992         if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
   1993           can_use_imm = IsUint<16>(imm);
   1994         } else {
   1995           DCHECK(instruction->IsSub() || instruction->IsAdd());
   1996           if (instruction->IsSub()) {
   1997             imm = -imm;
   1998           }
   1999           if (isR6) {
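                     // On R6, any 32-bit constant can be added with at most an Aui/Addiu
                     // pair (see HandleBinaryOp), so prefer an immediate unless both halves
                     // are nonzero and the constant has several uses, where materializing
                     // it once in a register is presumably cheaper.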
   2000             bool single_use = right->GetUses().HasExactlyOneElement();
   2001             int16_t imm_high = High16Bits(imm);
   2002             int16_t imm_low = Low16Bits(imm);
   2003             if (imm_low < 0) {
   2004               imm_high += 1;
   2005             }
   2006             can_use_imm = !((imm_high != 0) && (imm_low != 0)) || single_use;
   2007           } else {
   2008             can_use_imm = IsInt<16>(imm);
   2009           }
   2010         }
   2011       }
   2012       if (can_use_imm)
   2013         locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
   2014       else
   2015         locations->SetInAt(1, Location::RequiresRegister());
   2016       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   2017       break;
   2018     }
   2019 
   2020     case DataType::Type::kInt64: {
   2021       locations->SetInAt(0, Location::RequiresRegister());
   2022       locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
   2023       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   2024       break;
   2025     }
   2026 
   2027     case DataType::Type::kFloat32:
   2028     case DataType::Type::kFloat64:
   2029       DCHECK(instruction->IsAdd() || instruction->IsSub());
   2030       locations->SetInAt(0, Location::RequiresFpuRegister());
   2031       locations->SetInAt(1, Location::RequiresFpuRegister());
   2032       locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
   2033       break;
   2034 
   2035     default:
   2036       LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
   2037   }
   2038 }
   2039 
   2040 void InstructionCodeGeneratorMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
   2041   DataType::Type type = instruction->GetType();
   2042   LocationSummary* locations = instruction->GetLocations();
   2043   bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   2044 
   2045   switch (type) {
   2046     case DataType::Type::kInt32: {
   2047       Register dst = locations->Out().AsRegister<Register>();
   2048       Register lhs = locations->InAt(0).AsRegister<Register>();
   2049       Location rhs_location = locations->InAt(1);
   2050 
   2051       Register rhs_reg = ZERO;
   2052       int32_t rhs_imm = 0;
   2053       bool use_imm = rhs_location.IsConstant();
   2054       if (use_imm) {
   2055         rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
   2056       } else {
   2057         rhs_reg = rhs_location.AsRegister<Register>();
   2058       }
   2059 
   2060       if (instruction->IsAnd()) {
   2061         if (use_imm)
   2062           __ Andi(dst, lhs, rhs_imm);
   2063         else
   2064           __ And(dst, lhs, rhs_reg);
   2065       } else if (instruction->IsOr()) {
   2066         if (use_imm)
   2067           __ Ori(dst, lhs, rhs_imm);
   2068         else
   2069           __ Or(dst, lhs, rhs_reg);
   2070       } else if (instruction->IsXor()) {
   2071         if (use_imm)
   2072           __ Xori(dst, lhs, rhs_imm);
   2073         else
   2074           __ Xor(dst, lhs, rhs_reg);
   2075       } else {
   2076         DCHECK(instruction->IsAdd() || instruction->IsSub());
   2077         if (use_imm) {
   2078           if (instruction->IsSub()) {
   2079             rhs_imm = -rhs_imm;
   2080           }
   2081           if (IsInt<16>(rhs_imm)) {
   2082             __ Addiu(dst, lhs, rhs_imm);
   2083           } else {
   2084             DCHECK(isR6);
   2085             int16_t rhs_imm_high = High16Bits(rhs_imm);
   2086             int16_t rhs_imm_low = Low16Bits(rhs_imm);
   2087             if (rhs_imm_low < 0) {
   2088               rhs_imm_high += 1;
   2089             }
   2090             __ Aui(dst, lhs, rhs_imm_high);
   2091             if (rhs_imm_low != 0) {
   2092               __ Addiu(dst, dst, rhs_imm_low);
   2093             }
   2094           }
   2095         } else if (instruction->IsAdd()) {
   2096           __ Addu(dst, lhs, rhs_reg);
   2097         } else {
   2098           DCHECK(instruction->IsSub());
   2099           __ Subu(dst, lhs, rhs_reg);
   2100         }
   2101       }
   2102       break;
   2103     }
   2104 
   2105     case DataType::Type::kInt64: {
   2106       Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
   2107       Register dst_low = locations->Out().AsRegisterPairLow<Register>();
   2108       Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
   2109       Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
   2110       Location rhs_location = locations->InAt(1);
   2111       bool use_imm = rhs_location.IsConstant();
   2112       if (!use_imm) {
   2113         Register rhs_high = rhs_location.AsRegisterPairHigh<Register>();
   2114         Register rhs_low = rhs_location.AsRegisterPairLow<Register>();
   2115         if (instruction->IsAnd()) {
   2116           __ And(dst_low, lhs_low, rhs_low);
   2117           __ And(dst_high, lhs_high, rhs_high);
   2118         } else if (instruction->IsOr()) {
   2119           __ Or(dst_low, lhs_low, rhs_low);
   2120           __ Or(dst_high, lhs_high, rhs_high);
   2121         } else if (instruction->IsXor()) {
   2122           __ Xor(dst_low, lhs_low, rhs_low);
   2123           __ Xor(dst_high, lhs_high, rhs_high);
   2124         } else if (instruction->IsAdd()) {
   2125           if (lhs_low == rhs_low) {
   2126             // Special case for lhs = rhs and the sum potentially overwriting both lhs and rhs.
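                     // The carry out of low + low is simply bit 31 of the low word, which
                     // Slt extracts as the sign bit.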
   2127             __ Slt(TMP, lhs_low, ZERO);
   2128             __ Addu(dst_low, lhs_low, rhs_low);
   2129           } else {
   2130             __ Addu(dst_low, lhs_low, rhs_low);
   2131             // If the sum overwrites rhs, lhs remains unchanged, otherwise rhs remains unchanged.
    2132             // If the sum overwrites rhs, lhs remains unchanged; otherwise rhs remains unchanged.
   2133           }
   2134           __ Addu(dst_high, lhs_high, rhs_high);
   2135           __ Addu(dst_high, dst_high, TMP);
   2136         } else {
   2137           DCHECK(instruction->IsSub());
   2138           __ Sltu(TMP, lhs_low, rhs_low);
   2139           __ Subu(dst_low, lhs_low, rhs_low);
   2140           __ Subu(dst_high, lhs_high, rhs_high);
   2141           __ Subu(dst_high, dst_high, TMP);
   2142         }
   2143       } else {
   2144         int64_t value = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()->AsConstant());
   2145         if (instruction->IsOr()) {
   2146           uint32_t low = Low32Bits(value);
   2147           uint32_t high = High32Bits(value);
   2148           if (IsUint<16>(low)) {
   2149             if (dst_low != lhs_low || low != 0) {
   2150               __ Ori(dst_low, lhs_low, low);
   2151             }
   2152           } else {
   2153             __ LoadConst32(TMP, low);
   2154             __ Or(dst_low, lhs_low, TMP);
   2155           }
   2156           if (IsUint<16>(high)) {
   2157             if (dst_high != lhs_high || high != 0) {
   2158               __ Ori(dst_high, lhs_high, high);
   2159             }
   2160           } else {
   2161             if (high != low) {
   2162               __ LoadConst32(TMP, high);
   2163             }
   2164             __ Or(dst_high, lhs_high, TMP);
   2165           }
   2166         } else if (instruction->IsXor()) {
   2167           uint32_t low = Low32Bits(value);
   2168           uint32_t high = High32Bits(value);
   2169           if (IsUint<16>(low)) {
   2170             if (dst_low != lhs_low || low != 0) {
   2171               __ Xori(dst_low, lhs_low, low);
   2172             }
   2173           } else {
   2174             __ LoadConst32(TMP, low);
   2175             __ Xor(dst_low, lhs_low, TMP);
   2176           }
   2177           if (IsUint<16>(high)) {
   2178             if (dst_high != lhs_high || high != 0) {
   2179               __ Xori(dst_high, lhs_high, high);
   2180             }
   2181           } else {
   2182             if (high != low) {
   2183               __ LoadConst32(TMP, high);
   2184             }
   2185             __ Xor(dst_high, lhs_high, TMP);
   2186           }
   2187         } else if (instruction->IsAnd()) {
   2188           uint32_t low = Low32Bits(value);
   2189           uint32_t high = High32Bits(value);
   2190           if (IsUint<16>(low)) {
   2191             __ Andi(dst_low, lhs_low, low);
   2192           } else if (low != 0xFFFFFFFF) {
   2193             __ LoadConst32(TMP, low);
   2194             __ And(dst_low, lhs_low, TMP);
   2195           } else if (dst_low != lhs_low) {
   2196             __ Move(dst_low, lhs_low);
   2197           }
   2198           if (IsUint<16>(high)) {
   2199             __ Andi(dst_high, lhs_high, high);
   2200           } else if (high != 0xFFFFFFFF) {
   2201             if (high != low) {
   2202               __ LoadConst32(TMP, high);
   2203             }
   2204             __ And(dst_high, lhs_high, TMP);
   2205           } else if (dst_high != lhs_high) {
   2206             __ Move(dst_high, lhs_high);
   2207           }
   2208         } else {
   2209           if (instruction->IsSub()) {
   2210             value = -value;
   2211           } else {
   2212             DCHECK(instruction->IsAdd());
   2213           }
   2214           int32_t low = Low32Bits(value);
   2215           int32_t high = High32Bits(value);
   2216           if (IsInt<16>(low)) {
   2217             if (dst_low != lhs_low || low != 0) {
   2218               __ Addiu(dst_low, lhs_low, low);
   2219             }
   2220             if (low != 0) {
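                       // AT = carry out of the low word: the 32-bit sum wrapped around iff
                       // dst_low < low (unsigned).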
   2221               __ Sltiu(AT, dst_low, low);
   2222             }
   2223           } else {
   2224             __ LoadConst32(TMP, low);
   2225             __ Addu(dst_low, lhs_low, TMP);
   2226             __ Sltu(AT, dst_low, TMP);
   2227           }
   2228           if (IsInt<16>(high)) {
   2229             if (dst_high != lhs_high || high != 0) {
   2230               __ Addiu(dst_high, lhs_high, high);
   2231             }
   2232           } else {
   2233             if (high != low) {
   2234               __ LoadConst32(TMP, high);
   2235             }
   2236             __ Addu(dst_high, lhs_high, TMP);
   2237           }
   2238           if (low != 0) {
   2239             __ Addu(dst_high, dst_high, AT);
   2240           }
   2241         }
   2242       }
   2243       break;
   2244     }
   2245 
   2246     case DataType::Type::kFloat32:
   2247     case DataType::Type::kFloat64: {
   2248       FRegister dst = locations->Out().AsFpuRegister<FRegister>();
   2249       FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
   2250       FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
   2251       if (instruction->IsAdd()) {
   2252         if (type == DataType::Type::kFloat32) {
   2253           __ AddS(dst, lhs, rhs);
   2254         } else {
   2255           __ AddD(dst, lhs, rhs);
   2256         }
   2257       } else {
   2258         DCHECK(instruction->IsSub());
   2259         if (type == DataType::Type::kFloat32) {
   2260           __ SubS(dst, lhs, rhs);
   2261         } else {
   2262           __ SubD(dst, lhs, rhs);
   2263         }
   2264       }
   2265       break;
   2266     }
   2267 
   2268     default:
   2269       LOG(FATAL) << "Unexpected binary operation type " << type;
   2270   }
   2271 }
   2272 
   2273 void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) {
   2274   DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
   2275 
   2276   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
   2277   DataType::Type type = instr->GetResultType();
   2278   switch (type) {
   2279     case DataType::Type::kInt32:
   2280       locations->SetInAt(0, Location::RequiresRegister());
   2281       locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
   2282       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   2283       break;
   2284     case DataType::Type::kInt64:
   2285       locations->SetInAt(0, Location::RequiresRegister());
   2286       locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
   2287       locations->SetOut(Location::RequiresRegister());
   2288       break;
   2289     default:
   2290       LOG(FATAL) << "Unexpected shift type " << type;
   2291   }
   2292 }
   2293 
   2294 static constexpr size_t kMipsBitsPerWord = kMipsWordSize * kBitsPerByte;
   2295 
   2296 void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
   2297   DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
   2298   LocationSummary* locations = instr->GetLocations();
   2299   DataType::Type type = instr->GetType();
   2300 
   2301   Location rhs_location = locations->InAt(1);
   2302   bool use_imm = rhs_location.IsConstant();
   2303   Register rhs_reg = use_imm ? ZERO : rhs_location.AsRegister<Register>();
   2304   int64_t rhs_imm = use_imm ? CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()) : 0;
   2305   const uint32_t shift_mask =
   2306       (type == DataType::Type::kInt32) ? kMaxIntShiftDistance : kMaxLongShiftDistance;
   2307   const uint32_t shift_value = rhs_imm & shift_mask;
   2308   // Are the INS (Insert Bit Field) and ROTR instructions supported?
   2309   bool has_ins_rotr = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
   2310 
   2311   switch (type) {
   2312     case DataType::Type::kInt32: {
   2313       Register dst = locations->Out().AsRegister<Register>();
   2314       Register lhs = locations->InAt(0).AsRegister<Register>();
   2315       if (use_imm) {
   2316         if (shift_value == 0) {
   2317           if (dst != lhs) {
   2318             __ Move(dst, lhs);
   2319           }
   2320         } else if (instr->IsShl()) {
   2321           __ Sll(dst, lhs, shift_value);
   2322         } else if (instr->IsShr()) {
   2323           __ Sra(dst, lhs, shift_value);
   2324         } else if (instr->IsUShr()) {
   2325           __ Srl(dst, lhs, shift_value);
   2326         } else {
   2327           if (has_ins_rotr) {
   2328             __ Rotr(dst, lhs, shift_value);
   2329           } else {
   2330             __ Sll(TMP, lhs, (kMipsBitsPerWord - shift_value) & shift_mask);
   2331             __ Srl(dst, lhs, shift_value);
   2332             __ Or(dst, dst, TMP);
   2333           }
   2334         }
   2335       } else {
   2336         if (instr->IsShl()) {
   2337           __ Sllv(dst, lhs, rhs_reg);
   2338         } else if (instr->IsShr()) {
   2339           __ Srav(dst, lhs, rhs_reg);
   2340         } else if (instr->IsUShr()) {
   2341           __ Srlv(dst, lhs, rhs_reg);
   2342         } else {
   2343           if (has_ins_rotr) {
   2344             __ Rotrv(dst, lhs, rhs_reg);
   2345           } else {
   2346             __ Subu(TMP, ZERO, rhs_reg);
   2347             // 32-bit shift instructions use the 5 least significant bits of the shift count, so
   2348             // shifting by `-rhs_reg` is equivalent to shifting by `(32 - rhs_reg) & 31`. The case
   2349             // when `rhs_reg & 31 == 0` is OK even though we don't shift `lhs` left all the way out
   2350             // by 32, because the result in this case is computed as `(lhs >> 0) | (lhs << 0)`,
    2351             // in other words, the OR'd values are equal.
   2352             __ Sllv(TMP, lhs, TMP);
   2353             __ Srlv(dst, lhs, rhs_reg);
   2354             __ Or(dst, dst, TMP);
   2355           }
   2356         }
   2357       }
   2358       break;
   2359     }
   2360 
   2361     case DataType::Type::kInt64: {
   2362       Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
   2363       Register dst_low = locations->Out().AsRegisterPairLow<Register>();
   2364       Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
   2365       Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
   2366       if (use_imm) {
    2367         if (shift_value == 0) {
    2368           codegen_->MoveLocation(locations->Out(), locations->InAt(0), type);
    2369         } else if (shift_value < kMipsBitsPerWord) {
    2370           if (has_ins_rotr) {
    2371             if (instr->IsShl()) {
    2372               __ Srl(dst_high, lhs_low, kMipsBitsPerWord - shift_value);
    2373               __ Ins(dst_high, lhs_high, shift_value, kMipsBitsPerWord - shift_value);
    2374               __ Sll(dst_low, lhs_low, shift_value);
    2375             } else if (instr->IsShr()) {
    2376               __ Srl(dst_low, lhs_low, shift_value);
    2377               __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
    2378               __ Sra(dst_high, lhs_high, shift_value);
    2379             } else if (instr->IsUShr()) {
    2380               __ Srl(dst_low, lhs_low, shift_value);
    2381               __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
    2382               __ Srl(dst_high, lhs_high, shift_value);
    2383             } else {
    2384               __ Srl(dst_low, lhs_low, shift_value);
    2385               __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
    2386               __ Srl(dst_high, lhs_high, shift_value);
    2387               __ Ins(dst_high, lhs_low, kMipsBitsPerWord - shift_value, shift_value);
    2388             }
    2389           } else {
    2390             if (instr->IsShl()) {
    2391               __ Sll(dst_low, lhs_low, shift_value);
    2392               __ Srl(TMP, lhs_low, kMipsBitsPerWord - shift_value);
    2393               __ Sll(dst_high, lhs_high, shift_value);
    2394               __ Or(dst_high, dst_high, TMP);
    2395             } else if (instr->IsShr()) {
    2396               __ Sra(dst_high, lhs_high, shift_value);
    2397               __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
    2398               __ Srl(dst_low, lhs_low, shift_value);
    2399               __ Or(dst_low, dst_low, TMP);
    2400             } else if (instr->IsUShr()) {
    2401               __ Srl(dst_high, lhs_high, shift_value);
    2402               __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
    2403               __ Srl(dst_low, lhs_low, shift_value);
    2404               __ Or(dst_low, dst_low, TMP);
    2405             } else {
    2406               __ Srl(TMP, lhs_low, shift_value);
    2407               __ Sll(dst_low, lhs_high, kMipsBitsPerWord - shift_value);
    2408               __ Or(dst_low, dst_low, TMP);
    2409               __ Srl(TMP, lhs_high, shift_value);
    2410               __ Sll(dst_high, lhs_low, kMipsBitsPerWord - shift_value);
    2411               __ Or(dst_high, dst_high, TMP);
    2412             }
    2413           }
    2414         } else {
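                   // The shift distance is in [32, 63]: all data crosses between the two
                   // words of the pair, and only `shift_value - 32` bits of real shifting
                   // remain within a single word.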
    2415           const uint32_t shift_value_high = shift_value - kMipsBitsPerWord;
    2416           if (instr->IsShl()) {
    2417             __ Sll(dst_high, lhs_low, shift_value_high);
    2418             __ Move(dst_low, ZERO);
    2419           } else if (instr->IsShr()) {
    2420             __ Sra(dst_low, lhs_high, shift_value_high);
    2421             __ Sra(dst_high, dst_low, kMipsBitsPerWord - 1);
    2422           } else if (instr->IsUShr()) {
    2423             __ Srl(dst_low, lhs_high, shift_value_high);
    2424             __ Move(dst_high, ZERO);
    2425           } else {
    2426             if (shift_value == kMipsBitsPerWord) {
    2427               // 64-bit rotation by 32 is just a swap.
    2428               __ Move(dst_low, lhs_high);
    2429               __ Move(dst_high, lhs_low);
    2430             } else {
    2431               if (has_ins_rotr) {
    2432                 __ Srl(dst_low, lhs_high, shift_value_high);
    2433                 __ Ins(dst_low, lhs_low, kMipsBitsPerWord - shift_value_high, shift_value_high);
    2434                 __ Srl(dst_high, lhs_low, shift_value_high);
    2435                 __ Ins(dst_high, lhs_high, kMipsBitsPerWord - shift_value_high, shift_value_high);
    2436               } else {
    2437                 __ Sll(TMP, lhs_low, kMipsBitsPerWord - shift_value_high);
    2438                 __ Srl(dst_low, lhs_high, shift_value_high);
    2439                 __ Or(dst_low, dst_low, TMP);
    2440                 __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value_high);
    2441                 __ Srl(dst_high, lhs_low, shift_value_high);
    2442                 __ Or(dst_high, dst_high, TMP);
    2443               }
    2444             }
    2445           }
    2446         }
   2447       } else {
   2448         const bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   2449         MipsLabel done;
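                 // Compute the result as if the shift distance were below 32 (combining both
                 // input words), then fix it up if bit 5 of the distance is set, i.e. if the
                 // effective distance is in [32, 63].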
   2450         if (instr->IsShl()) {
   2451           __ Sllv(dst_low, lhs_low, rhs_reg);
   2452           __ Nor(AT, ZERO, rhs_reg);
   2453           __ Srl(TMP, lhs_low, 1);
   2454           __ Srlv(TMP, TMP, AT);
   2455           __ Sllv(dst_high, lhs_high, rhs_reg);
   2456           __ Or(dst_high, dst_high, TMP);
   2457           __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
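                   // TMP is non-zero iff the effective shift distance is at least 32, in
                   // which case the two result words computed above need adjusting.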
   2458           if (isR6) {
   2459             __ Beqzc(TMP, &done, /* is_bare */ true);
   2460             __ Move(dst_high, dst_low);
   2461             __ Move(dst_low, ZERO);
   2462           } else {
   2463             __ Movn(dst_high, dst_low, TMP);
   2464             __ Movn(dst_low, ZERO, TMP);
   2465           }
   2466         } else if (instr->IsShr()) {
   2467           __ Srav(dst_high, lhs_high, rhs_reg);
   2468           __ Nor(AT, ZERO, rhs_reg);
   2469           __ Sll(TMP, lhs_high, 1);
   2470           __ Sllv(TMP, TMP, AT);
   2471           __ Srlv(dst_low, lhs_low, rhs_reg);
   2472           __ Or(dst_low, dst_low, TMP);
   2473           __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
   2474           if (isR6) {
   2475             __ Beqzc(TMP, &done, /* is_bare */ true);
   2476             __ Move(dst_low, dst_high);
   2477             __ Sra(dst_high, dst_high, 31);
   2478           } else {
   2479             __ Sra(AT, dst_high, 31);
   2480             __ Movn(dst_low, dst_high, TMP);
   2481             __ Movn(dst_high, AT, TMP);
   2482           }
   2483         } else if (instr->IsUShr()) {
   2484           __ Srlv(dst_high, lhs_high, rhs_reg);
   2485           __ Nor(AT, ZERO, rhs_reg);
   2486           __ Sll(TMP, lhs_high, 1);
   2487           __ Sllv(TMP, TMP, AT);
   2488           __ Srlv(dst_low, lhs_low, rhs_reg);
   2489           __ Or(dst_low, dst_low, TMP);
   2490           __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
   2491           if (isR6) {
   2492             __ Beqzc(TMP, &done, /* is_bare */ true);
   2493             __ Move(dst_low, dst_high);
   2494             __ Move(dst_high, ZERO);
   2495           } else {
   2496             __ Movn(dst_low, dst_high, TMP);
   2497             __ Movn(dst_high, ZERO, TMP);
   2498           }
   2499         } else {  // Rotate.
   2500           __ Nor(AT, ZERO, rhs_reg);
   2501           __ Srlv(TMP, lhs_low, rhs_reg);
   2502           __ Sll(dst_low, lhs_high, 1);
   2503           __ Sllv(dst_low, dst_low, AT);
   2504           __ Or(dst_low, dst_low, TMP);
   2505           __ Srlv(TMP, lhs_high, rhs_reg);
   2506           __ Sll(dst_high, lhs_low, 1);
   2507           __ Sllv(dst_high, dst_high, AT);
   2508           __ Or(dst_high, dst_high, TMP);
   2509           __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
   2510           if (isR6) {
   2511             __ Beqzc(TMP, &done, /* is_bare */ true);
   2512             __ Move(TMP, dst_high);
   2513             __ Move(dst_high, dst_low);
   2514             __ Move(dst_low, TMP);
   2515           } else {
   2516             __ Movn(AT, dst_high, TMP);
   2517             __ Movn(dst_high, dst_low, TMP);
   2518             __ Movn(dst_low, AT, TMP);
   2519           }
   2520         }
   2521         __ Bind(&done);
   2522       }
   2523       break;
   2524     }
   2525 
   2526     default:
   2527       LOG(FATAL) << "Unexpected shift operation type " << type;
   2528   }
   2529 }
   2530 
   2531 void LocationsBuilderMIPS::VisitAdd(HAdd* instruction) {
   2532   HandleBinaryOp(instruction);
   2533 }
   2534 
   2535 void InstructionCodeGeneratorMIPS::VisitAdd(HAdd* instruction) {
   2536   HandleBinaryOp(instruction);
   2537 }
   2538 
   2539 void LocationsBuilderMIPS::VisitAnd(HAnd* instruction) {
   2540   HandleBinaryOp(instruction);
   2541 }
   2542 
   2543 void InstructionCodeGeneratorMIPS::VisitAnd(HAnd* instruction) {
   2544   HandleBinaryOp(instruction);
   2545 }
   2546 
   2547 void LocationsBuilderMIPS::VisitArrayGet(HArrayGet* instruction) {
   2548   DataType::Type type = instruction->GetType();
   2549   bool object_array_get_with_read_barrier =
   2550       kEmitCompilerReadBarrier && (type == DataType::Type::kReference);
   2551   LocationSummary* locations =
   2552       new (GetGraph()->GetAllocator()) LocationSummary(instruction,
   2553                                                        object_array_get_with_read_barrier
   2554                                                            ? LocationSummary::kCallOnSlowPath
   2555                                                            : LocationSummary::kNoCall);
   2556   if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
   2557     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
   2558   }
   2559   locations->SetInAt(0, Location::RequiresRegister());
   2560   locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
   2561   if (DataType::IsFloatingPointType(type)) {
   2562     locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
   2563   } else {
   2564     // The output overlaps in the case of an object array get with
   2565     // read barriers enabled: we do not want the move to overwrite the
   2566     // array's location, as we need it to emit the read barrier.
   2567     locations->SetOut(Location::RequiresRegister(),
   2568                       object_array_get_with_read_barrier
   2569                           ? Location::kOutputOverlap
   2570                           : Location::kNoOutputOverlap);
   2571   }
   2572   // We need a temporary register for the read barrier marking slow
   2573   // path in CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier.
   2574   if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
   2575     bool temp_needed = instruction->GetIndex()->IsConstant()
   2576         ? !kBakerReadBarrierThunksEnableForFields
   2577         : !kBakerReadBarrierThunksEnableForArrays;
   2578     if (temp_needed) {
   2579       locations->AddTemp(Location::RequiresRegister());
   2580     }
   2581   }
   2582 }
   2583 
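         // Returns a callback that records the current code position as an implicit null
         // check for `instruction`; the Load/Store helpers below invoke it after emitting
         // the memory access that may fault on a null base register.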
   2584 static auto GetImplicitNullChecker(HInstruction* instruction, CodeGeneratorMIPS* codegen) {
   2585   auto null_checker = [codegen, instruction]() {
   2586     codegen->MaybeRecordImplicitNullCheck(instruction);
   2587   };
   2588   return null_checker;
   2589 }
   2590 
   2591 void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
   2592   LocationSummary* locations = instruction->GetLocations();
   2593   Location obj_loc = locations->InAt(0);
   2594   Register obj = obj_loc.AsRegister<Register>();
   2595   Location out_loc = locations->Out();
   2596   Location index = locations->InAt(1);
   2597   uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
   2598   auto null_checker = GetImplicitNullChecker(instruction, codegen_);
   2599 
   2600   DataType::Type type = instruction->GetType();
   2601   const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
   2602                                         instruction->IsStringCharAt();
   2603   switch (type) {
   2604     case DataType::Type::kBool:
   2605     case DataType::Type::kUint8: {
   2606       Register out = out_loc.AsRegister<Register>();
   2607       if (index.IsConstant()) {
   2608         size_t offset =
   2609             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
   2610         __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset, null_checker);
   2611       } else {
   2612         __ Addu(TMP, obj, index.AsRegister<Register>());
   2613         __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset, null_checker);
   2614       }
   2615       break;
   2616     }
   2617 
   2618     case DataType::Type::kInt8: {
   2619       Register out = out_loc.AsRegister<Register>();
   2620       if (index.IsConstant()) {
   2621         size_t offset =
   2622             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
   2623         __ LoadFromOffset(kLoadSignedByte, out, obj, offset, null_checker);
   2624       } else {
   2625         __ Addu(TMP, obj, index.AsRegister<Register>());
   2626         __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset, null_checker);
   2627       }
   2628       break;
   2629     }
   2630 
   2631     case DataType::Type::kUint16: {
   2632       Register out = out_loc.AsRegister<Register>();
   2633       if (maybe_compressed_char_at) {
   2634         uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
   2635         __ LoadFromOffset(kLoadWord, TMP, obj, count_offset, null_checker);
   2636         __ Sll(TMP, TMP, 31);    // Extract compression flag into the most significant bit of TMP.
   2637         static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
   2638                       "Expecting 0=compressed, 1=uncompressed");
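                 // TMP is now zero for a compressed string (8-bit chars) and 0x80000000 for
                 // an uncompressed one (16-bit chars); the branches below test this.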
   2639       }
   2640       if (index.IsConstant()) {
   2641         int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
   2642         if (maybe_compressed_char_at) {
   2643           MipsLabel uncompressed_load, done;
   2644           __ Bnez(TMP, &uncompressed_load);
   2645           __ LoadFromOffset(kLoadUnsignedByte,
   2646                             out,
   2647                             obj,
   2648                             data_offset + (const_index << TIMES_1));
   2649           __ B(&done);
   2650           __ Bind(&uncompressed_load);
   2651           __ LoadFromOffset(kLoadUnsignedHalfword,
   2652                             out,
   2653                             obj,
   2654                             data_offset + (const_index << TIMES_2));
   2655           __ Bind(&done);
   2656         } else {
   2657           __ LoadFromOffset(kLoadUnsignedHalfword,
   2658                             out,
   2659                             obj,
   2660                             data_offset + (const_index << TIMES_2),
   2661                             null_checker);
   2662         }
   2663       } else {
   2664         Register index_reg = index.AsRegister<Register>();
   2665         if (maybe_compressed_char_at) {
   2666           MipsLabel uncompressed_load, done;
   2667           __ Bnez(TMP, &uncompressed_load);
   2668           __ Addu(TMP, obj, index_reg);
   2669           __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
   2670           __ B(&done);
   2671           __ Bind(&uncompressed_load);
   2672           __ ShiftAndAdd(TMP, index_reg, obj, TIMES_2, TMP);
   2673           __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
   2674           __ Bind(&done);
   2675         } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
   2676           __ Addu(TMP, index_reg, obj);
   2677           __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
   2678         } else {
   2679           __ ShiftAndAdd(TMP, index_reg, obj, TIMES_2, TMP);
   2680           __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
   2681         }
   2682       }
   2683       break;
   2684     }
   2685 
   2686     case DataType::Type::kInt16: {
   2687       Register out = out_loc.AsRegister<Register>();
   2688       if (index.IsConstant()) {
   2689         size_t offset =
   2690             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
   2691         __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset, null_checker);
   2692       } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
   2693         __ Addu(TMP, index.AsRegister<Register>(), obj);
   2694         __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset, null_checker);
   2695       } else {
   2696         __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_2, TMP);
   2697         __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset, null_checker);
   2698       }
   2699       break;
   2700     }
   2701 
   2702     case DataType::Type::kInt32: {
   2703       DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
   2704       Register out = out_loc.AsRegister<Register>();
   2705       if (index.IsConstant()) {
   2706         size_t offset =
   2707             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
   2708         __ LoadFromOffset(kLoadWord, out, obj, offset, null_checker);
   2709       } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
   2710         __ Addu(TMP, index.AsRegister<Register>(), obj);
   2711         __ LoadFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
   2712       } else {
   2713         __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_4, TMP);
   2714         __ LoadFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
   2715       }
   2716       break;
   2717     }
   2718 
   2719     case DataType::Type::kReference: {
   2720       static_assert(
   2721           sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
   2722           "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
   2723       // /* HeapReference<Object> */ out =
   2724       //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
   2725       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
   2726         bool temp_needed = index.IsConstant()
   2727             ? !kBakerReadBarrierThunksEnableForFields
   2728             : !kBakerReadBarrierThunksEnableForArrays;
   2729         Location temp = temp_needed ? locations->GetTemp(0) : Location::NoLocation();
   2730         // Note that a potential implicit null check is handled in this
   2731         // CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier call.
   2732         DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0)));
   2733         if (index.IsConstant()) {
   2734           // Array load with a constant index can be treated as a field load.
   2735           size_t offset =
   2736               (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
   2737           codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
   2738                                                           out_loc,
   2739                                                           obj,
   2740                                                           offset,
   2741                                                           temp,
   2742                                                           /* needs_null_check */ false);
   2743         } else {
   2744           codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
   2745                                                           out_loc,
   2746                                                           obj,
   2747                                                           data_offset,
   2748                                                           index,
   2749                                                           temp,
   2750                                                           /* needs_null_check */ false);
   2751         }
   2752       } else {
   2753         Register out = out_loc.AsRegister<Register>();
   2754         if (index.IsConstant()) {
   2755           size_t offset =
   2756               (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
   2757           __ LoadFromOffset(kLoadWord, out, obj, offset, null_checker);
   2758           // If read barriers are enabled, emit read barriers other than
   2759           // Baker's using a slow path (and also unpoison the loaded
   2760           // reference, if heap poisoning is enabled).
   2761           codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
   2762         } else {
   2763           __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_4, TMP);
   2764           __ LoadFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
   2765           // If read barriers are enabled, emit read barriers other than
   2766           // Baker's using a slow path (and also unpoison the loaded
   2767           // reference, if heap poisoning is enabled).
   2768           codegen_->MaybeGenerateReadBarrierSlow(instruction,
   2769                                                  out_loc,
   2770                                                  out_loc,
   2771                                                  obj_loc,
   2772                                                  data_offset,
   2773                                                  index);
   2774         }
   2775       }
   2776       break;
   2777     }
   2778 
   2779     case DataType::Type::kInt64: {
   2780       Register out = out_loc.AsRegisterPairLow<Register>();
   2781       if (index.IsConstant()) {
   2782         size_t offset =
   2783             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
   2784         __ LoadFromOffset(kLoadDoubleword, out, obj, offset, null_checker);
   2785       } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
   2786         __ Addu(TMP, index.AsRegister<Register>(), obj);
   2787         __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
   2788       } else {
   2789         __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_8, TMP);
   2790         __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
   2791       }
   2792       break;
   2793     }
   2794 
   2795     case DataType::Type::kFloat32: {
   2796       FRegister out = out_loc.AsFpuRegister<FRegister>();
   2797       if (index.IsConstant()) {
   2798         size_t offset =
   2799             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
   2800         __ LoadSFromOffset(out, obj, offset, null_checker);
   2801       } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
   2802         __ Addu(TMP, index.AsRegister<Register>(), obj);
   2803         __ LoadSFromOffset(out, TMP, data_offset, null_checker);
   2804       } else {
   2805         __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_4, TMP);
   2806         __ LoadSFromOffset(out, TMP, data_offset, null_checker);
   2807       }
   2808       break;
   2809     }
   2810 
   2811     case DataType::Type::kFloat64: {
   2812       FRegister out = out_loc.AsFpuRegister<FRegister>();
   2813       if (index.IsConstant()) {
   2814         size_t offset =
   2815             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
   2816         __ LoadDFromOffset(out, obj, offset, null_checker);
   2817       } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
   2818         __ Addu(TMP, index.AsRegister<Register>(), obj);
   2819         __ LoadDFromOffset(out, TMP, data_offset, null_checker);
   2820       } else {
   2821         __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_8, TMP);
   2822         __ LoadDFromOffset(out, TMP, data_offset, null_checker);
   2823       }
   2824       break;
   2825     }
   2826 
   2827     case DataType::Type::kUint32:
   2828     case DataType::Type::kUint64:
   2829     case DataType::Type::kVoid:
   2830       LOG(FATAL) << "Unreachable type " << instruction->GetType();
   2831       UNREACHABLE();
   2832   }
   2833 }
   2834 
   2835 void LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) {
   2836   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
   2837   locations->SetInAt(0, Location::RequiresRegister());
   2838   locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   2839 }
   2840 
   2841 void InstructionCodeGeneratorMIPS::VisitArrayLength(HArrayLength* instruction) {
   2842   LocationSummary* locations = instruction->GetLocations();
   2843   uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction);
   2844   Register obj = locations->InAt(0).AsRegister<Register>();
   2845   Register out = locations->Out().AsRegister<Register>();
   2846   __ LoadFromOffset(kLoadWord, out, obj, offset);
   2847   codegen_->MaybeRecordImplicitNullCheck(instruction);
   2848   // Mask out compression flag from String's array length.
   2849   if (mirror::kUseStringCompression && instruction->IsStringLength()) {
   2850     __ Srl(out, out, 1u);
   2851   }
   2852 }
   2853 
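         // A zero bit pattern can be stored directly from the ZERO register, so there is
         // no need to materialize such a constant in a register.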
   2854 Location LocationsBuilderMIPS::RegisterOrZeroConstant(HInstruction* instruction) {
   2855   return (instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern())
   2856       ? Location::ConstantLocation(instruction->AsConstant())
   2857       : Location::RequiresRegister();
   2858 }
   2859 
   2860 Location LocationsBuilderMIPS::FpuRegisterOrConstantForStore(HInstruction* instruction) {
   2861   // We can store 0.0 directly (from the ZERO register) without loading it into an FPU register.
   2862   // We can store a non-zero float or double constant without first loading it into the FPU,
   2863   // but we should only prefer this if the constant has a single use.
   2864   if (instruction->IsConstant() &&
   2865       (instruction->AsConstant()->IsZeroBitPattern() ||
   2866        instruction->GetUses().HasExactlyOneElement())) {
    2867     return Location::ConstantLocation(instruction->AsConstant());
    2868   }
    2869   // Otherwise require an FPU register for the constant.
   2870   return Location::RequiresFpuRegister();
   2871 }
   2872 
   2873 void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) {
   2874   DataType::Type value_type = instruction->GetComponentType();
   2875 
   2876   bool needs_write_barrier =
   2877       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
   2878   bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
   2879 
   2880   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
   2881       instruction,
   2882       may_need_runtime_call_for_type_check ?
   2883           LocationSummary::kCallOnSlowPath :
   2884           LocationSummary::kNoCall);
   2885 
   2886   locations->SetInAt(0, Location::RequiresRegister());
   2887   locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
   2888   if (DataType::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
   2889     locations->SetInAt(2, FpuRegisterOrConstantForStore(instruction->InputAt(2)));
   2890   } else {
   2891     locations->SetInAt(2, RegisterOrZeroConstant(instruction->InputAt(2)));
   2892   }
   2893   if (needs_write_barrier) {
   2894     // Temporary register for the write barrier.
   2895     locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
   2896   }
   2897 }
   2898 
   2899 void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
   2900   LocationSummary* locations = instruction->GetLocations();
   2901   Register obj = locations->InAt(0).AsRegister<Register>();
   2902   Location index = locations->InAt(1);
   2903   Location value_location = locations->InAt(2);
   2904   DataType::Type value_type = instruction->GetComponentType();
   2905   bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
   2906   bool needs_write_barrier =
   2907       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
   2908   auto null_checker = GetImplicitNullChecker(instruction, codegen_);
   2909   Register base_reg = index.IsConstant() ? obj : TMP;
   2910 
   2911   switch (value_type) {
   2912     case DataType::Type::kBool:
   2913     case DataType::Type::kUint8:
   2914     case DataType::Type::kInt8: {
   2915       uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
   2916       if (index.IsConstant()) {
   2917         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1;
   2918       } else {
   2919         __ Addu(base_reg, obj, index.AsRegister<Register>());
   2920       }
   2921       if (value_location.IsConstant()) {
   2922         int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
   2923         __ StoreConstToOffset(kStoreByte, value, base_reg, data_offset, TMP, null_checker);
   2924       } else {
   2925         Register value = value_location.AsRegister<Register>();
   2926         __ StoreToOffset(kStoreByte, value, base_reg, data_offset, null_checker);
   2927       }
   2928       break;
   2929     }
   2930 
   2931     case DataType::Type::kUint16:
   2932     case DataType::Type::kInt16: {
   2933       uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
   2934       if (index.IsConstant()) {
   2935         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2;
   2936       } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
   2937         __ Addu(base_reg, index.AsRegister<Register>(), obj);
   2938       } else {
   2939         __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_2, base_reg);
   2940       }
   2941       if (value_location.IsConstant()) {
   2942         int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
   2943         __ StoreConstToOffset(kStoreHalfword, value, base_reg, data_offset, TMP, null_checker);
   2944       } else {
   2945         Register value = value_location.AsRegister<Register>();
   2946         __ StoreToOffset(kStoreHalfword, value, base_reg, data_offset, null_checker);
   2947       }
   2948       break;
   2949     }
   2950 
   2951     case DataType::Type::kInt32: {
   2952       uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
   2953       if (index.IsConstant()) {
   2954         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
   2955       } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
   2956         __ Addu(base_reg, index.AsRegister<Register>(), obj);
   2957       } else {
   2958         __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
   2959       }
   2960       if (value_location.IsConstant()) {
   2961         int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
   2962         __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
   2963       } else {
   2964         Register value = value_location.AsRegister<Register>();
   2965         __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
   2966       }
   2967       break;
   2968     }
   2969 
   2970     case DataType::Type::kReference: {
   2971       if (value_location.IsConstant()) {
   2972         // Just setting null.
   2973         uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
   2974         if (index.IsConstant()) {
   2975           data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
   2976         } else {
   2977           __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
   2978         }
   2979         int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
   2980         DCHECK_EQ(value, 0);
   2981         __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
   2982         DCHECK(!needs_write_barrier);
   2983         DCHECK(!may_need_runtime_call_for_type_check);
   2984         break;
   2985       }
   2986 
   2987       DCHECK(needs_write_barrier);
   2988       Register value = value_location.AsRegister<Register>();
   2989       Register temp1 = locations->GetTemp(0).AsRegister<Register>();
   2990       Register temp2 = TMP;  // Doesn't need to survive slow path.
   2991       uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   2992       uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
   2993       uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
   2994       MipsLabel done;
   2995       SlowPathCodeMIPS* slow_path = nullptr;
   2996 
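               // If the assignment may fail the runtime type check, verify inline that the
               // value's class matches the array's component type (with an extra Object[]
               // fast path below) and enter the slow path only on a mismatch.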
   2997       if (may_need_runtime_call_for_type_check) {
   2998         slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathMIPS(instruction);
   2999         codegen_->AddSlowPath(slow_path);
   3000         if (instruction->GetValueCanBeNull()) {
   3001           MipsLabel non_zero;
   3002           __ Bnez(value, &non_zero);
   3003           uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
   3004           if (index.IsConstant()) {
   3005             data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
   3006           } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
   3007             __ Addu(base_reg, index.AsRegister<Register>(), obj);
   3008           } else {
   3009             __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
   3010           }
   3011           __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
   3012           __ B(&done);
   3013           __ Bind(&non_zero);
   3014         }
   3015 
   3016         // Note that when read barriers are enabled, the type checks
   3017         // are performed without read barriers.  This is fine, even in
   3018         // the case where a class object is in the from-space after
   3019         // the flip, as a comparison involving such a type would not
   3020         // produce a false positive; it may of course produce a false
   3021         // negative, in which case we would take the ArraySet slow
   3022         // path.
   3023 
   3024         // /* HeapReference<Class> */ temp1 = obj->klass_
   3025         __ LoadFromOffset(kLoadWord, temp1, obj, class_offset, null_checker);
   3026         __ MaybeUnpoisonHeapReference(temp1);
   3027 
   3028         // /* HeapReference<Class> */ temp1 = temp1->component_type_
   3029         __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
   3030         // /* HeapReference<Class> */ temp2 = value->klass_
   3031         __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
   3032         // If heap poisoning is enabled, no need to unpoison `temp1`
   3033         // nor `temp2`, as we are comparing two poisoned references.
   3034 
   3035         if (instruction->StaticTypeOfArrayIsObjectArray()) {
   3036           MipsLabel do_put;
   3037           __ Beq(temp1, temp2, &do_put);
   3038           // If heap poisoning is enabled, the `temp1` reference has
   3039           // not been unpoisoned yet; unpoison it now.
   3040           __ MaybeUnpoisonHeapReference(temp1);
   3041 
   3042           // /* HeapReference<Class> */ temp1 = temp1->super_class_
   3043           __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
   3044           // If heap poisoning is enabled, no need to unpoison
   3045           // `temp1`, as we are comparing against null below.
   3046           __ Bnez(temp1, slow_path->GetEntryLabel());
   3047           __ Bind(&do_put);
   3048         } else {
   3049           __ Bne(temp1, temp2, slow_path->GetEntryLabel());
   3050         }
   3051       }
   3052 
   3053       Register source = value;
   3054       if (kPoisonHeapReferences) {
   3055         // Note that in the case where `value` is a null reference,
   3056         // we do not enter this block, as a null reference does not
   3057         // need poisoning.
   3058         __ Move(temp1, value);
   3059         __ PoisonHeapReference(temp1);
   3060         source = temp1;
   3061       }
   3062 
   3063       uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
   3064       if (index.IsConstant()) {
   3065         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
   3066       } else {
   3067         __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
   3068       }
   3069       __ StoreToOffset(kStoreWord, source, base_reg, data_offset);
   3070 
   3071       if (!may_need_runtime_call_for_type_check) {
   3072         codegen_->MaybeRecordImplicitNullCheck(instruction);
   3073       }
   3074 
   3075       codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull());
   3076 
   3077       if (done.IsLinked()) {
   3078         __ Bind(&done);
   3079       }
   3080 
   3081       if (slow_path != nullptr) {
   3082         __ Bind(slow_path->GetExitLabel());
   3083       }
   3084       break;
   3085     }
   3086 
   3087     case DataType::Type::kInt64: {
   3088       uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
   3089       if (index.IsConstant()) {
   3090         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
   3091       } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
   3092         __ Addu(base_reg, index.AsRegister<Register>(), obj);
   3093       } else {
   3094         __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_8, base_reg);
   3095       }
   3096       if (value_location.IsConstant()) {
   3097         int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
   3098         __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker);
   3099       } else {
   3100         Register value = value_location.AsRegisterPairLow<Register>();
   3101         __ StoreToOffset(kStoreDoubleword, value, base_reg, data_offset, null_checker);
   3102       }
   3103       break;
   3104     }
   3105 
   3106     case DataType::Type::kFloat32: {
   3107       uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
   3108       if (index.IsConstant()) {
   3109         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
   3110       } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
   3111         __ Addu(base_reg, index.AsRegister<Register>(), obj);
   3112       } else {
   3113         __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
   3114       }
   3115       if (value_location.IsConstant()) {
   3116         int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
   3117         __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
   3118       } else {
   3119         FRegister value = value_location.AsFpuRegister<FRegister>();
   3120         __ StoreSToOffset(value, base_reg, data_offset, null_checker);
   3121       }
   3122       break;
   3123     }
   3124 
   3125     case DataType::Type::kFloat64: {
   3126       uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
   3127       if (index.IsConstant()) {
   3128         data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
   3129       } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
   3130         __ Addu(base_reg, index.AsRegister<Register>(), obj);
   3131       } else {
   3132         __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_8, base_reg);
   3133       }
   3134       if (value_location.IsConstant()) {
   3135         int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
   3136         __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker);
   3137       } else {
   3138         FRegister value = value_location.AsFpuRegister<FRegister>();
   3139         __ StoreDToOffset(value, base_reg, data_offset, null_checker);
   3140       }
   3141       break;
   3142     }
   3143 
   3144     case DataType::Type::kUint32:
   3145     case DataType::Type::kUint64:
   3146     case DataType::Type::kVoid:
    3147       LOG(FATAL) << "Unreachable type " << value_type;
   3148       UNREACHABLE();
   3149   }
   3150 }
   3151 
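         // HIntermediateArrayAddressIndex materializes `index << shift` once so that
         // several array accesses can reuse the scaled index with a plain Addu (see the
         // IsIntermediateArrayAddressIndex special cases in the array get/set code above).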
   3152 void LocationsBuilderMIPS::VisitIntermediateArrayAddressIndex(
   3153     HIntermediateArrayAddressIndex* instruction) {
   3154   LocationSummary* locations =
   3155       new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
   3156 
   3157   HIntConstant* shift = instruction->GetShift()->AsIntConstant();
   3158 
   3159   locations->SetInAt(0, Location::RequiresRegister());
   3160   locations->SetInAt(1, Location::ConstantLocation(shift));
   3161   locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   3162 }
   3163 
   3164 void InstructionCodeGeneratorMIPS::VisitIntermediateArrayAddressIndex(
   3165     HIntermediateArrayAddressIndex* instruction) {
   3166   LocationSummary* locations = instruction->GetLocations();
   3167   Register index_reg = locations->InAt(0).AsRegister<Register>();
   3168   uint32_t shift = instruction->GetShift()->AsIntConstant()->GetValue();
   3169   __ Sll(locations->Out().AsRegister<Register>(), index_reg, shift);
   3170 }
   3171 
   3172 void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
   3173   RegisterSet caller_saves = RegisterSet::Empty();
   3174   InvokeRuntimeCallingConvention calling_convention;
   3175   caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   3176   caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
   3177   LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
   3178 
   3179   HInstruction* index = instruction->InputAt(0);
   3180   HInstruction* length = instruction->InputAt(1);
   3181 
   3182   bool const_index = false;
   3183   bool const_length = false;
   3184 
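           // Keep a constant index or length out of a register only when the comparison
           // emitted below fits Sltiu's 16-bit immediate: `Sltiu(TMP, index, length)`
           // needs `length` in [0, 2^15) and `Sltiu(TMP, length, index + 1)` needs
           // `index + 1` to be a valid signed 16-bit immediate. A negative constant
           // index always fails and needs no comparison at all.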
   3185   if (index->IsConstant()) {
   3186     if (length->IsConstant()) {
   3187       const_index = true;
   3188       const_length = true;
   3189     } else {
   3190       int32_t index_value = index->AsIntConstant()->GetValue();
   3191       if (index_value < 0 || IsInt<16>(index_value + 1)) {
   3192         const_index = true;
   3193       }
   3194     }
   3195   } else if (length->IsConstant()) {
   3196     int32_t length_value = length->AsIntConstant()->GetValue();
   3197     if (IsUint<15>(length_value)) {
   3198       const_length = true;
   3199     }
   3200   }
   3201 
   3202   locations->SetInAt(0, const_index
   3203       ? Location::ConstantLocation(index->AsConstant())
   3204       : Location::RequiresRegister());
   3205   locations->SetInAt(1, const_length
   3206       ? Location::ConstantLocation(length->AsConstant())
   3207       : Location::RequiresRegister());
   3208 }
   3209 
   3210 void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
   3211   LocationSummary* locations = instruction->GetLocations();
   3212   Location index_loc = locations->InAt(0);
   3213   Location length_loc = locations->InAt(1);
   3214 
   3215   if (length_loc.IsConstant()) {
   3216     int32_t length = length_loc.GetConstant()->AsIntConstant()->GetValue();
   3217     if (index_loc.IsConstant()) {
   3218       int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
   3219       if (index < 0 || index >= length) {
   3220         BoundsCheckSlowPathMIPS* slow_path =
   3221             new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
   3222         codegen_->AddSlowPath(slow_path);
   3223         __ B(slow_path->GetEntryLabel());
   3224       } else {
   3225         // Nothing to be done.
   3226       }
   3227       return;
   3228     }
   3229 
   3230     BoundsCheckSlowPathMIPS* slow_path =
   3231         new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
   3232     codegen_->AddSlowPath(slow_path);
   3233     Register index = index_loc.AsRegister<Register>();
   3234     if (length == 0) {
   3235       __ B(slow_path->GetEntryLabel());
   3236     } else if (length == 1) {
   3237       __ Bnez(index, slow_path->GetEntryLabel());
   3238     } else {
   3239       DCHECK(IsUint<15>(length)) << length;
   3240       __ Sltiu(TMP, index, length);
   3241       __ Beqz(TMP, slow_path->GetEntryLabel());
   3242     }
   3243   } else {
   3244     Register length = length_loc.AsRegister<Register>();
   3245     BoundsCheckSlowPathMIPS* slow_path =
   3246         new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
   3247     codegen_->AddSlowPath(slow_path);
   3248     if (index_loc.IsConstant()) {
   3249       int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
   3250       if (index < 0) {
   3251         __ B(slow_path->GetEntryLabel());
   3252       } else if (index == 0) {
   3253         __ Blez(length, slow_path->GetEntryLabel());
   3254       } else {
   3255         DCHECK(IsInt<16>(index + 1)) << index;
   3256         __ Sltiu(TMP, length, index + 1);
   3257         __ Bnez(TMP, slow_path->GetEntryLabel());
   3258       }
   3259     } else {
   3260       Register index = index_loc.AsRegister<Register>();
   3261       __ Bgeu(index, length, slow_path->GetEntryLabel());
   3262     }
   3263   }
   3264 }
   3265 
   3266 // Temp is used for read barrier.
   3267 static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
   3268   if (kEmitCompilerReadBarrier &&
   3269       !(kUseBakerReadBarrier && kBakerReadBarrierThunksEnableForFields) &&
   3270       (kUseBakerReadBarrier ||
   3271        type_check_kind == TypeCheckKind::kAbstractClassCheck ||
   3272        type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
   3273        type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
   3274     return 1;
   3275   }
   3276   return 0;
   3277 }
   3278 
   3279 // Extra temp is used for read barrier.
   3280 static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
   3281   return 1 + NumberOfInstanceOfTemps(type_check_kind);
   3282 }
   3283 
   3284 void LocationsBuilderMIPS::VisitCheckCast(HCheckCast* instruction) {
   3285   TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
   3286   LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
   3287   LocationSummary* locations =
   3288       new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
   3289   locations->SetInAt(0, Location::RequiresRegister());
   3290   locations->SetInAt(1, Location::RequiresRegister());
   3291   locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
   3292 }
   3293 
   3294 void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
   3295   TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
   3296   LocationSummary* locations = instruction->GetLocations();
   3297   Location obj_loc = locations->InAt(0);
   3298   Register obj = obj_loc.AsRegister<Register>();
   3299   Register cls = locations->InAt(1).AsRegister<Register>();
   3300   Location temp_loc = locations->GetTemp(0);
   3301   Register temp = temp_loc.AsRegister<Register>();
   3302   const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
   3303   DCHECK_LE(num_temps, 2u);
   3304   Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation();
   3305   const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   3306   const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
   3307   const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
   3308   const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
   3309   const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value();
   3310   const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value();
   3311   const uint32_t object_array_data_offset =
   3312       mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
   3313   MipsLabel done;
   3314 
   3315   bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
   3316   SlowPathCodeMIPS* slow_path =
   3317       new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
   3318           instruction, is_type_check_slow_path_fatal);
   3319   codegen_->AddSlowPath(slow_path);
   3320 
   3321   // Avoid this check if we know `obj` is not null.
   3322   if (instruction->MustDoNullCheck()) {
   3323     __ Beqz(obj, &done);
   3324   }
   3325 
   3326   switch (type_check_kind) {
   3327     case TypeCheckKind::kExactCheck:
   3328     case TypeCheckKind::kArrayCheck: {
   3329       // /* HeapReference<Class> */ temp = obj->klass_
   3330       GenerateReferenceLoadTwoRegisters(instruction,
   3331                                         temp_loc,
   3332                                         obj_loc,
   3333                                         class_offset,
   3334                                         maybe_temp2_loc,
   3335                                         kWithoutReadBarrier);
   3336       // Jump to slow path for throwing the exception or doing a
   3337       // more involved array check.
   3338       __ Bne(temp, cls, slow_path->GetEntryLabel());
   3339       break;
   3340     }
   3341 
   3342     case TypeCheckKind::kAbstractClassCheck: {
   3343       // /* HeapReference<Class> */ temp = obj->klass_
   3344       GenerateReferenceLoadTwoRegisters(instruction,
   3345                                         temp_loc,
   3346                                         obj_loc,
   3347                                         class_offset,
   3348                                         maybe_temp2_loc,
   3349                                         kWithoutReadBarrier);
   3350       // If the class is abstract, we eagerly fetch the super class of the
   3351       // object to avoid doing a comparison we know will fail.
   3352       MipsLabel loop;
   3353       __ Bind(&loop);
   3354       // /* HeapReference<Class> */ temp = temp->super_class_
   3355       GenerateReferenceLoadOneRegister(instruction,
   3356                                        temp_loc,
   3357                                        super_offset,
   3358                                        maybe_temp2_loc,
   3359                                        kWithoutReadBarrier);
   3360       // If the class reference currently in `temp` is null, jump to the slow path to throw the
   3361       // exception.
   3362       __ Beqz(temp, slow_path->GetEntryLabel());
   3363       // Otherwise, compare the classes.
   3364       __ Bne(temp, cls, &loop);
   3365       break;
   3366     }
   3367 
   3368     case TypeCheckKind::kClassHierarchyCheck: {
   3369       // /* HeapReference<Class> */ temp = obj->klass_
   3370       GenerateReferenceLoadTwoRegisters(instruction,
   3371                                         temp_loc,
   3372                                         obj_loc,
   3373                                         class_offset,
   3374                                         maybe_temp2_loc,
   3375                                         kWithoutReadBarrier);
   3376       // Walk over the class hierarchy to find a match.
   3377       MipsLabel loop;
   3378       __ Bind(&loop);
   3379       __ Beq(temp, cls, &done);
   3380       // /* HeapReference<Class> */ temp = temp->super_class_
   3381       GenerateReferenceLoadOneRegister(instruction,
   3382                                        temp_loc,
   3383                                        super_offset,
   3384                                        maybe_temp2_loc,
   3385                                        kWithoutReadBarrier);
   3386       // If the class reference currently in `temp` is null, jump to the slow path to throw the
   3387       // exception. Otherwise, jump to the beginning of the loop.
   3388       __ Bnez(temp, &loop);
   3389       __ B(slow_path->GetEntryLabel());
   3390       break;
   3391     }
   3392 
   3393     case TypeCheckKind::kArrayObjectCheck: {
   3394       // /* HeapReference<Class> */ temp = obj->klass_
   3395       GenerateReferenceLoadTwoRegisters(instruction,
   3396                                         temp_loc,
   3397                                         obj_loc,
   3398                                         class_offset,
   3399                                         maybe_temp2_loc,
   3400                                         kWithoutReadBarrier);
   3401       // Do an exact check.
   3402       __ Beq(temp, cls, &done);
   3403       // Otherwise, we need to check that the object's class is a non-primitive array.
   3404       // /* HeapReference<Class> */ temp = temp->component_type_
   3405       GenerateReferenceLoadOneRegister(instruction,
   3406                                        temp_loc,
   3407                                        component_offset,
   3408                                        maybe_temp2_loc,
   3409                                        kWithoutReadBarrier);
   3410       // If the component type is null, jump to the slow path to throw the exception.
   3411       __ Beqz(temp, slow_path->GetEntryLabel());
    3412       // Otherwise, the object is indeed an array; further check that its component
    3413       // type is not a primitive type.
   3414       __ LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset);
   3415       static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
   3416       __ Bnez(temp, slow_path->GetEntryLabel());
   3417       break;
   3418     }
   3419 
   3420     case TypeCheckKind::kUnresolvedCheck:
   3421       // We always go into the type check slow path for the unresolved check case.
   3422       // We cannot directly call the CheckCast runtime entry point
   3423       // without resorting to a type checking slow path here (i.e. by
    3424       // calling InvokeRuntime directly), as it would require assigning
    3425       // fixed registers for the inputs of this HCheckCast
   3426       // instruction (following the runtime calling convention), which
   3427       // might be cluttered by the potential first read barrier
   3428       // emission at the beginning of this method.
   3429       __ B(slow_path->GetEntryLabel());
   3430       break;
   3431 
   3432     case TypeCheckKind::kInterfaceCheck: {
    3433       // Avoid read barriers to improve the performance of the fast path. We cannot
    3434       // get false positives by doing this.
   3435       // /* HeapReference<Class> */ temp = obj->klass_
   3436       GenerateReferenceLoadTwoRegisters(instruction,
   3437                                         temp_loc,
   3438                                         obj_loc,
   3439                                         class_offset,
   3440                                         maybe_temp2_loc,
   3441                                         kWithoutReadBarrier);
   3442       // /* HeapReference<Class> */ temp = temp->iftable_
   3443       GenerateReferenceLoadTwoRegisters(instruction,
   3444                                         temp_loc,
   3445                                         temp_loc,
   3446                                         iftable_offset,
   3447                                         maybe_temp2_loc,
   3448                                         kWithoutReadBarrier);
   3449       // Iftable is never null.
   3450       __ Lw(TMP, temp, array_length_offset);
   3451       // Loop through the iftable and check if any class matches.
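               // Note: the iftable is laid out as (interface class, method array) pairs,
               // so TMP (its array length) is twice the number of interfaces; each
               // iteration therefore advances by two references and decrements the
               // remaining count by 2.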
   3452       MipsLabel loop;
   3453       __ Bind(&loop);
   3454       __ Addiu(temp, temp, 2 * kHeapReferenceSize);  // Possibly in delay slot on R2.
   3455       __ Beqz(TMP, slow_path->GetEntryLabel());
   3456       __ Lw(AT, temp, object_array_data_offset - 2 * kHeapReferenceSize);
   3457       __ MaybeUnpoisonHeapReference(AT);
   3458       // Go to next interface.
   3459       __ Addiu(TMP, TMP, -2);
   3460       // Compare the classes and continue the loop if they do not match.
   3461       __ Bne(AT, cls, &loop);
   3462       break;
   3463     }
   3464   }
   3465 
   3466   __ Bind(&done);
   3467   __ Bind(slow_path->GetExitLabel());
   3468 }
   3469 
   3470 void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) {
   3471   LocationSummary* locations =
   3472       new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
   3473   locations->SetInAt(0, Location::RequiresRegister());
   3474   if (check->HasUses()) {
   3475     locations->SetOut(Location::SameAsFirstInput());
   3476   }
   3477 }
   3478 
   3479 void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) {
   3480   // We assume the class is not null.
   3481   SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
   3482       check->GetLoadClass(),
   3483       check,
   3484       check->GetDexPc(),
   3485       true);
   3486   codegen_->AddSlowPath(slow_path);
   3487   GenerateClassInitializationCheck(slow_path,
   3488                                    check->GetLocations()->InAt(0).AsRegister<Register>());
   3489 }
   3490 
   3491 void LocationsBuilderMIPS::VisitCompare(HCompare* compare) {
   3492   DataType::Type in_type = compare->InputAt(0)->GetType();
   3493 
   3494   LocationSummary* locations =
   3495       new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall);
   3496 
   3497   switch (in_type) {
   3498     case DataType::Type::kBool:
   3499     case DataType::Type::kUint8:
   3500     case DataType::Type::kInt8:
   3501     case DataType::Type::kUint16:
   3502     case DataType::Type::kInt16:
   3503     case DataType::Type::kInt32:
   3504       locations->SetInAt(0, Location::RequiresRegister());
   3505       locations->SetInAt(1, Location::RequiresRegister());
   3506       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   3507       break;
   3508 
   3509     case DataType::Type::kInt64:
   3510       locations->SetInAt(0, Location::RequiresRegister());
   3511       locations->SetInAt(1, Location::RequiresRegister());
   3512       // Output overlaps because it is written before doing the low comparison.
   3513       locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
   3514       break;
   3515 
   3516     case DataType::Type::kFloat32:
   3517     case DataType::Type::kFloat64:
   3518       locations->SetInAt(0, Location::RequiresFpuRegister());
   3519       locations->SetInAt(1, Location::RequiresFpuRegister());
   3520       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   3521       break;
   3522 
   3523     default:
   3524       LOG(FATAL) << "Unexpected type for compare operation " << in_type;
   3525   }
   3526 }
   3527 
   3528 void InstructionCodeGeneratorMIPS::VisitCompare(HCompare* instruction) {
   3529   LocationSummary* locations = instruction->GetLocations();
   3530   Register res = locations->Out().AsRegister<Register>();
   3531   DataType::Type in_type = instruction->InputAt(0)->GetType();
   3532   bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   3533 
   3534   //  0 if: left == right
   3535   //  1 if: left  > right
   3536   // -1 if: left  < right
   3537   switch (in_type) {
   3538     case DataType::Type::kBool:
   3539     case DataType::Type::kUint8:
   3540     case DataType::Type::kInt8:
   3541     case DataType::Type::kUint16:
   3542     case DataType::Type::kInt16:
   3543     case DataType::Type::kInt32: {
   3544       Register lhs = locations->InAt(0).AsRegister<Register>();
   3545       Register rhs = locations->InAt(1).AsRegister<Register>();
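               // res = (rhs < lhs) - (lhs < rhs) is the signum of the comparison. For
               // example, lhs = 5, rhs = 7: TMP = 1, res = 0, giving res - TMP = -1.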
   3546       __ Slt(TMP, lhs, rhs);
   3547       __ Slt(res, rhs, lhs);
   3548       __ Subu(res, res, TMP);
   3549       break;
   3550     }
   3551     case DataType::Type::kInt64: {
   3552       MipsLabel done;
   3553       Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
   3554       Register lhs_low  = locations->InAt(0).AsRegisterPairLow<Register>();
   3555       Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
   3556       Register rhs_low  = locations->InAt(1).AsRegisterPairLow<Register>();
   3557       // TODO: more efficient (direct) comparison with a constant.
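               // The high words decide the result unless they are equal; only then are
               // the low words compared, unsigned, as they hold the lower 32 magnitude
               // bits.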
   3558       __ Slt(TMP, lhs_high, rhs_high);
   3559       __ Slt(AT, rhs_high, lhs_high);  // Inverted: is actually gt.
   3560       __ Subu(res, AT, TMP);           // Result -1:1:0 for [ <, >, == ].
   3561       __ Bnez(res, &done);             // If we compared ==, check if lower bits are also equal.
   3562       __ Sltu(TMP, lhs_low, rhs_low);
   3563       __ Sltu(AT, rhs_low, lhs_low);   // Inverted: is actually gt.
   3564       __ Subu(res, AT, TMP);           // Result -1:1:0 for [ <, >, == ].
   3565       __ Bind(&done);
   3566       break;
   3567     }
   3568 
   3569     case DataType::Type::kFloat32: {
   3570       bool gt_bias = instruction->IsGtBias();
   3571       FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
   3572       FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
   3573       MipsLabel done;
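               // gt_bias selects the result for unordered operands, matching the
               // cmpg/cmpl bytecode semantics: with gt_bias a NaN operand produces 1,
               // otherwise -1.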
   3574       if (isR6) {
   3575         __ CmpEqS(FTMP, lhs, rhs);
   3576         __ LoadConst32(res, 0);
   3577         __ Bc1nez(FTMP, &done);
   3578         if (gt_bias) {
   3579           __ CmpLtS(FTMP, lhs, rhs);
   3580           __ LoadConst32(res, -1);
   3581           __ Bc1nez(FTMP, &done);
   3582           __ LoadConst32(res, 1);
   3583         } else {
   3584           __ CmpLtS(FTMP, rhs, lhs);
   3585           __ LoadConst32(res, 1);
   3586           __ Bc1nez(FTMP, &done);
   3587           __ LoadConst32(res, -1);
   3588         }
   3589       } else {
   3590         if (gt_bias) {
   3591           __ ColtS(0, lhs, rhs);
   3592           __ LoadConst32(res, -1);
   3593           __ Bc1t(0, &done);
   3594           __ CeqS(0, lhs, rhs);
   3595           __ LoadConst32(res, 1);
   3596           __ Movt(res, ZERO, 0);
   3597         } else {
   3598           __ ColtS(0, rhs, lhs);
   3599           __ LoadConst32(res, 1);
   3600           __ Bc1t(0, &done);
   3601           __ CeqS(0, lhs, rhs);
   3602           __ LoadConst32(res, -1);
   3603           __ Movt(res, ZERO, 0);
   3604         }
   3605       }
   3606       __ Bind(&done);
   3607       break;
   3608     }
   3609     case DataType::Type::kFloat64: {
   3610       bool gt_bias = instruction->IsGtBias();
   3611       FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
   3612       FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
   3613       MipsLabel done;
   3614       if (isR6) {
   3615         __ CmpEqD(FTMP, lhs, rhs);
   3616         __ LoadConst32(res, 0);
   3617         __ Bc1nez(FTMP, &done);
   3618         if (gt_bias) {
   3619           __ CmpLtD(FTMP, lhs, rhs);
   3620           __ LoadConst32(res, -1);
   3621           __ Bc1nez(FTMP, &done);
   3622           __ LoadConst32(res, 1);
   3623         } else {
   3624           __ CmpLtD(FTMP, rhs, lhs);
   3625           __ LoadConst32(res, 1);
   3626           __ Bc1nez(FTMP, &done);
   3627           __ LoadConst32(res, -1);
   3628         }
   3629       } else {
   3630         if (gt_bias) {
   3631           __ ColtD(0, lhs, rhs);
   3632           __ LoadConst32(res, -1);
   3633           __ Bc1t(0, &done);
   3634           __ CeqD(0, lhs, rhs);
   3635           __ LoadConst32(res, 1);
   3636           __ Movt(res, ZERO, 0);
   3637         } else {
   3638           __ ColtD(0, rhs, lhs);
   3639           __ LoadConst32(res, 1);
   3640           __ Bc1t(0, &done);
   3641           __ CeqD(0, lhs, rhs);
   3642           __ LoadConst32(res, -1);
   3643           __ Movt(res, ZERO, 0);
   3644         }
   3645       }
   3646       __ Bind(&done);
   3647       break;
   3648     }
   3649 
   3650     default:
   3651       LOG(FATAL) << "Unimplemented compare type " << in_type;
   3652   }
   3653 }
   3654 
   3655 void LocationsBuilderMIPS::HandleCondition(HCondition* instruction) {
   3656   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
   3657   switch (instruction->InputAt(0)->GetType()) {
   3658     default:
   3659     case DataType::Type::kInt64:
   3660       locations->SetInAt(0, Location::RequiresRegister());
   3661       locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
   3662       break;
   3663 
   3664     case DataType::Type::kFloat32:
   3665     case DataType::Type::kFloat64:
   3666       locations->SetInAt(0, Location::RequiresFpuRegister());
   3667       locations->SetInAt(1, Location::RequiresFpuRegister());
   3668       break;
   3669   }
   3670   if (!instruction->IsEmittedAtUseSite()) {
   3671     locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   3672   }
   3673 }
   3674 
   3675 void InstructionCodeGeneratorMIPS::HandleCondition(HCondition* instruction) {
   3676   if (instruction->IsEmittedAtUseSite()) {
   3677     return;
   3678   }
   3679 
   3680   DataType::Type type = instruction->InputAt(0)->GetType();
   3681   LocationSummary* locations = instruction->GetLocations();
   3682 
   3683   switch (type) {
   3684     default:
   3685       // Integer case.
   3686       GenerateIntCompare(instruction->GetCondition(), locations);
   3687       return;
   3688 
   3689     case DataType::Type::kInt64:
   3690       GenerateLongCompare(instruction->GetCondition(), locations);
   3691       return;
   3692 
   3693     case DataType::Type::kFloat32:
   3694     case DataType::Type::kFloat64:
   3695       GenerateFpCompare(instruction->GetCondition(), instruction->IsGtBias(), type, locations);
   3696       return;
   3697   }
   3698 }
   3699 
   3700 void InstructionCodeGeneratorMIPS::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
   3701   DCHECK(instruction->IsDiv() || instruction->IsRem());
   3702 
   3703   LocationSummary* locations = instruction->GetLocations();
   3704   Location second = locations->InAt(1);
   3705   DCHECK(second.IsConstant());
   3706   int64_t imm = Int64FromConstant(second.GetConstant());
   3707   DCHECK(imm == 1 || imm == -1);
   3708 
   3709   if (instruction->GetResultType() == DataType::Type::kInt32) {
   3710     Register out = locations->Out().AsRegister<Register>();
   3711     Register dividend = locations->InAt(0).AsRegister<Register>();
   3712 
   3713     if (instruction->IsRem()) {
   3714       __ Move(out, ZERO);
   3715     } else {
   3716       if (imm == -1) {
   3717         __ Subu(out, ZERO, dividend);
   3718       } else if (out != dividend) {
   3719         __ Move(out, dividend);
   3720       }
   3721     }
   3722   } else {
   3723     DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64);
   3724     Register out_high = locations->Out().AsRegisterPairHigh<Register>();
   3725     Register out_low = locations->Out().AsRegisterPairLow<Register>();
   3726     Register in_high = locations->InAt(0).AsRegisterPairHigh<Register>();
   3727     Register in_low = locations->InAt(0).AsRegisterPairLow<Register>();
   3728 
   3729     if (instruction->IsRem()) {
   3730       __ Move(out_high, ZERO);
   3731       __ Move(out_low, ZERO);
   3732     } else {
   3733       if (imm == -1) {
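                 // Negate the 64-bit pair: out_low = 0 - in_low, then subtract the
                 // borrow (1 iff out_low != 0, computed by the Sltu) from the negated
                 // high word.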
   3734         __ Subu(out_low, ZERO, in_low);
   3735         __ Sltu(AT, ZERO, out_low);
   3736         __ Subu(out_high, ZERO, in_high);
   3737         __ Subu(out_high, out_high, AT);
   3738       } else {
   3739         __ Move(out_low, in_low);
   3740         __ Move(out_high, in_high);
   3741       }
   3742     }
   3743   }
   3744 }
   3745 
   3746 void InstructionCodeGeneratorMIPS::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
   3747   DCHECK(instruction->IsDiv() || instruction->IsRem());
   3748 
   3749   LocationSummary* locations = instruction->GetLocations();
   3750   Location second = locations->InAt(1);
   3751   const bool is_r2_or_newer = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
   3752   const bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
   3753   DCHECK(second.IsConstant());
   3754 
   3755   if (instruction->GetResultType() == DataType::Type::kInt32) {
   3756     Register out = locations->Out().AsRegister<Register>();
   3757     Register dividend = locations->InAt(0).AsRegister<Register>();
   3758     int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
   3759     uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));
   3760     int ctz_imm = CTZ(abs_imm);
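             // Signed division by +/-2^k rounds toward zero: bias a negative dividend
             // by 2^k - 1 (built from the sign bit by the shifts below) before the
             // arithmetic shift. E.g. dividend = -7, imm = 2: TMP = 1 and
             // (-7 + 1) >> 1 = -3.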
   3761 
   3762     if (instruction->IsDiv()) {
   3763       if (ctz_imm == 1) {
   3764         // Fast path for division by +/-2, which is very common.
   3765         __ Srl(TMP, dividend, 31);
   3766       } else {
   3767         __ Sra(TMP, dividend, 31);
   3768         __ Srl(TMP, TMP, 32 - ctz_imm);
   3769       }
   3770       __ Addu(out, dividend, TMP);
   3771       __ Sra(out, out, ctz_imm);
   3772       if (imm < 0) {
   3773         __ Subu(out, ZERO, out);
   3774       }
   3775     } else {
   3776       if (ctz_imm == 1) {
   3777         // Fast path for modulo +/-2, which is very common.
   3778         __ Sra(TMP, dividend, 31);
   3779         __ Subu(out, dividend, TMP);
   3780         __ Andi(out, out, 1);
   3781         __ Addu(out, out, TMP);
   3782       } else {
   3783         __ Sra(TMP, dividend, 31);
   3784         __ Srl(TMP, TMP, 32 - ctz_imm);
   3785         __ Addu(out, dividend, TMP);
   3786         if (IsUint<16>(abs_imm - 1)) {
   3787           __ Andi(out, out, abs_imm - 1);
   3788         } else {
   3789           if (is_r2_or_newer) {
   3790             __ Ins(out, ZERO, ctz_imm, 32 - ctz_imm);
   3791           } else {
   3792             __ Sll(out, out, 32 - ctz_imm);
   3793             __ Srl(out, out, 32 - ctz_imm);
   3794           }
   3795         }
   3796         __ Subu(out, out, TMP);
   3797       }
   3798     }
   3799   } else {
   3800     DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64);
   3801     Register out_high = locations->Out().AsRegisterPairHigh<Register>();
   3802     Register out_low = locations->Out().AsRegisterPairLow<Register>();
   3803     Register in_high = locations->InAt(0).AsRegisterPairHigh<Register>();
   3804     Register in_low = locations->InAt(0).AsRegisterPairLow<Register>();
   3805     int64_t imm = Int64FromConstant(second.GetConstant());
   3806     uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm));
   3807     int ctz_imm = CTZ(abs_imm);
   3808 
   3809     if (instruction->IsDiv()) {
   3810       if (ctz_imm < 32) {
   3811         if (ctz_imm == 1) {
   3812           __ Srl(AT, in_high, 31);
   3813         } else {
   3814           __ Sra(AT, in_high, 31);
   3815           __ Srl(AT, AT, 32 - ctz_imm);
   3816         }
   3817         __ Addu(AT, AT, in_low);
   3818         __ Sltu(TMP, AT, in_low);
   3819         __ Addu(out_high, in_high, TMP);
   3820         __ Srl(out_low, AT, ctz_imm);
   3821         if (is_r2_or_newer) {
   3822           __ Ins(out_low, out_high, 32 - ctz_imm, ctz_imm);
   3823           __ Sra(out_high, out_high, ctz_imm);
   3824         } else {
   3825           __ Sll(AT, out_high, 32 - ctz_imm);
   3826           __ Sra(out_high, out_high, ctz_imm);
   3827           __ Or(out_low, out_low, AT);
   3828         }
   3829         if (imm < 0) {
   3830           __ Subu(out_low, ZERO, out_low);
   3831           __ Sltu(AT, ZERO, out_low);
   3832           __ Subu(out_high, ZERO, out_high);
   3833           __ Subu(out_high, out_high, AT);
   3834         }
   3835       } else if (ctz_imm == 32) {
   3836         __ Sra(AT, in_high, 31);
   3837         __ Addu(AT, AT, in_low);
   3838         __ Sltu(AT, AT, in_low);
   3839         __ Addu(out_low, in_high, AT);
   3840         if (imm < 0) {
   3841           __ Srl(TMP, out_low, 31);
   3842           __ Subu(out_low, ZERO, out_low);
   3843           __ Sltu(AT, ZERO, out_low);
   3844           __ Subu(out_high, TMP, AT);
   3845         } else {
   3846           __ Sra(out_high, out_low, 31);
   3847         }
   3848       } else if (ctz_imm < 63) {
   3849         __ Sra(AT, in_high, 31);
   3850         __ Srl(TMP, AT, 64 - ctz_imm);
   3851         __ Addu(AT, AT, in_low);
   3852         __ Sltu(AT, AT, in_low);
   3853         __ Addu(out_low, in_high, AT);
   3854         __ Addu(out_low, out_low, TMP);
   3855         __ Sra(out_low, out_low, ctz_imm - 32);
   3856         if (imm < 0) {
   3857           __ Subu(out_low, ZERO, out_low);
   3858         }
   3859         __ Sra(out_high, out_low, 31);
   3860       } else {
   3861         DCHECK_LT(imm, 0);
   3862         if (is_r6) {
   3863           __ Aui(AT, in_high, 0x8000);
   3864         } else {
   3865           __ Lui(AT, 0x8000);
   3866           __ Xor(AT, AT, in_high);
   3867         }
   3868         __ Or(AT, AT, in_low);
   3869         __ Sltiu(out_low, AT, 1);
   3870         __ Move(out_high, ZERO);
   3871       }
   3872     } else {
   3873       if ((ctz_imm == 1) && !is_r6) {
   3874         __ Andi(AT, in_low, 1);
   3875         __ Sll(TMP, in_low, 31);
   3876         __ And(TMP, in_high, TMP);
   3877         __ Sra(out_high, TMP, 31);
   3878         __ Or(out_low, out_high, AT);
   3879       } else if (ctz_imm < 32) {
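                 // The remainder is the low ctz_imm bits of the dividend, sign-filled
                 // above when non-zero and the dividend is negative. E.g. in = -7,
                 // imm = +/-4: the low bits are 01 and the dividend is negative, so the
                 // upper bits are set, giving -3.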
   3880         __ Sra(AT, in_high, 31);
   3881         if (ctz_imm <= 16) {
   3882           __ Andi(out_low, in_low, abs_imm - 1);
   3883         } else if (is_r2_or_newer) {
   3884           __ Ext(out_low, in_low, 0, ctz_imm);
   3885         } else {
   3886           __ Sll(out_low, in_low, 32 - ctz_imm);
   3887           __ Srl(out_low, out_low, 32 - ctz_imm);
   3888         }
   3889         if (is_r6) {
   3890           __ Selnez(out_high, AT, out_low);
   3891         } else {
   3892           __ Movz(AT, ZERO, out_low);
   3893           __ Move(out_high, AT);
   3894         }
   3895         if (is_r2_or_newer) {
   3896           __ Ins(out_low, out_high, ctz_imm, 32 - ctz_imm);
   3897         } else {
   3898           __ Sll(AT, out_high, ctz_imm);
   3899           __ Or(out_low, out_low, AT);
   3900         }
   3901       } else if (ctz_imm == 32) {
   3902         __ Sra(AT, in_high, 31);
   3903         __ Move(out_low, in_low);
   3904         if (is_r6) {
   3905           __ Selnez(out_high, AT, out_low);
   3906         } else {
   3907           __ Movz(AT, ZERO, out_low);
   3908           __ Move(out_high, AT);
   3909         }
   3910       } else if (ctz_imm < 63) {
   3911         __ Sra(AT, in_high, 31);
   3912         __ Move(TMP, in_low);
   3913         if (ctz_imm - 32 <= 16) {
   3914           __ Andi(out_high, in_high, (1 << (ctz_imm - 32)) - 1);
   3915         } else if (is_r2_or_newer) {
   3916           __ Ext(out_high, in_high, 0, ctz_imm - 32);
   3917         } else {
   3918           __ Sll(out_high, in_high, 64 - ctz_imm);
   3919           __ Srl(out_high, out_high, 64 - ctz_imm);
   3920         }
   3921         __ Move(out_low, TMP);
   3922         __ Or(TMP, TMP, out_high);
   3923         if (is_r6) {
   3924           __ Selnez(AT, AT, TMP);
   3925         } else {
   3926           __ Movz(AT, ZERO, TMP);
   3927         }
   3928         if (is_r2_or_newer) {
   3929           __ Ins(out_high, AT, ctz_imm - 32, 64 - ctz_imm);
   3930         } else {
   3931           __ Sll(AT, AT, ctz_imm - 32);
   3932           __ Or(out_high, out_high, AT);
   3933         }
   3934       } else {
   3935         if (is_r6) {
   3936           __ Aui(AT, in_high, 0x8000);
   3937         } else {
   3938           __ Lui(AT, 0x8000);
   3939           __ Xor(AT, AT, in_high);
   3940         }
   3941         __ Or(AT, AT, in_low);
   3942         __ Sltiu(AT, AT, 1);
   3943         __ Sll(AT, AT, 31);
   3944         __ Move(out_low, in_low);
   3945         __ Xor(out_high, in_high, AT);
   3946       }
   3947     }
   3948   }
   3949 }
   3950 
   3951 void InstructionCodeGeneratorMIPS::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
   3952   DCHECK(instruction->IsDiv() || instruction->IsRem());
   3953   DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt32);
   3954 
   3955   LocationSummary* locations = instruction->GetLocations();
   3956   Location second = locations->InAt(1);
   3957   DCHECK(second.IsConstant());
   3958 
   3959   Register out = locations->Out().AsRegister<Register>();
   3960   Register dividend = locations->InAt(0).AsRegister<Register>();
   3961   int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
   3962 
   3963   int64_t magic;
   3964   int shift;
   3965   CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
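           // Reciprocal division (Hacker's Delight): take the high 32 bits of
           // dividend * magic, correct for the signs of imm and magic, shift, then add
           // the sign bit to round toward zero. E.g. with the usual constants, imm = 3
           // gives magic = 0x55555556, shift = 0; for dividend = 7,
           // hi(7 * magic) = 2 = 7 / 3.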
   3966 
   3967   bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   3968 
   3969   __ LoadConst32(TMP, magic);
   3970   if (isR6) {
   3971     __ MuhR6(TMP, dividend, TMP);
   3972   } else {
   3973     __ MultR2(dividend, TMP);
   3974     __ Mfhi(TMP);
   3975   }
   3976   if (imm > 0 && magic < 0) {
   3977     __ Addu(TMP, TMP, dividend);
   3978   } else if (imm < 0 && magic > 0) {
   3979     __ Subu(TMP, TMP, dividend);
   3980   }
   3981 
   3982   if (shift != 0) {
   3983     __ Sra(TMP, TMP, shift);
   3984   }
   3985 
   3986   if (instruction->IsDiv()) {
   3987     __ Sra(out, TMP, 31);
   3988     __ Subu(out, TMP, out);
   3989   } else {
   3990     __ Sra(AT, TMP, 31);
   3991     __ Subu(AT, TMP, AT);
   3992     __ LoadConst32(TMP, imm);
   3993     if (isR6) {
   3994       __ MulR6(TMP, AT, TMP);
   3995     } else {
   3996       __ MulR2(TMP, AT, TMP);
   3997     }
   3998     __ Subu(out, dividend, TMP);
   3999   }
   4000 }
   4001 
   4002 void InstructionCodeGeneratorMIPS::GenerateDivRemIntegral(HBinaryOperation* instruction) {
   4003   DCHECK(instruction->IsDiv() || instruction->IsRem());
   4004   DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt32);
   4005 
   4006   LocationSummary* locations = instruction->GetLocations();
   4007   Register out = locations->Out().AsRegister<Register>();
   4008   Location second = locations->InAt(1);
   4009 
   4010   if (second.IsConstant()) {
   4011     int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
   4012     if (imm == 0) {
    4013       // Do not generate anything. DivZeroCheck would prevent any code from being executed.
   4014     } else if (imm == 1 || imm == -1) {
   4015       DivRemOneOrMinusOne(instruction);
   4016     } else if (IsPowerOfTwo(AbsOrMin(imm))) {
   4017       DivRemByPowerOfTwo(instruction);
   4018     } else {
   4019       DCHECK(imm <= -2 || imm >= 2);
   4020       GenerateDivRemWithAnyConstant(instruction);
   4021     }
   4022   } else {
   4023     Register dividend = locations->InAt(0).AsRegister<Register>();
   4024     Register divisor = second.AsRegister<Register>();
   4025     bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   4026     if (instruction->IsDiv()) {
   4027       if (isR6) {
   4028         __ DivR6(out, dividend, divisor);
   4029       } else {
   4030         __ DivR2(out, dividend, divisor);
   4031       }
   4032     } else {
   4033       if (isR6) {
   4034         __ ModR6(out, dividend, divisor);
   4035       } else {
   4036         __ ModR2(out, dividend, divisor);
   4037       }
   4038     }
   4039   }
   4040 }
   4041 
   4042 void LocationsBuilderMIPS::VisitDiv(HDiv* div) {
   4043   DataType::Type type = div->GetResultType();
   4044   bool call_long_div = false;
   4045   if (type == DataType::Type::kInt64) {
   4046     if (div->InputAt(1)->IsConstant()) {
   4047       int64_t imm = CodeGenerator::GetInt64ValueOf(div->InputAt(1)->AsConstant());
   4048       call_long_div = (imm != 0) && !IsPowerOfTwo(static_cast<uint64_t>(AbsOrMin(imm)));
   4049     } else {
   4050       call_long_div = true;
   4051     }
   4052   }
   4053   LocationSummary::CallKind call_kind = call_long_div
   4054       ? LocationSummary::kCallOnMainOnly
   4055       : LocationSummary::kNoCall;
   4056 
   4057   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(div, call_kind);
   4058 
   4059   switch (type) {
   4060     case DataType::Type::kInt32:
   4061       locations->SetInAt(0, Location::RequiresRegister());
   4062       locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
   4063       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   4064       break;
   4065 
   4066     case DataType::Type::kInt64: {
   4067       if (call_long_div) {
   4068         InvokeRuntimeCallingConvention calling_convention;
   4069         locations->SetInAt(0, Location::RegisterPairLocation(
   4070             calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
   4071         locations->SetInAt(1, Location::RegisterPairLocation(
   4072             calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
   4073         locations->SetOut(calling_convention.GetReturnLocation(type));
   4074       } else {
   4075         locations->SetInAt(0, Location::RequiresRegister());
   4076         locations->SetInAt(1, Location::ConstantLocation(div->InputAt(1)->AsConstant()));
   4077         locations->SetOut(Location::RequiresRegister());
   4078       }
   4079       break;
   4080     }
   4081 
   4082     case DataType::Type::kFloat32:
   4083     case DataType::Type::kFloat64:
   4084       locations->SetInAt(0, Location::RequiresFpuRegister());
   4085       locations->SetInAt(1, Location::RequiresFpuRegister());
   4086       locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
   4087       break;
   4088 
   4089     default:
   4090       LOG(FATAL) << "Unexpected div type " << type;
   4091   }
   4092 }
   4093 
   4094 void InstructionCodeGeneratorMIPS::VisitDiv(HDiv* instruction) {
   4095   DataType::Type type = instruction->GetType();
   4096   LocationSummary* locations = instruction->GetLocations();
   4097 
   4098   switch (type) {
   4099     case DataType::Type::kInt32:
   4100       GenerateDivRemIntegral(instruction);
   4101       break;
   4102     case DataType::Type::kInt64: {
   4103       if (locations->InAt(1).IsConstant()) {
   4104         int64_t imm = locations->InAt(1).GetConstant()->AsLongConstant()->GetValue();
   4105         if (imm == 0) {
    4106           // Do not generate anything. DivZeroCheck would prevent any code from being executed.
   4107         } else if (imm == 1 || imm == -1) {
   4108           DivRemOneOrMinusOne(instruction);
   4109         } else {
   4110           DCHECK(IsPowerOfTwo(static_cast<uint64_t>(AbsOrMin(imm))));
   4111           DivRemByPowerOfTwo(instruction);
   4112         }
   4113       } else {
   4114         codegen_->InvokeRuntime(kQuickLdiv, instruction, instruction->GetDexPc());
   4115         CheckEntrypointTypes<kQuickLdiv, int64_t, int64_t, int64_t>();
   4116       }
   4117       break;
   4118     }
   4119     case DataType::Type::kFloat32:
   4120     case DataType::Type::kFloat64: {
   4121       FRegister dst = locations->Out().AsFpuRegister<FRegister>();
   4122       FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
   4123       FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
   4124       if (type == DataType::Type::kFloat32) {
   4125         __ DivS(dst, lhs, rhs);
   4126       } else {
   4127         __ DivD(dst, lhs, rhs);
   4128       }
   4129       break;
   4130     }
   4131     default:
   4132       LOG(FATAL) << "Unexpected div type " << type;
   4133   }
   4134 }
   4135 
   4136 void LocationsBuilderMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
   4137   LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
   4138   locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
   4139 }
   4140 
   4141 void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
   4142   SlowPathCodeMIPS* slow_path =
   4143       new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathMIPS(instruction);
   4144   codegen_->AddSlowPath(slow_path);
   4145   Location value = instruction->GetLocations()->InAt(0);
   4146   DataType::Type type = instruction->GetType();
   4147 
   4148   switch (type) {
   4149     case DataType::Type::kBool:
   4150     case DataType::Type::kUint8:
   4151     case DataType::Type::kInt8:
   4152     case DataType::Type::kUint16:
   4153     case DataType::Type::kInt16:
   4154     case DataType::Type::kInt32: {
   4155       if (value.IsConstant()) {
   4156         if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
   4157           __ B(slow_path->GetEntryLabel());
   4158         } else {
    4159           // A division by a non-zero constant is valid. We don't need to perform
    4160           // any check, so simply fall through.
   4161         }
   4162       } else {
   4163         DCHECK(value.IsRegister()) << value;
   4164         __ Beqz(value.AsRegister<Register>(), slow_path->GetEntryLabel());
   4165       }
   4166       break;
   4167     }
   4168     case DataType::Type::kInt64: {
   4169       if (value.IsConstant()) {
   4170         if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
   4171           __ B(slow_path->GetEntryLabel());
   4172         } else {
    4173           // A division by a non-zero constant is valid. We don't need to perform
    4174           // any check, so simply fall through.
   4175         }
   4176       } else {
   4177         DCHECK(value.IsRegisterPair()) << value;
   4178         __ Or(TMP, value.AsRegisterPairHigh<Register>(), value.AsRegisterPairLow<Register>());
   4179         __ Beqz(TMP, slow_path->GetEntryLabel());
   4180       }
   4181       break;
   4182     }
   4183     default:
   4184       LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
   4185   }
   4186 }
   4187 
   4188 void LocationsBuilderMIPS::VisitDoubleConstant(HDoubleConstant* constant) {
   4189   LocationSummary* locations =
   4190       new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
   4191   locations->SetOut(Location::ConstantLocation(constant));
   4192 }
   4193 
   4194 void InstructionCodeGeneratorMIPS::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
   4195   // Will be generated at use site.
   4196 }
   4197 
   4198 void LocationsBuilderMIPS::VisitExit(HExit* exit) {
   4199   exit->SetLocations(nullptr);
   4200 }
   4201 
   4202 void InstructionCodeGeneratorMIPS::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
   4203 }
   4204 
   4205 void LocationsBuilderMIPS::VisitFloatConstant(HFloatConstant* constant) {
   4206   LocationSummary* locations =
   4207       new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
   4208   locations->SetOut(Location::ConstantLocation(constant));
   4209 }
   4210 
   4211 void InstructionCodeGeneratorMIPS::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
   4212   // Will be generated at use site.
   4213 }
   4214 
   4215 void LocationsBuilderMIPS::VisitGoto(HGoto* got) {
   4216   got->SetLocations(nullptr);
   4217 }
   4218 
   4219 void InstructionCodeGeneratorMIPS::HandleGoto(HInstruction* got, HBasicBlock* successor) {
   4220   if (successor->IsExitBlock()) {
   4221     DCHECK(got->GetPrevious()->AlwaysThrows());
   4222     return;  // no code needed
   4223   }
   4224 
   4225   HBasicBlock* block = got->GetBlock();
   4226   HInstruction* previous = got->GetPrevious();
   4227   HLoopInformation* info = block->GetLoopInformation();
   4228 
   4229   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
   4230     if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
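               // Load the current ArtMethod* from its stack slot and bump its 16-bit
               // hotness counter on this back edge.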
   4231       __ Lw(AT, SP, kCurrentMethodStackOffset);
   4232       __ Lhu(TMP, AT, ArtMethod::HotnessCountOffset().Int32Value());
   4233       __ Addiu(TMP, TMP, 1);
   4234       __ Sh(TMP, AT, ArtMethod::HotnessCountOffset().Int32Value());
   4235     }
   4236     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
   4237     return;
   4238   }
   4239   if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
   4240     GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
   4241   }
   4242   if (!codegen_->GoesToNextBlock(block, successor)) {
   4243     __ B(codegen_->GetLabelOf(successor));
   4244   }
   4245 }
   4246 
   4247 void InstructionCodeGeneratorMIPS::VisitGoto(HGoto* got) {
   4248   HandleGoto(got, got->GetSuccessor());
   4249 }
   4250 
   4251 void LocationsBuilderMIPS::VisitTryBoundary(HTryBoundary* try_boundary) {
   4252   try_boundary->SetLocations(nullptr);
   4253 }
   4254 
   4255 void InstructionCodeGeneratorMIPS::VisitTryBoundary(HTryBoundary* try_boundary) {
   4256   HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
   4257   if (!successor->IsExitBlock()) {
   4258     HandleGoto(try_boundary, successor);
   4259   }
   4260 }
   4261 
   4262 void InstructionCodeGeneratorMIPS::GenerateIntCompare(IfCondition cond,
   4263                                                       LocationSummary* locations) {
   4264   Register dst = locations->Out().AsRegister<Register>();
   4265   Register lhs = locations->InAt(0).AsRegister<Register>();
   4266   Location rhs_location = locations->InAt(1);
   4267   Register rhs_reg = ZERO;
   4268   int64_t rhs_imm = 0;
   4269   bool use_imm = rhs_location.IsConstant();
   4270   if (use_imm) {
   4271     rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
   4272   } else {
   4273     rhs_reg = rhs_location.AsRegister<Register>();
   4274   }
   4275 
   4276   switch (cond) {
   4277     case kCondEQ:
   4278     case kCondNE:
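               // Make dst zero iff lhs == rhs: subtract rhs (Addiu with the negated
               // immediate) or XOR with it, then map zero/non-zero to the boolean
               // result (Sltiu(dst, dst, 1) for EQ, Sltu(dst, ZERO, dst) for NE).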
   4279       if (use_imm && IsInt<16>(-rhs_imm)) {
   4280         if (rhs_imm == 0) {
   4281           if (cond == kCondEQ) {
   4282             __ Sltiu(dst, lhs, 1);
   4283           } else {
   4284             __ Sltu(dst, ZERO, lhs);
   4285           }
   4286         } else {
   4287           __ Addiu(dst, lhs, -rhs_imm);
   4288           if (cond == kCondEQ) {
   4289             __ Sltiu(dst, dst, 1);
   4290           } else {
   4291             __ Sltu(dst, ZERO, dst);
   4292           }
   4293         }
   4294       } else {
   4295         if (use_imm && IsUint<16>(rhs_imm)) {
   4296           __ Xori(dst, lhs, rhs_imm);
   4297         } else {
   4298           if (use_imm) {
   4299             rhs_reg = TMP;
   4300             __ LoadConst32(rhs_reg, rhs_imm);
   4301           }
   4302           __ Xor(dst, lhs, rhs_reg);
   4303         }
   4304         if (cond == kCondEQ) {
   4305           __ Sltiu(dst, dst, 1);
   4306         } else {
   4307           __ Sltu(dst, ZERO, dst);
   4308         }
   4309       }
   4310       break;
   4311 
   4312     case kCondLT:
   4313     case kCondGE:
   4314       if (use_imm && IsInt<16>(rhs_imm)) {
   4315         __ Slti(dst, lhs, rhs_imm);
   4316       } else {
   4317         if (use_imm) {
   4318           rhs_reg = TMP;
   4319           __ LoadConst32(rhs_reg, rhs_imm);
   4320         }
   4321         __ Slt(dst, lhs, rhs_reg);
   4322       }
   4323       if (cond == kCondGE) {
   4324         // Simulate lhs >= rhs via !(lhs < rhs) since there's
   4325         // only the slt instruction but no sge.
   4326         __ Xori(dst, dst, 1);
   4327       }
   4328       break;
   4329 
   4330     case kCondLE:
   4331     case kCondGT:
   4332       if (use_imm && IsInt<16>(rhs_imm + 1)) {
   4333         // Simulate lhs <= rhs via lhs < rhs + 1.
   4334         __ Slti(dst, lhs, rhs_imm + 1);
   4335         if (cond == kCondGT) {
   4336           // Simulate lhs > rhs via !(lhs <= rhs) since there's
   4337           // only the slti instruction but no sgti.
   4338           __ Xori(dst, dst, 1);
   4339         }
   4340       } else {
   4341         if (use_imm) {
   4342           rhs_reg = TMP;
   4343           __ LoadConst32(rhs_reg, rhs_imm);
   4344         }
   4345         __ Slt(dst, rhs_reg, lhs);
   4346         if (cond == kCondLE) {
   4347           // Simulate lhs <= rhs via !(rhs < lhs) since there's
   4348           // only the slt instruction but no sle.
   4349           __ Xori(dst, dst, 1);
   4350         }
   4351       }
   4352       break;
   4353 
   4354     case kCondB:
   4355     case kCondAE:
   4356       if (use_imm && IsInt<16>(rhs_imm)) {
   4357         // Sltiu sign-extends its 16-bit immediate operand before
   4358         // the comparison and thus lets us compare directly with
   4359         // unsigned values in the ranges [0, 0x7fff] and
   4360         // [0xffff8000, 0xffffffff].
   4361         __ Sltiu(dst, lhs, rhs_imm);
   4362       } else {
   4363         if (use_imm) {
   4364           rhs_reg = TMP;
   4365           __ LoadConst32(rhs_reg, rhs_imm);
   4366         }
   4367         __ Sltu(dst, lhs, rhs_reg);
   4368       }
   4369       if (cond == kCondAE) {
   4370         // Simulate lhs >= rhs via !(lhs < rhs) since there's
   4371         // only the sltu instruction but no sgeu.
   4372         __ Xori(dst, dst, 1);
   4373       }
   4374       break;
   4375 
   4376     case kCondBE:
   4377     case kCondA:
   4378       if (use_imm && (rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
   4379         // Simulate lhs <= rhs via lhs < rhs + 1.
   4380         // Note that this only works if rhs + 1 does not overflow
   4381         // to 0, hence the check above.
   4382         // Sltiu sign-extends its 16-bit immediate operand before
   4383         // the comparison and thus lets us compare directly with
   4384         // unsigned values in the ranges [0, 0x7fff] and
   4385         // [0xffff8000, 0xffffffff].
   4386         __ Sltiu(dst, lhs, rhs_imm + 1);
   4387         if (cond == kCondA) {
   4388           // Simulate lhs > rhs via !(lhs <= rhs) since there's
   4389           // only the sltiu instruction but no sgtiu.
   4390           __ Xori(dst, dst, 1);
   4391         }
   4392       } else {
   4393         if (use_imm) {
   4394           rhs_reg = TMP;
   4395           __ LoadConst32(rhs_reg, rhs_imm);
   4396         }
   4397         __ Sltu(dst, rhs_reg, lhs);
   4398         if (cond == kCondBE) {
   4399           // Simulate lhs <= rhs via !(rhs < lhs) since there's
   4400           // only the sltu instruction but no sleu.
   4401           __ Xori(dst, dst, 1);
   4402         }
   4403       }
   4404       break;
   4405   }
   4406 }
   4407 
   4408 bool InstructionCodeGeneratorMIPS::MaterializeIntCompare(IfCondition cond,
   4409                                                          LocationSummary* input_locations,
   4410                                                          Register dst) {
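           // Materializes the comparison into `dst` and returns true if the value is
           // the negation of the condition, i.e. if the condition holds exactly when
           // dst == 0.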
   4411   Register lhs = input_locations->InAt(0).AsRegister<Register>();
   4412   Location rhs_location = input_locations->InAt(1);
   4413   Register rhs_reg = ZERO;
   4414   int64_t rhs_imm = 0;
   4415   bool use_imm = rhs_location.IsConstant();
   4416   if (use_imm) {
   4417     rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
   4418   } else {
   4419     rhs_reg = rhs_location.AsRegister<Register>();
   4420   }
   4421 
   4422   switch (cond) {
   4423     case kCondEQ:
   4424     case kCondNE:
   4425       if (use_imm && IsInt<16>(-rhs_imm)) {
   4426         __ Addiu(dst, lhs, -rhs_imm);
   4427       } else if (use_imm && IsUint<16>(rhs_imm)) {
   4428         __ Xori(dst, lhs, rhs_imm);
   4429       } else {
   4430         if (use_imm) {
   4431           rhs_reg = TMP;
   4432           __ LoadConst32(rhs_reg, rhs_imm);
   4433         }
   4434         __ Xor(dst, lhs, rhs_reg);
   4435       }
   4436       return (cond == kCondEQ);
   4437 
   4438     case kCondLT:
   4439     case kCondGE:
   4440       if (use_imm && IsInt<16>(rhs_imm)) {
   4441         __ Slti(dst, lhs, rhs_imm);
   4442       } else {
   4443         if (use_imm) {
   4444           rhs_reg = TMP;
   4445           __ LoadConst32(rhs_reg, rhs_imm);
   4446         }
   4447         __ Slt(dst, lhs, rhs_reg);
   4448       }
   4449       return (cond == kCondGE);
   4450 
   4451     case kCondLE:
   4452     case kCondGT:
   4453       if (use_imm && IsInt<16>(rhs_imm + 1)) {
   4454         // Simulate lhs <= rhs via lhs < rhs + 1.
   4455         __ Slti(dst, lhs, rhs_imm + 1);
   4456         return (cond == kCondGT);
   4457       } else {
   4458         if (use_imm) {
   4459           rhs_reg = TMP;
   4460           __ LoadConst32(rhs_reg, rhs_imm);
   4461         }
   4462         __ Slt(dst, rhs_reg, lhs);
   4463         return (cond == kCondLE);
   4464       }
   4465 
   4466     case kCondB:
   4467     case kCondAE:
   4468       if (use_imm && IsInt<16>(rhs_imm)) {
   4469         // Sltiu sign-extends its 16-bit immediate operand before
   4470         // the comparison and thus lets us compare directly with
   4471         // unsigned values in the ranges [0, 0x7fff] and
   4472         // [0xffff8000, 0xffffffff].
   4473         __ Sltiu(dst, lhs, rhs_imm);
   4474       } else {
   4475         if (use_imm) {
   4476           rhs_reg = TMP;
   4477           __ LoadConst32(rhs_reg, rhs_imm);
   4478         }
   4479         __ Sltu(dst, lhs, rhs_reg);
   4480       }
   4481       return (cond == kCondAE);
   4482 
   4483     case kCondBE:
   4484     case kCondA:
   4485       if (use_imm && (rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
   4486         // Simulate lhs <= rhs via lhs < rhs + 1.
   4487         // Note that this only works if rhs + 1 does not overflow
   4488         // to 0, hence the check above.
   4489         // Sltiu sign-extends its 16-bit immediate operand before
   4490         // the comparison and thus lets us compare directly with
   4491         // unsigned values in the ranges [0, 0x7fff] and
   4492         // [0xffff8000, 0xffffffff].
   4493         __ Sltiu(dst, lhs, rhs_imm + 1);
   4494         return (cond == kCondA);
   4495       } else {
   4496         if (use_imm) {
   4497           rhs_reg = TMP;
   4498           __ LoadConst32(rhs_reg, rhs_imm);
   4499         }
   4500         __ Sltu(dst, rhs_reg, lhs);
   4501         return (cond == kCondBE);
   4502       }
   4503   }
   4504 }
   4505 
   4506 void InstructionCodeGeneratorMIPS::GenerateIntCompareAndBranch(IfCondition cond,
   4507                                                                LocationSummary* locations,
   4508                                                                MipsLabel* label) {
   4509   Register lhs = locations->InAt(0).AsRegister<Register>();
   4510   Location rhs_location = locations->InAt(1);
   4511   Register rhs_reg = ZERO;
   4512   int64_t rhs_imm = 0;
   4513   bool use_imm = rhs_location.IsConstant();
   4514   if (use_imm) {
   4515     rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
   4516   } else {
   4517     rhs_reg = rhs_location.AsRegister<Register>();
   4518   }
   4519 
   4520   if (use_imm && rhs_imm == 0) {
   4521     switch (cond) {
   4522       case kCondEQ:
   4523       case kCondBE:  // <= 0 if zero
   4524         __ Beqz(lhs, label);
   4525         break;
   4526       case kCondNE:
   4527       case kCondA:  // > 0 if non-zero
   4528         __ Bnez(lhs, label);
   4529         break;
   4530       case kCondLT:
   4531         __ Bltz(lhs, label);
   4532         break;
   4533       case kCondGE:
   4534         __ Bgez(lhs, label);
   4535         break;
   4536       case kCondLE:
   4537         __ Blez(lhs, label);
   4538         break;
   4539       case kCondGT:
   4540         __ Bgtz(lhs, label);
   4541         break;
   4542       case kCondB:  // always false
   4543         break;
   4544       case kCondAE:  // always true
   4545         __ B(label);
   4546         break;
   4547     }
   4548   } else {
   4549     bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   4550     if (isR6 || !use_imm) {
   4551       if (use_imm) {
   4552         rhs_reg = TMP;
   4553         __ LoadConst32(rhs_reg, rhs_imm);
   4554       }
   4555       switch (cond) {
   4556         case kCondEQ:
   4557           __ Beq(lhs, rhs_reg, label);
   4558           break;
   4559         case kCondNE:
   4560           __ Bne(lhs, rhs_reg, label);
   4561           break;
   4562         case kCondLT:
   4563           __ Blt(lhs, rhs_reg, label);
   4564           break;
   4565         case kCondGE:
   4566           __ Bge(lhs, rhs_reg, label);
   4567           break;
   4568         case kCondLE:
   4569           __ Bge(rhs_reg, lhs, label);
   4570           break;
   4571         case kCondGT:
   4572           __ Blt(rhs_reg, lhs, label);
   4573           break;
   4574         case kCondB:
   4575           __ Bltu(lhs, rhs_reg, label);
   4576           break;
   4577         case kCondAE:
   4578           __ Bgeu(lhs, rhs_reg, label);
   4579           break;
   4580         case kCondBE:
   4581           __ Bgeu(rhs_reg, lhs, label);
   4582           break;
   4583         case kCondA:
   4584           __ Bltu(rhs_reg, lhs, label);
   4585           break;
   4586       }
   4587     } else {
   4588       // Special cases for more efficient comparison with constants on R2.
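               // A 16-bit Slti/Sltiu immediate plus a branch on TMP avoids
               // materializing the full 32-bit constant, which may take two
               // instructions (Lui + Ori).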
   4589       switch (cond) {
   4590         case kCondEQ:
   4591           __ LoadConst32(TMP, rhs_imm);
   4592           __ Beq(lhs, TMP, label);
   4593           break;
   4594         case kCondNE:
   4595           __ LoadConst32(TMP, rhs_imm);
   4596           __ Bne(lhs, TMP, label);
   4597           break;
   4598         case kCondLT:
   4599           if (IsInt<16>(rhs_imm)) {
   4600             __ Slti(TMP, lhs, rhs_imm);
   4601             __ Bnez(TMP, label);
   4602           } else {
   4603             __ LoadConst32(TMP, rhs_imm);
   4604             __ Blt(lhs, TMP, label);
   4605           }
   4606           break;
   4607         case kCondGE:
   4608           if (IsInt<16>(rhs_imm)) {
   4609             __ Slti(TMP, lhs, rhs_imm);
   4610             __ Beqz(TMP, label);
   4611           } else {
   4612             __ LoadConst32(TMP, rhs_imm);
   4613             __ Bge(lhs, TMP, label);
   4614           }
   4615           break;
   4616         case kCondLE:
   4617           if (IsInt<16>(rhs_imm + 1)) {
   4618             // Simulate lhs <= rhs via lhs < rhs + 1.
   4619             __ Slti(TMP, lhs, rhs_imm + 1);
   4620             __ Bnez(TMP, label);
   4621           } else {
   4622             __ LoadConst32(TMP, rhs_imm);
   4623             __ Bge(TMP, lhs, label);
   4624           }
   4625           break;
   4626         case kCondGT:
   4627           if (IsInt<16>(rhs_imm + 1)) {
   4628             // Simulate lhs > rhs via !(lhs < rhs + 1).
   4629             __ Slti(TMP, lhs, rhs_imm + 1);
   4630             __ Beqz(TMP, label);
   4631           } else {
   4632             __ LoadConst32(TMP, rhs_imm);
   4633             __ Blt(TMP, lhs, label);
   4634           }
   4635           break;
   4636         case kCondB:
   4637           if (IsInt<16>(rhs_imm)) {
   4638             __ Sltiu(TMP, lhs, rhs_imm);
   4639             __ Bnez(TMP, label);
   4640           } else {
   4641             __ LoadConst32(TMP, rhs_imm);
   4642             __ Bltu(lhs, TMP, label);
   4643           }
   4644           break;
   4645         case kCondAE:
   4646           if (IsInt<16>(rhs_imm)) {
   4647             __ Sltiu(TMP, lhs, rhs_imm);
   4648             __ Beqz(TMP, label);
   4649           } else {
   4650             __ LoadConst32(TMP, rhs_imm);
   4651             __ Bgeu(lhs, TMP, label);
   4652           }
   4653           break;
   4654         case kCondBE:
   4655           if ((rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
   4656             // Simulate lhs <= rhs via lhs < rhs + 1.
   4657             // Note that this only works if rhs + 1 does not overflow
   4658             // to 0, hence the check above.
   4659             __ Sltiu(TMP, lhs, rhs_imm + 1);
   4660             __ Bnez(TMP, label);
   4661           } else {
   4662             __ LoadConst32(TMP, rhs_imm);
   4663             __ Bgeu(TMP, lhs, label);
   4664           }
   4665           break;
   4666         case kCondA:
   4667           if ((rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
   4668             // Simulate lhs > rhs via !(lhs < rhs + 1).
   4669             // Note that this only works if rhs + 1 does not overflow
   4670             // to 0, hence the check above.
   4671             __ Sltiu(TMP, lhs, rhs_imm + 1);
   4672             __ Beqz(TMP, label);
   4673           } else {
   4674             __ LoadConst32(TMP, rhs_imm);
   4675             __ Bltu(TMP, lhs, label);
   4676           }
   4677           break;
   4678       }
   4679     }
   4680   }
   4681 }
   4682 
   4683 void InstructionCodeGeneratorMIPS::GenerateLongCompare(IfCondition cond,
   4684                                                        LocationSummary* locations) {
   4685   Register dst = locations->Out().AsRegister<Register>();
   4686   Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
   4687   Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
   4688   Location rhs_location = locations->InAt(1);
   4689   Register rhs_high = ZERO;
   4690   Register rhs_low = ZERO;
   4691   int64_t imm = 0;
   4692   uint32_t imm_high = 0;
   4693   uint32_t imm_low = 0;
   4694   bool use_imm = rhs_location.IsConstant();
   4695   if (use_imm) {
   4696     imm = rhs_location.GetConstant()->AsLongConstant()->GetValue();
   4697     imm_high = High32Bits(imm);
   4698     imm_low = Low32Bits(imm);
   4699   } else {
   4700     rhs_high = rhs_location.AsRegisterPairHigh<Register>();
   4701     rhs_low = rhs_location.AsRegisterPairLow<Register>();
   4702   }
   4703   if (use_imm && imm == 0) {
   4704     switch (cond) {
   4705       case kCondEQ:
   4706       case kCondBE:  // <= 0 if zero
   4707         __ Or(dst, lhs_high, lhs_low);
   4708         __ Sltiu(dst, dst, 1);
   4709         break;
   4710       case kCondNE:
   4711       case kCondA:  // > 0 if non-zero
   4712         __ Or(dst, lhs_high, lhs_low);
   4713         __ Sltu(dst, ZERO, dst);
   4714         break;
   4715       case kCondLT:
   4716         __ Slt(dst, lhs_high, ZERO);
   4717         break;
   4718       case kCondGE:
   4719         __ Slt(dst, lhs_high, ZERO);
   4720         __ Xori(dst, dst, 1);
   4721         break;
   4722       case kCondLE:
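                 // lhs > 0 iff sign-extend(lhs_high) <u (lhs_high | lhs_low): the OR
                 // is non-zero for any non-zero lhs, while the sign mask is 0 for
                 // non-negative lhs and 0xFFFFFFFF (never unsigned-less) for negative
                 // lhs. kCondLE then inverts the result with the Xori.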
   4723         __ Or(TMP, lhs_high, lhs_low);
   4724         __ Sra(AT, lhs_high, 31);
   4725         __ Sltu(dst, AT, TMP);
   4726         __ Xori(dst, dst, 1);
   4727         break;
   4728       case kCondGT:
   4729         __ Or(TMP, lhs_high, lhs_low);
   4730         __ Sra(AT, lhs_high, 31);
   4731         __ Sltu(dst, AT, TMP);
   4732         break;
   4733       case kCondB:  // always false
   4734         __ Andi(dst, dst, 0);
   4735         break;
   4736       case kCondAE:  // always true
   4737         __ Ori(dst, ZERO, 1);
   4738         break;
   4739     }
   4740   } else if (use_imm) {
   4741     // TODO: more efficient comparison with constants without loading them into TMP/AT.
   4742     switch (cond) {
   4743       case kCondEQ:
   4744         __ LoadConst32(TMP, imm_high);
   4745         __ Xor(TMP, TMP, lhs_high);
   4746         __ LoadConst32(AT, imm_low);
   4747         __ Xor(AT, AT, lhs_low);
   4748         __ Or(dst, TMP, AT);
   4749         __ Sltiu(dst, dst, 1);
   4750         break;
   4751       case kCondNE:
   4752         __ LoadConst32(TMP, imm_high);
   4753         __ Xor(TMP, TMP, lhs_high);
   4754         __ LoadConst32(AT, imm_low);
   4755         __ Xor(AT, AT, lhs_low);
   4756         __ Or(dst, TMP, AT);
   4757         __ Sltu(dst, ZERO, dst);
   4758         break;
      case kCondLT:
      case kCondGE:
        if (dst == lhs_low) {
          __ LoadConst32(TMP, imm_low);
          __ Sltu(dst, lhs_low, TMP);
        }
        __ LoadConst32(TMP, imm_high);
        __ Slt(AT, lhs_high, TMP);
        __ Slt(TMP, TMP, lhs_high);
        if (dst != lhs_low) {
          __ LoadConst32(dst, imm_low);
          __ Sltu(dst, lhs_low, dst);
        }
        __ Slt(dst, TMP, dst);
        __ Or(dst, dst, AT);
        if (cond == kCondGE) {
          __ Xori(dst, dst, 1);
        }
        break;
      case kCondGT:
      case kCondLE:
        if (dst == lhs_low) {
          __ LoadConst32(TMP, imm_low);
          __ Sltu(dst, TMP, lhs_low);
        }
        __ LoadConst32(TMP, imm_high);
        __ Slt(AT, TMP, lhs_high);
        __ Slt(TMP, lhs_high, TMP);
        if (dst != lhs_low) {
          __ LoadConst32(dst, imm_low);
          __ Sltu(dst, dst, lhs_low);
        }
        __ Slt(dst, TMP, dst);
        __ Or(dst, dst, AT);
        if (cond == kCondLE) {
          __ Xori(dst, dst, 1);
        }
        break;
      case kCondB:
      case kCondAE:
        if (dst == lhs_low) {
          __ LoadConst32(TMP, imm_low);
          __ Sltu(dst, lhs_low, TMP);
        }
        __ LoadConst32(TMP, imm_high);
        __ Sltu(AT, lhs_high, TMP);
        __ Sltu(TMP, TMP, lhs_high);
        if (dst != lhs_low) {
          __ LoadConst32(dst, imm_low);
          __ Sltu(dst, lhs_low, dst);
        }
        __ Slt(dst, TMP, dst);
        __ Or(dst, dst, AT);
        if (cond == kCondAE) {
          __ Xori(dst, dst, 1);
        }
        break;
      case kCondA:
      case kCondBE:
        if (dst == lhs_low) {
          __ LoadConst32(TMP, imm_low);
          __ Sltu(dst, TMP, lhs_low);
        }
        __ LoadConst32(TMP, imm_high);
        __ Sltu(AT, TMP, lhs_high);
        __ Sltu(TMP, lhs_high, TMP);
        if (dst != lhs_low) {
          __ LoadConst32(dst, imm_low);
          __ Sltu(dst, dst, lhs_low);
        }
        __ Slt(dst, TMP, dst);
        __ Or(dst, dst, AT);
        if (cond == kCondBE) {
          __ Xori(dst, dst, 1);
        }
        break;
    }
  } else {
    switch (cond) {
      case kCondEQ:
        __ Xor(TMP, lhs_high, rhs_high);
        __ Xor(AT, lhs_low, rhs_low);
        __ Or(dst, TMP, AT);
        __ Sltiu(dst, dst, 1);
        break;
      case kCondNE:
        __ Xor(TMP, lhs_high, rhs_high);
        __ Xor(AT, lhs_low, rhs_low);
        __ Or(dst, TMP, AT);
        __ Sltu(dst, ZERO, dst);
        break;
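      // Register-register variant of the same decomposition: lhs < rhs iff
      // lhs_high < rhs_high, or the high words tie and lhs_low <u rhs_low.
      // Slt(TMP, TMP, AT) is 1 only when TMP == 0 && AT == 1, i.e. when
      // lhs_high <= rhs_high and the unsigned low-word comparison holds;
      // GE/LE/AE/BE are the Xori-negated forms of LT/GT/B/A.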
      case kCondLT:
      case kCondGE:
        __ Slt(TMP, rhs_high, lhs_high);
        __ Sltu(AT, lhs_low, rhs_low);
        __ Slt(TMP, TMP, AT);
        __ Slt(AT, lhs_high, rhs_high);
        __ Or(dst, AT, TMP);
        if (cond == kCondGE) {
          __ Xori(dst, dst, 1);
        }
        break;
      case kCondGT:
      case kCondLE:
        __ Slt(TMP, lhs_high, rhs_high);
        __ Sltu(AT, rhs_low, lhs_low);
        __ Slt(TMP, TMP, AT);
        __ Slt(AT, rhs_high, lhs_high);
        __ Or(dst, AT, TMP);
        if (cond == kCondLE) {
          __ Xori(dst, dst, 1);
        }
        break;
      case kCondB:
      case kCondAE:
        __ Sltu(TMP, rhs_high, lhs_high);
        __ Sltu(AT, lhs_low, rhs_low);
        __ Slt(TMP, TMP, AT);
        __ Sltu(AT, lhs_high, rhs_high);
        __ Or(dst, AT, TMP);
        if (cond == kCondAE) {
          __ Xori(dst, dst, 1);
        }
        break;
      case kCondA:
      case kCondBE:
        __ Sltu(TMP, lhs_high, rhs_high);
        __ Sltu(AT, rhs_low, lhs_low);
        __ Slt(TMP, TMP, AT);
        __ Sltu(AT, rhs_high, lhs_high);
        __ Or(dst, AT, TMP);
        if (cond == kCondBE) {
          __ Xori(dst, dst, 1);
        }
        break;
    }
  }
}

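// Generates a branch to `label` when the 64-bit condition `lhs cond rhs`
// holds. Uses the same high/low-word decomposition as GenerateLongCompare,
// but branches directly on the high-word comparison where possible instead
// of materializing the result in a register.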
void InstructionCodeGeneratorMIPS::GenerateLongCompareAndBranch(IfCondition cond,
                                                                LocationSummary* locations,
                                                                MipsLabel* label) {
  Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
  Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
  Location rhs_location = locations->InAt(1);
  Register rhs_high = ZERO;
  Register rhs_low = ZERO;
  int64_t imm = 0;
  uint32_t imm_high = 0;
  uint32_t imm_low = 0;
  bool use_imm = rhs_location.IsConstant();
  if (use_imm) {
    imm = rhs_location.GetConstant()->AsLongConstant()->GetValue();
    imm_high = High32Bits(imm);
    imm_low = Low32Bits(imm);
  } else {
    rhs_high = rhs_location.AsRegisterPairHigh<Register>();
    rhs_low = rhs_location.AsRegisterPairLow<Register>();
  }

  if (use_imm && imm == 0) {
    switch (cond) {
      case kCondEQ:
      case kCondBE:  // <= 0 if zero
        __ Or(TMP, lhs_high, lhs_low);
        __ Beqz(TMP, label);
        break;
      case kCondNE:
      case kCondA:  // > 0 if non-zero
        __ Or(TMP, lhs_high, lhs_low);
        __ Bnez(TMP, label);
        break;
      case kCondLT:
        __ Bltz(lhs_high, label);
        break;
      case kCondGE:
        __ Bgez(lhs_high, label);
        break;
      case kCondLE:
        __ Or(TMP, lhs_high, lhs_low);
        __ Sra(AT, lhs_high, 31);
        __ Bgeu(AT, TMP, label);
        break;
      case kCondGT:
        __ Or(TMP, lhs_high, lhs_low);
        __ Sra(AT, lhs_high, 31);
        __ Bltu(AT, TMP, label);
        break;
      case kCondB:  // always false
        break;
      case kCondAE:  // always true
        __ B(label);
        break;
    }
  } else if (use_imm) {
    // TODO: more efficient comparison with constants without loading them into TMP/AT.
    switch (cond) {
      case kCondEQ:
        __ LoadConst32(TMP, imm_high);
        __ Xor(TMP, TMP, lhs_high);
        __ LoadConst32(AT, imm_low);
        __ Xor(AT, AT, lhs_low);
        __ Or(TMP, TMP, AT);
        __ Beqz(TMP, label);
        break;
      case kCondNE:
        __ LoadConst32(TMP, imm_high);
        __ Xor(TMP, TMP, lhs_high);
        __ LoadConst32(AT, imm_low);
        __ Xor(AT, AT, lhs_low);
        __ Or(TMP, TMP, AT);
        __ Bnez(TMP, label);
        break;
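      // E.g. for kCondLT: branch at once if lhs_high < imm_high; otherwise
      // TMP = (imm_high < lhs_high) and AT = (lhs_low <u imm_low), so
      // Blt(TMP, AT, label) (TMP == 0 && AT == 1) branches exactly when the
      // high words tie and the unsigned low-word comparison holds.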
      case kCondLT:
        __ LoadConst32(TMP, imm_high);
        __ Blt(lhs_high, TMP, label);
        __ Slt(TMP, TMP, lhs_high);
        __ LoadConst32(AT, imm_low);
        __ Sltu(AT, lhs_low, AT);
        __ Blt(TMP, AT, label);
        break;
      case kCondGE:
        __ LoadConst32(TMP, imm_high);
        __ Blt(TMP, lhs_high, label);
        __ Slt(TMP, lhs_high, TMP);
        __ LoadConst32(AT, imm_low);
        __ Sltu(AT, lhs_low, AT);
        __ Or(TMP, TMP, AT);
        __ Beqz(TMP, label);
        break;
      case kCondLE:
        __ LoadConst32(TMP, imm_high);
        __ Blt(lhs_high, TMP, label);
        __ Slt(TMP, TMP, lhs_high);
        __ LoadConst32(AT, imm_low);
        __ Sltu(AT, AT, lhs_low);
        __ Or(TMP, TMP, AT);
        __ Beqz(TMP, label);
        break;
      case kCondGT:
        __ LoadConst32(TMP, imm_high);
        __ Blt(TMP, lhs_high, label);
        __ Slt(TMP, lhs_high, TMP);
        __ LoadConst32(AT, imm_low);
        __ Sltu(AT, AT, lhs_low);
        __ Blt(TMP, AT, label);
        break;
      case kCondB:
        __ LoadConst32(TMP, imm_high);
        __ Bltu(lhs_high, TMP, label);
        __ Sltu(TMP, TMP, lhs_high);
        __ LoadConst32(AT, imm_low);
        __ Sltu(AT, lhs_low, AT);
        __ Blt(TMP, AT, label);
        break;
      case kCondAE:
        __ LoadConst32(TMP, imm_high);
        __ Bltu(TMP, lhs_high, label);
        __ Sltu(TMP, lhs_high, TMP);
        __ LoadConst32(AT, imm_low);
        __ Sltu(AT, lhs_low, AT);
        __ Or(TMP, TMP, AT);
        __ Beqz(TMP, label);
        break;
      case kCondBE:
        __ LoadConst32(TMP, imm_high);
        __ Bltu(lhs_high, TMP, label);
        __ Sltu(TMP, TMP, lhs_high);
        __ LoadConst32(AT, imm_low);
        __ Sltu(AT, AT, lhs_low);
        __ Or(TMP, TMP, AT);
        __ Beqz(TMP, label);
        break;
      case kCondA:
        __ LoadConst32(TMP, imm_high);
        __ Bltu(TMP, lhs_high, label);
        __ Sltu(TMP, lhs_high, TMP);
        __ LoadConst32(AT, imm_low);
        __ Sltu(AT, AT, lhs_low);
        __ Blt(TMP, AT, label);
        break;
    }
  } else {
    switch (cond) {
      case kCondEQ:
        __ Xor(TMP, lhs_high, rhs_high);
        __ Xor(AT, lhs_low, rhs_low);
        __ Or(TMP, TMP, AT);
        __ Beqz(TMP, label);
        break;
      case kCondNE:
        __ Xor(TMP, lhs_high, rhs_high);
        __ Xor(AT, lhs_low, rhs_low);
        __ Or(TMP, TMP, AT);
        __ Bnez(TMP, label);
        break;
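      // The register-register forms follow the same scheme: branch on the
      // high-word comparison, then let the unsigned low-word comparison
      // decide ties (GE/LE/AE/BE branch on the negation via Or + Beqz).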
      case kCondLT:
        __ Blt(lhs_high, rhs_high, label);
        __ Slt(TMP, rhs_high, lhs_high);
        __ Sltu(AT, lhs_low, rhs_low);
        __ Blt(TMP, AT, label);
        break;
      case kCondGE:
        __ Blt(rhs_high, lhs_high, label);
        __ Slt(TMP, lhs_high, rhs_high);
        __ Sltu(AT, lhs_low, rhs_low);
        __ Or(TMP, TMP, AT);
        __ Beqz(TMP, label);
        break;
      case kCondLE:
        __ Blt(lhs_high, rhs_high, label);
        __ Slt(TMP, rhs_high, lhs_high);
        __ Sltu(AT, rhs_low, lhs_low);
        __ Or(TMP, TMP, AT);
        __ Beqz(TMP, label);
        break;
      case kCondGT:
        __ Blt(rhs_high, lhs_high, label);
        __ Slt(TMP, lhs_high, rhs_high);
        __ Sltu(AT, rhs_low, lhs_low);
        __ Blt(TMP, AT, label);
        break;
      case kCondB:
        __ Bltu(lhs_high, rhs_high, label);
        __ Sltu(TMP, rhs_high, lhs_high);
        __ Sltu(AT, lhs_low, rhs_low);
        __ Blt(TMP, AT, label);
        break;
      case kCondAE:
        __ Bltu(rhs_high, lhs_high, label);
        __ Sltu(TMP, lhs_high, rhs_high);
        __ Sltu(AT, lhs_low, rhs_low);
        __ Or(TMP, TMP, AT);
        __ Beqz(TMP, label);
        break;
      case kCondBE:
        __ Bltu(lhs_high, rhs_high, label);
        __ Sltu(TMP, rhs_high, lhs_high);
        __ Sltu(AT, rhs_low, lhs_low);
        __ Or(TMP, TMP, AT);
        __ Beqz(TMP, label);
        break;
      case kCondA:
        __ Bltu(rhs_high, lhs_high, label);
        __ Sltu(TMP, lhs_high, rhs_high);
        __ Sltu(AT, rhs_low, lhs_low);
        __ Blt(TMP, AT, label);
        break;
    }
  }
}

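// Materializes the result of a float/double comparison into `dst` as 0 or 1.
// `gt_bias` encodes how unordered (NaN) operands resolve: with gt_bias the
// ordered compare variants are used for LT/LE (NaN makes them false) and the
// unordered variants for GT/GE via swapped operands (NaN makes them true),
// matching HCompare's bias semantics.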
void InstructionCodeGeneratorMIPS::GenerateFpCompare(IfCondition cond,
                                                     bool gt_bias,
                                                     DataType::Type type,
                                                     LocationSummary* locations) {
  Register dst = locations->Out().AsRegister<Register>();
  FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
  FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
  if (type == DataType::Type::kFloat32) {
    if (isR6) {
      switch (cond) {
        case kCondEQ:
          __ CmpEqS(FTMP, lhs, rhs);
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
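        // CMP.EQ.S writes all ones (0xFFFFFFFF) to FTMP when the operands
        // compare equal and all zeros otherwise, so for kCondNE below the
        // Addiu of 1 maps -1 -> 0 and 0 -> 1, yielding the negated result
        // without an extra instruction.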
        case kCondNE:
          __ CmpEqS(FTMP, lhs, rhs);
          __ Mfc1(dst, FTMP);
          __ Addiu(dst, dst, 1);
          break;
        case kCondLT:
          if (gt_bias) {
            __ CmpLtS(FTMP, lhs, rhs);
          } else {
            __ CmpUltS(FTMP, lhs, rhs);
          }
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        case kCondLE:
          if (gt_bias) {
            __ CmpLeS(FTMP, lhs, rhs);
          } else {
            __ CmpUleS(FTMP, lhs, rhs);
          }
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        case kCondGT:
          if (gt_bias) {
            __ CmpUltS(FTMP, rhs, lhs);
          } else {
            __ CmpLtS(FTMP, rhs, lhs);
          }
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        case kCondGE:
          if (gt_bias) {
            __ CmpUleS(FTMP, rhs, lhs);
          } else {
            __ CmpLeS(FTMP, rhs, lhs);
          }
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        default:
          LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
          UNREACHABLE();
      }
    } else {
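      // Pre-R6 FP compares cannot write a GPR: c.<cond>.s sets FP condition
      // flag 0, dst is preloaded with 1, and MOVF/MOVT conditionally replaces
      // it with ZERO when the flag contradicts the requested condition.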
      switch (cond) {
        case kCondEQ:
          __ CeqS(0, lhs, rhs);
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        case kCondNE:
          __ CeqS(0, lhs, rhs);
          __ LoadConst32(dst, 1);
          __ Movt(dst, ZERO, 0);
          break;
        case kCondLT:
          if (gt_bias) {
            __ ColtS(0, lhs, rhs);
          } else {
            __ CultS(0, lhs, rhs);
          }
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        case kCondLE:
          if (gt_bias) {
            __ ColeS(0, lhs, rhs);
          } else {
            __ CuleS(0, lhs, rhs);
          }
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        case kCondGT:
          if (gt_bias) {
            __ CultS(0, rhs, lhs);
          } else {
            __ ColtS(0, rhs, lhs);
          }
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        case kCondGE:
          if (gt_bias) {
            __ CuleS(0, rhs, lhs);
          } else {
            __ ColeS(0, rhs, lhs);
          }
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        default:
          LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
          UNREACHABLE();
      }
    }
  } else {
    DCHECK_EQ(type, DataType::Type::kFloat64);
    if (isR6) {
      switch (cond) {
        case kCondEQ:
          __ CmpEqD(FTMP, lhs, rhs);
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        case kCondNE:
          __ CmpEqD(FTMP, lhs, rhs);
          __ Mfc1(dst, FTMP);
          __ Addiu(dst, dst, 1);
          break;
        case kCondLT:
          if (gt_bias) {
            __ CmpLtD(FTMP, lhs, rhs);
          } else {
            __ CmpUltD(FTMP, lhs, rhs);
          }
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        case kCondLE:
          if (gt_bias) {
            __ CmpLeD(FTMP, lhs, rhs);
          } else {
            __ CmpUleD(FTMP, lhs, rhs);
          }
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        case kCondGT:
          if (gt_bias) {
            __ CmpUltD(FTMP, rhs, lhs);
          } else {
            __ CmpLtD(FTMP, rhs, lhs);
          }
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        case kCondGE:
          if (gt_bias) {
            __ CmpUleD(FTMP, rhs, lhs);
          } else {
            __ CmpLeD(FTMP, rhs, lhs);
          }
          __ Mfc1(dst, FTMP);
          __ Andi(dst, dst, 1);
          break;
        default:
          LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
          UNREACHABLE();
      }
    } else {
      switch (cond) {
        case kCondEQ:
          __ CeqD(0, lhs, rhs);
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        case kCondNE:
          __ CeqD(0, lhs, rhs);
          __ LoadConst32(dst, 1);
          __ Movt(dst, ZERO, 0);
          break;
        case kCondLT:
          if (gt_bias) {
            __ ColtD(0, lhs, rhs);
          } else {
            __ CultD(0, lhs, rhs);
          }
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        case kCondLE:
          if (gt_bias) {
            __ ColeD(0, lhs, rhs);
          } else {
            __ CuleD(0, lhs, rhs);
          }
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        case kCondGT:
          if (gt_bias) {
            __ CultD(0, rhs, lhs);
          } else {
            __ ColtD(0, rhs, lhs);
          }
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        case kCondGE:
          if (gt_bias) {
            __ CuleD(0, rhs, lhs);
          } else {
            __ ColeD(0, rhs, lhs);
          }
          __ LoadConst32(dst, 1);
          __ Movf(dst, ZERO, 0);
          break;
        default:
          LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
          UNREACHABLE();
      }
    }
  }
}

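// Emits a pre-R6 FP comparison into condition flag `cc` and returns whether
// the flag holds the inverse of the requested condition. Only kCondNE is
// materialized as its EQ opposite (returning true); the caller then flips
// the sense of the consuming MOVF/MOVT.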
bool InstructionCodeGeneratorMIPS::MaterializeFpCompareR2(IfCondition cond,
                                                          bool gt_bias,
                                                          DataType::Type type,
                                                          LocationSummary* input_locations,
                                                          int cc) {
  FRegister lhs = input_locations->InAt(0).AsFpuRegister<FRegister>();
  FRegister rhs = input_locations->InAt(1).AsFpuRegister<FRegister>();
  CHECK(!codegen_->GetInstructionSetFeatures().IsR6());
  if (type == DataType::Type::kFloat32) {
    switch (cond) {
      case kCondEQ:
        __ CeqS(cc, lhs, rhs);
        return false;
      case kCondNE:
        __ CeqS(cc, lhs, rhs);
        return true;
      case kCondLT:
        if (gt_bias) {
          __ ColtS(cc, lhs, rhs);
        } else {
          __ CultS(cc, lhs, rhs);
        }
        return false;
      case kCondLE:
        if (gt_bias) {
          __ ColeS(cc, lhs, rhs);
        } else {
          __ CuleS(cc, lhs, rhs);
        }
        return false;
      case kCondGT:
        if (gt_bias) {
          __ CultS(cc, rhs, lhs);
        } else {
          __ ColtS(cc, rhs, lhs);
        }
        return false;
      case kCondGE:
        if (gt_bias) {
          __ CuleS(cc, rhs, lhs);
        } else {
          __ ColeS(cc, rhs, lhs);
        }
        return false;
      default:
        LOG(FATAL) << "Unexpected non-floating-point condition";
        UNREACHABLE();
    }
  } else {
    DCHECK_EQ(type, DataType::Type::kFloat64);
    switch (cond) {
      case kCondEQ:
        __ CeqD(cc, lhs, rhs);
        return false;
      case kCondNE:
        __ CeqD(cc, lhs, rhs);
        return true;
      case kCondLT:
        if (gt_bias) {
          __ ColtD(cc, lhs, rhs);
        } else {
          __ CultD(cc, lhs, rhs);
        }
        return false;
      case kCondLE:
        if (gt_bias) {
          __ ColeD(cc, lhs, rhs);
        } else {
          __ CuleD(cc, lhs, rhs);
        }
        return false;
      case kCondGT:
        if (gt_bias) {
          __ CultD(cc, rhs, lhs);
        } else {
          __ ColtD(cc, rhs, lhs);
        }
        return false;
      case kCondGE:
        if (gt_bias) {
          __ CuleD(cc, rhs, lhs);
        } else {
          __ ColeD(cc, rhs, lhs);
        }
        return false;
      default:
        LOG(FATAL) << "Unexpected non-floating-point condition";
        UNREACHABLE();
    }
  }
}

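// R6 counterpart: emits CMP.<cond>.fmt into the FPU register `dst` (all ones
// when the comparison holds, all zeros otherwise). As above, only kCondNE is
// materialized inverted, signaled by returning true.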
bool InstructionCodeGeneratorMIPS::MaterializeFpCompareR6(IfCondition cond,
                                                          bool gt_bias,
                                                          DataType::Type type,
                                                          LocationSummary* input_locations,
                                                          FRegister dst) {
  FRegister lhs = input_locations->InAt(0).AsFpuRegister<FRegister>();
  FRegister rhs = input_locations->InAt(1).AsFpuRegister<FRegister>();
  CHECK(codegen_->GetInstructionSetFeatures().IsR6());
  if (type == DataType::Type::kFloat32) {
    switch (cond) {
      case kCondEQ:
        __ CmpEqS(dst, lhs, rhs);
        return false;
      case kCondNE:
        __ CmpEqS(dst, lhs, rhs);
        return true;
      case kCondLT:
        if (gt_bias) {
          __ CmpLtS(dst, lhs, rhs);
        } else {
          __ CmpUltS(dst, lhs, rhs);
        }
        return false;
      case kCondLE:
        if (gt_bias) {
          __ CmpLeS(dst, lhs, rhs);
        } else {
          __ CmpUleS(dst, lhs, rhs);
        }
        return false;
      case kCondGT:
        if (gt_bias) {
          __ CmpUltS(dst, rhs, lhs);
        } else {
          __ CmpLtS(dst, rhs, lhs);
        }
        return false;
      case kCondGE:
        if (gt_bias) {
          __ CmpUleS(dst, rhs, lhs);
        } else {
          __ CmpLeS(dst, rhs, lhs);
        }
        return false;
      default:
        LOG(FATAL) << "Unexpected non-floating-point condition";
        UNREACHABLE();
    }
  } else {
    DCHECK_EQ(type, DataType::Type::kFloat64);
    switch (cond) {
      case kCondEQ:
        __ CmpEqD(dst, lhs, rhs);
        return false;
      case kCondNE:
        __ CmpEqD(dst, lhs, rhs);
        return true;
      case kCondLT:
        if (gt_bias) {
          __ CmpLtD(dst, lhs, rhs);
        } else {
          __ CmpUltD(dst, lhs, rhs);
        }
        return false;
      case kCondLE:
        if (gt_bias) {
          __ CmpLeD(dst, lhs, rhs);
        } else {
          __ CmpUleD(dst, lhs, rhs);
        }
        return false;
      case kCondGT:
        if (gt_bias) {
          __ CmpUltD(dst, rhs, lhs);
        } else {
          __ CmpLtD(dst, rhs, lhs);
        }
        return false;
      case kCondGE:
        if (gt_bias) {
          __ CmpUleD(dst, rhs, lhs);
        } else {
          __ CmpLeD(dst, rhs, lhs);
        }
        return false;
      default:
        LOG(FATAL) << "Unexpected non-floating-point condition";
        UNREACHABLE();
    }
  }
}

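// Branches to `label` when the FP comparison holds. On R6 the comparison
// result is materialized in FTMP and tested with Bc1nez/Bc1eqz; pre-R6 sets
// FP condition flag 0 and branches with Bc1t/Bc1f. `gt_bias` again selects
// the ordered vs. unordered compare variants, i.e. how NaN operands resolve.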
void InstructionCodeGeneratorMIPS::GenerateFpCompareAndBranch(IfCondition cond,
                                                              bool gt_bias,
                                                              DataType::Type type,
                                                              LocationSummary* locations,
                                                              MipsLabel* label) {
  FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
  FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
  if (type == DataType::Type::kFloat32) {
    if (isR6) {
      switch (cond) {
        case kCondEQ:
          __ CmpEqS(FTMP, lhs, rhs);
          __ Bc1nez(FTMP, label);
          break;
        case kCondNE:
          __ CmpEqS(FTMP, lhs, rhs);
          __ Bc1eqz(FTMP, label);
          break;
        case kCondLT:
          if (gt_bias) {
            __ CmpLtS(FTMP, lhs, rhs);
          } else {
            __ CmpUltS(FTMP, lhs, rhs);
          }
          __ Bc1nez(FTMP, label);
          break;
        case kCondLE:
          if (gt_bias) {
            __ CmpLeS(FTMP, lhs, rhs);
          } else {
            __ CmpUleS(FTMP, lhs, rhs);
          }
          __ Bc1nez(FTMP, label);
          break;
        case kCondGT:
          if (gt_bias) {
            __ CmpUltS(FTMP, rhs, lhs);
          } else {
            __ CmpLtS(FTMP, rhs, lhs);
          }
          __ Bc1nez(FTMP, label);
          break;
        case kCondGE:
          if (gt_bias) {
            __ CmpUleS(FTMP, rhs, lhs);
          } else {
            __ CmpLeS(FTMP, rhs, lhs);
          }
          __ Bc1nez(FTMP, label);
          break;
        default:
          LOG(FATAL) << "Unexpected non-floating-point condition";
          UNREACHABLE();
      }
    } else {
      switch (cond) {
        case kCondEQ:
          __ CeqS(0, lhs, rhs);
          __ Bc1t(0, label);
          break;
        case kCondNE:
          __ CeqS(0, lhs, rhs);
          __ Bc1f(0, label);
          break;
        case kCondLT:
          if (gt_bias) {
            __ ColtS(0, lhs, rhs);
          } else {
            __ CultS(0, lhs, rhs);
          }
          __ Bc1t(0, label);
          break;
        case kCondLE:
          if (gt_bias) {
            __ ColeS(0, lhs, rhs);
          } else {
            __ CuleS(0, lhs, rhs);
          }
          __ Bc1t(0, label);
          break;
        case kCondGT:
          if (gt_bias) {
            __ CultS(0, rhs, lhs);
          } else {
            __ ColtS(0, rhs, lhs);
          }
          __ Bc1t(0, label);
          break;
        case kCondGE:
          if (gt_bias) {
            __ CuleS(0, rhs, lhs);
          } else {
            __ ColeS(0, rhs, lhs);
          }
          __ Bc1t(0, label);
          break;
        default:
          LOG(FATAL) << "Unexpected non-floating-point condition";
          UNREACHABLE();
      }
    }
  } else {
    DCHECK_EQ(type, DataType::Type::kFloat64);
    if (isR6) {
      switch (cond) {
        case kCondEQ:
          __ CmpEqD(FTMP, lhs, rhs);
          __ Bc1nez(FTMP, label);
          break;
        case kCondNE:
          __ CmpEqD(FTMP, lhs, rhs);
          __ Bc1eqz(FTMP, label);
          break;
        case kCondLT:
          if (gt_bias) {
            __ CmpLtD(FTMP, lhs, rhs);
          } else {
            __ CmpUltD(FTMP, lhs, rhs);
          }
          __ Bc1nez(FTMP, label);
          break;
        case kCondLE:
          if (gt_bias) {
            __ CmpLeD(FTMP, lhs, rhs);
          } else {
            __ CmpUleD(FTMP, lhs, rhs);
          }
          __ Bc1nez(FTMP, label);
          break;
        case kCondGT:
          if (gt_bias) {
            __ CmpUltD(FTMP, rhs, lhs);
          } else {
            __ CmpLtD(FTMP, rhs, lhs);
          }
          __ Bc1nez(FTMP, label);
          break;
        case kCondGE:
          if (gt_bias) {
            __ CmpUleD(FTMP, rhs, lhs);
          } else {
            __ CmpLeD(FTMP, rhs, lhs);
          }
          __ Bc1nez(FTMP, label);
          break;
        default:
          LOG(FATAL) << "Unexpected non-floating-point condition";
          UNREACHABLE();
      }
    } else {
      switch (cond) {
        case kCondEQ:
          __ CeqD(0, lhs, rhs);
          __ Bc1t(0, label);
          break;
        case kCondNE:
          __ CeqD(0, lhs, rhs);
          __ Bc1f(0, label);
          break;
        case kCondLT:
          if (gt_bias) {
            __ ColtD(0, lhs, rhs);
          } else {
            __ CultD(0, lhs, rhs);
          }
          __ Bc1t(0, label);
          break;
        case kCondLE:
          if (gt_bias) {
            __ ColeD(0, lhs, rhs);
          } else {
            __ CuleD(0, lhs, rhs);
          }
          __ Bc1t(0, label);
          break;
        case kCondGT:
          if (gt_bias) {
            __ CultD(0, rhs, lhs);
          } else {
            __ ColtD(0, rhs, lhs);
          }
          __ Bc1t(0, label);
          break;
        case kCondGE:
          if (gt_bias) {
            __ CuleD(0, rhs, lhs);
          } else {
            __ ColeD(0, rhs, lhs);
          }
          __ Bc1t(0, label);
          break;
        default:
          LOG(FATAL) << "Unexpected non-floating-point condition";
          UNREACHABLE();
      }
    }
  }
}

void InstructionCodeGeneratorMIPS::GenerateTestAndBranch(HInstruction* instruction,
                                                         size_t condition_input_index,
                                                         MipsLabel* true_target,
                                                         MipsLabel* false_target) {
  HInstruction* cond = instruction->InputAt(condition_input_index);

  if (true_target == nullptr && false_target == nullptr) {
    // Nothing to do. The code always falls through.
    return;
  } else if (cond->IsIntConstant()) {
    // Constant condition, statically compared against "true" (integer value 1).
    if (cond->AsIntConstant()->IsTrue()) {
      if (true_target != nullptr) {
        __ B(true_target);
      }
    } else {
      DCHECK(cond->AsIntConstant()->IsFalse()) << cond->AsIntConstant()->GetValue();
      if (false_target != nullptr) {
        __ B(false_target);
      }
    }
    return;
  }

  // The following code generates these patterns:
  //  (1) true_target == nullptr && false_target != nullptr
  //        - opposite condition true => branch to false_target
  //  (2) true_target != nullptr && false_target == nullptr
  //        - condition true => branch to true_target
  //  (3) true_target != nullptr && false_target != nullptr
  //        - condition true => branch to true_target
  //        - branch to false_target
  if (IsBooleanValueOrMaterializedCondition(cond)) {
    // The condition instruction has been materialized, compare the output to 0.
    Location cond_val = instruction->GetLocations()->InAt(condition_input_index);
    DCHECK(cond_val.IsRegister());
    if (true_target == nullptr) {
      __ Beqz(cond_val.AsRegister<Register>(), false_target);
    } else {
      __ Bnez(cond_val.AsRegister<Register>(), true_target);
    }
  } else {
    // The condition instruction has not been materialized, use its inputs as
    // the comparison and its condition as the branch condition.
    HCondition* condition = cond->AsCondition();
    DataType::Type type = condition->InputAt(0)->GetType();
    LocationSummary* locations = cond->GetLocations();
    IfCondition if_cond = condition->GetCondition();
    MipsLabel* branch_target = true_target;

    if (true_target == nullptr) {
      if_cond = condition->GetOppositeCondition();
      branch_target = false_target;
    }

    switch (type) {
      default:
        GenerateIntCompareAndBranch(if_cond, locations, branch_target);
        break;
      case DataType::Type::kInt64:
        GenerateLongCompareAndBranch(if_cond, locations, branch_target);
        break;
      case DataType::Type::kFloat32:
      case DataType::Type::kFloat64:
        GenerateFpCompareAndBranch(if_cond, condition->IsGtBias(), type, locations, branch_target);
        break;
    }
  }

  // If neither branch falls through (case 3), the conditional branch to `true_target`
  // was already emitted (case 2) and we need to emit a jump to `false_target`.
  if (true_target != nullptr && false_target != nullptr) {
    __ B(false_target);
  }
}

void LocationsBuilderMIPS::VisitIf(HIf* if_instr) {
  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
  if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorMIPS::VisitIf(HIf* if_instr) {
  HBasicBlock* true_successor = if_instr->IfTrueSuccessor();
  HBasicBlock* false_successor = if_instr->IfFalseSuccessor();
  MipsLabel* true_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ?
      nullptr : codegen_->GetLabelOf(true_successor);
  MipsLabel* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
      nullptr : codegen_->GetLabelOf(false_successor);
  GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
}

void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
  LocationSummary* locations = new (GetGraph()->GetAllocator())
      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
  InvokeRuntimeCallingConvention calling_convention;
  RegisterSet caller_saves = RegisterSet::Empty();
  caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetCustomSlowPathCallerSaves(caller_saves);
  if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
  SlowPathCodeMIPS* slow_path =
      deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS>(deoptimize);
  GenerateTestAndBranch(deoptimize,
                        /* condition_input_index */ 0,
                        slow_path->GetEntryLabel(),
                        /* false_target */ nullptr);
}

// This function returns true if a conditional move can be generated for HSelect.
// Otherwise it returns false and HSelect must be implemented in terms of
// conditional branches and regular moves.
//
// If `locations_to_set` isn't nullptr, its inputs and outputs are set for HSelect.
//
// While determining feasibility of a conditional move and setting inputs/outputs
// are two distinct tasks, this function does both because they share quite a bit
// of common logic.
static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* locations_to_set) {
  bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition());
  HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
  HCondition* condition = cond->AsCondition();

  DataType::Type cond_type =
      materialized ? DataType::Type::kInt32 : condition->InputAt(0)->GetType();
  DataType::Type dst_type = select->GetType();

  HConstant* cst_true_value = select->GetTrueValue()->AsConstant();
  HConstant* cst_false_value = select->GetFalseValue()->AsConstant();
  bool is_true_value_zero_constant =
      (cst_true_value != nullptr && cst_true_value->IsZeroBitPattern());
  bool is_false_value_zero_constant =
      (cst_false_value != nullptr && cst_false_value->IsZeroBitPattern());

  bool can_move_conditionally = false;
  bool use_const_for_false_in = false;
  bool use_const_for_true_in = false;

  if (!cond->IsConstant()) {
    switch (cond_type) {
      default:
        switch (dst_type) {
          default:
            // Moving int on int condition.
            if (is_r6) {
              if (is_true_value_zero_constant) {
                // seleqz out_reg, false_reg, cond_reg
                can_move_conditionally = true;
                use_const_for_true_in = true;
              } else if (is_false_value_zero_constant) {
                // selnez out_reg, true_reg, cond_reg
                can_move_conditionally = true;
                use_const_for_false_in = true;
              } else if (materialized) {
                // Not materializing unmaterialized int conditions
                // to keep the instruction count low.
                // selnez AT, true_reg, cond_reg
                // seleqz TMP, false_reg, cond_reg
                // or out_reg, AT, TMP
                can_move_conditionally = true;
              }
            } else {
              // movn out_reg, true_reg/ZERO, cond_reg
              can_move_conditionally = true;
              use_const_for_true_in = is_true_value_zero_constant;
            }
            break;
          case DataType::Type::kInt64:
            // Moving long on int condition.
            if (is_r6) {
              if (is_true_value_zero_constant) {
                // seleqz out_reg_lo, false_reg_lo, cond_reg
                // seleqz out_reg_hi, false_reg_hi, cond_reg
                can_move_conditionally = true;
                use_const_for_true_in = true;
              } else if (is_false_value_zero_constant) {
                // selnez out_reg_lo, true_reg_lo, cond_reg
                // selnez out_reg_hi, true_reg_hi, cond_reg
                can_move_conditionally = true;
                use_const_for_false_in = true;
              }
              // Other long conditional moves would generate 6+ instructions,
              // which is too many.
            } else {
              // movn out_reg_lo, true_reg_lo/ZERO, cond_reg
              // movn out_reg_hi, true_reg_hi/ZERO, cond_reg
              can_move_conditionally = true;
              use_const_for_true_in = is_true_value_zero_constant;
            }
            break;
          case DataType::Type::kFloat32:
          case DataType::Type::kFloat64:
            // Moving float/double on int condition.
            if (is_r6) {
              if (materialized) {
                // Not materializing unmaterialized int conditions
                // to keep the instruction count low.
                can_move_conditionally = true;
                if (is_true_value_zero_constant) {
                  // sltu TMP, ZERO, cond_reg
                  // mtc1 TMP, temp_cond_reg
                  // seleqz.fmt out_reg, false_reg, temp_cond_reg
                  use_const_for_true_in = true;
                } else if (is_false_value_zero_constant) {
                  // sltu TMP, ZERO, cond_reg
                  // mtc1 TMP, temp_cond_reg
                  // selnez.fmt out_reg, true_reg, temp_cond_reg
                  use_const_for_false_in = true;
                } else {
                  // sltu TMP, ZERO, cond_reg
                  // mtc1 TMP, temp_cond_reg
                  // sel.fmt temp_cond_reg, false_reg, true_reg
                  // mov.fmt out_reg, temp_cond_reg
                }
              }
            } else {
              // movn.fmt out_reg, true_reg, cond_reg
              can_move_conditionally = true;
            }
            break;
        }
        break;
      case DataType::Type::kInt64:
        // We don't materialize long comparisons for now
        // and use conditional branches instead.
        break;
      case DataType::Type::kFloat32:
      case DataType::Type::kFloat64:
        switch (dst_type) {
          default:
            // Moving int on float/double condition.
            if (is_r6) {
              if (is_true_value_zero_constant) {
                // mfc1 TMP, temp_cond_reg
                // seleqz out_reg, false_reg, TMP
                can_move_conditionally = true;
                use_const_for_true_in = true;
              } else if (is_false_value_zero_constant) {
                // mfc1 TMP, temp_cond_reg
                // selnez out_reg, true_reg, TMP
                can_move_conditionally = true;
                use_const_for_false_in = true;
              } else {
                // mfc1 TMP, temp_cond_reg
                // selnez AT, true_reg, TMP
                // seleqz TMP, false_reg, TMP
                // or out_reg, AT, TMP
                can_move_conditionally = true;
              }
            } else {
              // movt out_reg, true_reg/ZERO, cc
              can_move_conditionally = true;
              use_const_for_true_in = is_true_value_zero_constant;
            }
            break;
          case DataType::Type::kInt64:
            // Moving long on float/double condition.
            if (is_r6) {
              if (is_true_value_zero_constant) {
                // mfc1 TMP, temp_cond_reg
                // seleqz out_reg_lo, false_reg_lo, TMP
                // seleqz out_reg_hi, false_reg_hi, TMP
                can_move_conditionally = true;
                use_const_for_true_in = true;
              } else if (is_false_value_zero_constant) {
                // mfc1 TMP, temp_cond_reg
                // selnez out_reg_lo, true_reg_lo, TMP
                // selnez out_reg_hi, true_reg_hi, TMP
                can_move_conditionally = true;
                use_const_for_false_in = true;
              }
              // Other long conditional moves would generate 6+ instructions,
              // which is too many.
            } else {
              // movt out_reg_lo, true_reg_lo/ZERO, cc
              // movt out_reg_hi, true_reg_hi/ZERO, cc
              can_move_conditionally = true;
              use_const_for_true_in = is_true_value_zero_constant;
            }
            break;
          case DataType::Type::kFloat32:
          case DataType::Type::kFloat64:
            // Moving float/double on float/double condition.
            if (is_r6) {
              can_move_conditionally = true;
              if (is_true_value_zero_constant) {
                // seleqz.fmt out_reg, false_reg, temp_cond_reg
                use_const_for_true_in = true;
              } else if (is_false_value_zero_constant) {
                // selnez.fmt out_reg, true_reg, temp_cond_reg
                use_const_for_false_in = true;
              } else {
                // sel.fmt temp_cond_reg, false_reg, true_reg
                // mov.fmt out_reg, temp_cond_reg
              }
            } else {
              // movt.fmt out_reg, true_reg, cc
              can_move_conditionally = true;
            }
            break;
        }
        break;
    }
  }

  if (can_move_conditionally) {
    DCHECK(!use_const_for_false_in || !use_const_for_true_in);
  } else {
    DCHECK(!use_const_for_false_in);
    DCHECK(!use_const_for_true_in);
  }

  if (locations_to_set != nullptr) {
    if (use_const_for_false_in) {
      locations_to_set->SetInAt(0, Location::ConstantLocation(cst_false_value));
    } else {
      locations_to_set->SetInAt(0,
                                DataType::IsFloatingPointType(dst_type)
                                    ? Location::RequiresFpuRegister()
                                    : Location::RequiresRegister());
    }
    if (use_const_for_true_in) {
      locations_to_set->SetInAt(1, Location::ConstantLocation(cst_true_value));
    } else {
      locations_to_set->SetInAt(1,
                                DataType::IsFloatingPointType(dst_type)
                                    ? Location::RequiresFpuRegister()
                                    : Location::RequiresRegister());
    }
    if (materialized) {
      locations_to_set->SetInAt(2, Location::RequiresRegister());
    }
    // On R6 we don't require the output to be the same as the
    // first input for conditional moves unlike on R2.
    bool is_out_same_as_first_in = !can_move_conditionally || !is_r6;
    if (is_out_same_as_first_in) {
      locations_to_set->SetOut(Location::SameAsFirstInput());
    } else {
      locations_to_set->SetOut(DataType::IsFloatingPointType(dst_type)
                                   ? Location::RequiresFpuRegister()
                                   : Location::RequiresRegister());
    }
  }

  return can_move_conditionally;
}

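// Emits an HSelect as pre-R6 conditional moves: MOVZ/MOVN on a GPR condition,
// MOVF/MOVT on an FP condition flag. The output register is constrained to
// equal the first (false-value) input, so only the true value (or ZERO, when
// it is a zero constant) needs to be conditionally moved in.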
void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) {
  LocationSummary* locations = select->GetLocations();
  Location dst = locations->Out();
  Location src = locations->InAt(1);
  Register src_reg = ZERO;
  Register src_reg_high = ZERO;
  HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
  Register cond_reg = TMP;
  int cond_cc = 0;
  DataType::Type cond_type = DataType::Type::kInt32;
  bool cond_inverted = false;
  DataType::Type dst_type = select->GetType();

  if (IsBooleanValueOrMaterializedCondition(cond)) {
    cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
  } else {
    HCondition* condition = cond->AsCondition();
    LocationSummary* cond_locations = cond->GetLocations();
    IfCondition if_cond = condition->GetCondition();
    cond_type = condition->InputAt(0)->GetType();
    switch (cond_type) {
      default:
        DCHECK_NE(cond_type, DataType::Type::kInt64);
        cond_inverted = MaterializeIntCompare(if_cond, cond_locations, cond_reg);
        break;
      case DataType::Type::kFloat32:
      case DataType::Type::kFloat64:
        cond_inverted = MaterializeFpCompareR2(if_cond,
                                               condition->IsGtBias(),
                                               cond_type,
                                               cond_locations,
                                               cond_cc);
        break;
    }
  }

  DCHECK(dst.Equals(locations->InAt(0)));
  if (src.IsRegister()) {
    src_reg = src.AsRegister<Register>();
  } else if (src.IsRegisterPair()) {
    src_reg = src.AsRegisterPairLow<Register>();
    src_reg_high = src.AsRegisterPairHigh<Register>();
  } else if (src.IsConstant()) {
    DCHECK(src.GetConstant()->IsZeroBitPattern());
  }

  switch (cond_type) {
    default:
      switch (dst_type) {
        default:
          if (cond_inverted) {
            __ Movz(dst.AsRegister<Register>(), src_reg, cond_reg);
          } else {
            __ Movn(dst.AsRegister<Register>(), src_reg, cond_reg);
          }
          break;
        case DataType::Type::kInt64:
          if (cond_inverted) {
            __ Movz(dst.AsRegisterPairLow<Register>(), src_reg, cond_reg);
            __ Movz(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_reg);
          } else {
            __ Movn(dst.AsRegisterPairLow<Register>(), src_reg, cond_reg);
            __ Movn(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_reg);
          }
          break;
        case DataType::Type::kFloat32:
          if (cond_inverted) {
            __ MovzS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
          } else {
            __ MovnS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
          }
          break;
        case DataType::Type::kFloat64:
          if (cond_inverted) {
            __ MovzD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
          } else {
            __ MovnD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
          }
          break;
      }
      break;
    case DataType::Type::kInt64:
      LOG(FATAL) << "Unreachable";
      UNREACHABLE();
    case DataType::Type::kFloat32:
    case DataType::Type::kFloat64:
      switch (dst_type) {
        default:
          if (cond_inverted) {
            __ Movf(dst.AsRegister<Register>(), src_reg, cond_cc);
          } else {
            __ Movt(dst.AsRegister<Register>(), src_reg, cond_cc);
          }
          break;
        case DataType::Type::kInt64:
          if (cond_inverted) {
            __ Movf(dst.AsRegisterPairLow<Register>(), src_reg, cond_cc);
            __ Movf(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_cc);
          } else {
            __ Movt(dst.AsRegisterPairLow<Register>(), src_reg, cond_cc);
            __ Movt(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_cc);
          }
          break;
        case DataType::Type::kFloat32:
          if (cond_inverted) {
            __ MovfS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
          } else {
            __ MovtS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
          }
          break;
        case DataType::Type::kFloat64:
          if (cond_inverted) {
            __ MovfD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
          } else {
            __ MovtD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
          }
          break;
      }
      break;
  }
}

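// R6 counterpart using SELEQZ/SELNEZ (and SEL.fmt for FP destinations). R6
// selects write zero to the destination when not selecting, so a zero
// constant input folds into a single SELEQZ/SELNEZ; the general case
// combines an AT/TMP pair with OR.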
   6193 void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) {
   6194   LocationSummary* locations = select->GetLocations();
   6195   Location dst = locations->Out();
   6196   Location false_src = locations->InAt(0);
   6197   Location true_src = locations->InAt(1);
   6198   HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
   6199   Register cond_reg = TMP;
   6200   FRegister fcond_reg = FTMP;
   6201   DataType::Type cond_type = DataType::Type::kInt32;
   6202   bool cond_inverted = false;
   6203   DataType::Type dst_type = select->GetType();
   6204 
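           // The condition is either a boolean (or an already materialized condition)
           // sitting in a register, or it is materialized here into TMP/FTMP. The
           // Materialize* helpers return true when the value they produce is the inverse
           // of the condition; this flips the polarity of the sel* instructions below.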
   6205   if (IsBooleanValueOrMaterializedCondition(cond)) {
   6206     cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
   6207   } else {
   6208     HCondition* condition = cond->AsCondition();
   6209     LocationSummary* cond_locations = cond->GetLocations();
   6210     IfCondition if_cond = condition->GetCondition();
   6211     cond_type = condition->InputAt(0)->GetType();
   6212     switch (cond_type) {
   6213       default:
   6214         DCHECK_NE(cond_type, DataType::Type::kInt64);
   6215         cond_inverted = MaterializeIntCompare(if_cond, cond_locations, cond_reg);
   6216         break;
   6217       case DataType::Type::kFloat32:
   6218       case DataType::Type::kFloat64:
   6219         cond_inverted = MaterializeFpCompareR6(if_cond,
   6220                                                condition->IsGtBias(),
   6221                                                cond_type,
   6222                                                cond_locations,
   6223                                                fcond_reg);
   6224         break;
   6225     }
   6226   }
   6227 
   6228   if (true_src.IsConstant()) {
   6229     DCHECK(true_src.GetConstant()->IsZeroBitPattern());
   6230   }
   6231   if (false_src.IsConstant()) {
   6232     DCHECK(false_src.GetConstant()->IsZeroBitPattern());
   6233   }
   6234 
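           // A constant input (checked above to be the zero bit pattern) is never loaded
           // into a register: selecting "zero" is done by selecting the other source with
           // the opposite-polarity sel* instruction, whose result is zero whenever the
           // source is not selected.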
   6235   switch (dst_type) {
   6236     default:
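               // On R6, FP compares (cmp.cond.fmt) leave an all-ones/all-zeros mask in
               // an FPR; its low word is moved to a GPR so that the integer
               // seleqz/selnez instructions can test it for zero/non-zero.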
   6237       if (DataType::IsFloatingPointType(cond_type)) {
   6238         __ Mfc1(cond_reg, fcond_reg);
   6239       }
   6240       if (true_src.IsConstant()) {
   6241         if (cond_inverted) {
   6242           __ Selnez(dst.AsRegister<Register>(), false_src.AsRegister<Register>(), cond_reg);
   6243         } else {
   6244           __ Seleqz(dst.AsRegister<Register>(), false_src.AsRegister<Register>(), cond_reg);
   6245         }
   6246       } else if (false_src.IsConstant()) {
   6247         if (cond_inverted) {
   6248           __ Seleqz(dst.AsRegister<Register>(), true_src.AsRegister<Register>(), cond_reg);
   6249         } else {
   6250           __ Selnez(dst.AsRegister<Register>(), true_src.AsRegister<Register>(), cond_reg);
   6251         }
   6252       } else {
   6253         DCHECK_NE(cond_reg, AT);
   6254         if (cond_inverted) {
   6255           __ Seleqz(AT, true_src.AsRegister<Register>(), cond_reg);
   6256           __ Selnez(TMP, false_src.AsRegister<Register>(), cond_reg);
   6257         } else {
   6258           __ Selnez(AT, true_src.AsRegister<Register>(), cond_reg);
   6259           __ Seleqz(TMP, false_src.AsRegister<Register>(), cond_reg);
   6260         }
   6261         __ Or(dst.AsRegister<Register>(), AT, TMP);
   6262       }
   6263       break;
   6264     case DataType::Type::kInt64: {
   6265       if (DataType::IsFloatingPointType(cond_type)) {
   6266         __ Mfc1(cond_reg, fcond_reg);
   6267       }
   6268       Register dst_lo = dst.AsRegisterPairLow<Register>();
   6269       Register dst_hi = dst.AsRegisterPairHigh<Register>();
   6270       if (true_src.IsConstant()) {
   6271         Register src_lo = false_src.AsRegisterPairLow<Register>();
   6272         Register src_hi = false_src.AsRegisterPairHigh<Register>();
   6273         if (cond_inverted) {
   6274           __ Selnez(dst_lo, src_lo, cond_reg);
   6275           __ Selnez(dst_hi, src_hi, cond_reg);
   6276         } else {
   6277           __ Seleqz(dst_lo, src_lo, cond_reg);
   6278           __ Seleqz(dst_hi, src_hi, cond_reg);
   6279         }
   6280       } else {
   6281         DCHECK(false_src.IsConstant());
   6282         Register src_lo = true_src.AsRegisterPairLow<Register>();
   6283         Register src_hi = true_src.AsRegisterPairHigh<Register>();
   6284         if (cond_inverted) {
   6285           __ Seleqz(dst_lo, src_lo, cond_reg);
   6286           __ Seleqz(dst_hi, src_hi, cond_reg);
   6287         } else {
   6288           __ Selnez(dst_lo, src_lo, cond_reg);
   6289           __ Selnez(dst_hi, src_hi, cond_reg);
   6290         }
   6291       }
   6292       break;
   6293     }
   6294     case DataType::Type::kFloat32: {
   6295       if (!DataType::IsFloatingPointType(cond_type)) {
   6296         // sel*.fmt tests bit 0 of the condition register, account for that.
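                 // Sltu(TMP, ZERO, cond) computes the unsigned comparison 0 < cond,
                 // normalizing any non-zero condition value to exactly 1 before it is
                 // transferred to the FPR.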
   6297         __ Sltu(TMP, ZERO, cond_reg);
   6298         __ Mtc1(TMP, fcond_reg);
   6299       }
   6300       FRegister dst_reg = dst.AsFpuRegister<FRegister>();
   6301       if (true_src.IsConstant()) {
   6302         FRegister src_reg = false_src.AsFpuRegister<FRegister>();
   6303         if (cond_inverted) {
   6304           __ SelnezS(dst_reg, src_reg, fcond_reg);
   6305         } else {
   6306           __ SeleqzS(dst_reg, src_reg, fcond_reg);
   6307         }
   6308       } else if (false_src.IsConstant()) {
   6309         FRegister src_reg = true_src.AsFpuRegister<FRegister>();
   6310         if (cond_inverted) {
   6311           __ SeleqzS(dst_reg, src_reg, fcond_reg);
   6312         } else {
   6313           __ SelnezS(dst_reg, src_reg, fcond_reg);
   6314         }
   6315       } else {
   6316         if (cond_inverted) {
   6317           __ SelS(fcond_reg,
   6318                   true_src.AsFpuRegister<FRegister>(),
   6319                   false_src.AsFpuRegister<FRegister>());
   6320         } else {
   6321           __ SelS(fcond_reg,
   6322                   false_src.AsFpuRegister<FRegister>(),
   6323                   true_src.AsFpuRegister<FRegister>());
   6324         }
   6325         __ MovS(dst_reg, fcond_reg);
   6326       }
   6327       break;
   6328     }
   6329     case DataType::Type::kFloat64: {
   6330       if (!DataType::IsFloatingPointType(cond_type)) {
   6331         // sel*.fmt tests bit 0 of the condition register, account for that.
   6332         __ Sltu(TMP, ZERO, cond_reg);
   6333         __ Mtc1(TMP, fcond_reg);
   6334       }
   6335       FRegister dst_reg = dst.AsFpuRegister<FRegister>();
   6336       if (true_src.IsConstant()) {
   6337         FRegister src_reg = false_src.AsFpuRegister<FRegister>();
   6338         if (cond_inverted) {
   6339           __ SelnezD(dst_reg, src_reg, fcond_reg);
   6340         } else {
   6341           __ SeleqzD(dst_reg, src_reg, fcond_reg);
   6342         }
   6343       } else if (false_src.IsConstant()) {
   6344         FRegister src_reg = true_src.AsFpuRegister<FRegister>();
   6345         if (cond_inverted) {
   6346           __ SeleqzD(dst_reg, src_reg, fcond_reg);
   6347         } else {
   6348           __ SelnezD(dst_reg, src_reg, fcond_reg);
   6349         }
   6350       } else {
   6351         if (cond_inverted) {
   6352           __ SelD(fcond_reg,
   6353                   true_src.AsFpuRegister<FRegister>(),
   6354                   false_src.AsFpuRegister<FRegister>());
   6355         } else {
   6356           __ SelD(fcond_reg,
   6357                   false_src.AsFpuRegister<FRegister>(),
   6358                   true_src.AsFpuRegister<FRegister>());
   6359         }
   6360         __ MovD(dst_reg, fcond_reg);
   6361       }
   6362       break;
   6363     }
   6364   }
   6365 }
   6366 
   6367 void LocationsBuilderMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
   6368   LocationSummary* locations = new (GetGraph()->GetAllocator())
   6369       LocationSummary(flag, LocationSummary::kNoCall);
   6370   locations->SetOut(Location::RequiresRegister());
   6371 }
   6372 
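         // The "should deoptimize" flag is a word reserved in the current frame; reading
         // it is a plain load from SP at the offset assigned by the frame layout.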
   6373 void InstructionCodeGeneratorMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
   6374   __ LoadFromOffset(kLoadWord,
   6375                     flag->GetLocations()->Out().AsRegister<Register>(),
   6376                     SP,
   6377                     codegen_->GetStackOffsetOfShouldDeoptimizeFlag());
   6378 }
   6379 
   6380 void LocationsBuilderMIPS::VisitSelect(HSelect* select) {
   6381   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
   6382   CanMoveConditionally(select, codegen_->GetInstructionSetFeatures().IsR6(), locations);
   6383 }
   6384 
   6385 void InstructionCodeGeneratorMIPS::VisitSelect(HSelect* select) {
   6386   bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
   6387   if (CanMoveConditionally(select, is_r6, /* locations_to_set */ nullptr)) {
   6388     if (is_r6) {
   6389       GenConditionalMoveR6(select);
   6390     } else {
   6391       GenConditionalMoveR2(select);
   6392     }
   6393   } else {
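             // Conditional moves do not pay off here; use a branch instead. The out
             // location is expected to coincide with input 0 (the false value), so only
             // the true value needs a move: the branch to `false_target` skips that move
             // when the condition does not hold.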
   6394     LocationSummary* locations = select->GetLocations();
   6395     MipsLabel false_target;
   6396     GenerateTestAndBranch(select,
   6397                           /* condition_input_index */ 2,
   6398                           /* true_target */ nullptr,
   6399                           &false_target);
   6400     codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
   6401     __ Bind(&false_target);
   6402   }
   6403 }
   6404 
   6405 void LocationsBuilderMIPS::VisitNativeDebugInfo(HNativeDebugInfo* info) {
   6406   new (GetGraph()->GetAllocator()) LocationSummary(info);
   6407 }
   6408 
   6409 void InstructionCodeGeneratorMIPS::VisitNativeDebugInfo(HNativeDebugInfo*) {
   6410   // MaybeRecordNativeDebugInfo is already called implicitly in CodeGenerator::Compile.
   6411 }
   6412 
   6413 void CodeGeneratorMIPS::GenerateNop() {
   6414   __ Nop();
   6415 }
   6416 
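         // Volatile 64-bit (long/double) field loads cannot be performed atomically with
         // ordinary 32-bit MIPS loads, so they are routed through the kQuickA64Load
         // runtime entrypoint; such gets are therefore marked kCallOnMainOnly and reserve
         // runtime calling convention registers below.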
   6417 void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
   6418   DataType::Type field_type = field_info.GetFieldType();
   6419   bool is_wide = (field_type == DataType::Type::kInt64) || (field_type == DataType::Type::kFloat64);
   6420   bool generate_volatile = field_info.IsVolatile() && is_wide;
   6421   bool object_field_get_with_read_barrier =
   6422       kEmitCompilerReadBarrier && (field_type == DataType::Type::kReference);
   6423   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
   6424       instruction,
   6425       generate_volatile
   6426           ? LocationSummary::kCallOnMainOnly
   6427           : (object_field_get_with_read_barrier
   6428               ? LocationSummary::kCallOnSlowPath
   6429               : LocationSummary::kNoCall));
   6430 
   6431   if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
   6432     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
   6433   }
   6434   locations->SetInAt(0, Location::RequiresRegister());
   6435   if (generate_volatile) {
   6436     InvokeRuntimeCallingConvention calling_convention;
    6437     // Need A0 to hold base + offset.
   6438     locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   6439     if (field_type == DataType::Type::kInt64) {
   6440       locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kInt64));
   6441     } else {
    6442       // Use Location::Any() to avoid running out of available FP registers.
    6443       locations->SetOut(Location::Any());
    6444       // Need some temp core registers since FP results are returned in core registers.
   6445       Location reg = calling_convention.GetReturnLocation(DataType::Type::kInt64);
   6446       locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairLow<Register>()));
   6447       locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairHigh<Register>()));
   6448     }
   6449   } else {
   6450     if (DataType::IsFloatingPointType(instruction->GetType())) {
   6451       locations->SetOut(Location::RequiresFpuRegister());
   6452     } else {
   6453       // The output overlaps in the case of an object field get with
   6454       // read barriers enabled: we do not want the move to overwrite the
   6455       // object's location, as we need it to emit the read barrier.
   6456       locations->SetOut(Location::RequiresRegister(),
   6457                         object_field_get_with_read_barrier
   6458                             ? Location::kOutputOverlap
   6459                             : Location::kNoOutputOverlap);
   6460     }
   6461     if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
   6462       // We need a temporary register for the read barrier marking slow
   6463       // path in CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier.
   6464       if (!kBakerReadBarrierThunksEnableForFields) {
   6465         locations->AddTemp(Location::RequiresRegister());
   6466       }
   6467     }
   6468   }
   6469 }
   6470 
   6471 void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
   6472                                                   const FieldInfo& field_info,
   6473                                                   uint32_t dex_pc) {
   6474   DCHECK_EQ(DataType::Size(field_info.GetFieldType()), DataType::Size(instruction->GetType()));
   6475   DataType::Type type = instruction->GetType();
   6476   LocationSummary* locations = instruction->GetLocations();
   6477   Location obj_loc = locations->InAt(0);
   6478   Register obj = obj_loc.AsRegister<Register>();
   6479   Location dst_loc = locations->Out();
   6480   LoadOperandType load_type = kLoadUnsignedByte;
   6481   bool is_volatile = field_info.IsVolatile();
   6482   uint32_t offset = field_info.GetFieldOffset().Uint32Value();
   6483   auto null_checker = GetImplicitNullChecker(instruction, codegen_);
   6484 
   6485   switch (type) {
   6486     case DataType::Type::kBool:
   6487     case DataType::Type::kUint8:
   6488       load_type = kLoadUnsignedByte;
   6489       break;
   6490     case DataType::Type::kInt8:
   6491       load_type = kLoadSignedByte;
   6492       break;
   6493     case DataType::Type::kUint16:
   6494       load_type = kLoadUnsignedHalfword;
   6495       break;
   6496     case DataType::Type::kInt16:
   6497       load_type = kLoadSignedHalfword;
   6498       break;
   6499     case DataType::Type::kInt32:
   6500     case DataType::Type::kFloat32:
   6501     case DataType::Type::kReference:
   6502       load_type = kLoadWord;
   6503       break;
   6504     case DataType::Type::kInt64:
   6505     case DataType::Type::kFloat64:
   6506       load_type = kLoadDoubleword;
   6507       break;
   6508     case DataType::Type::kUint32:
   6509     case DataType::Type::kUint64:
   6510     case DataType::Type::kVoid:
   6511       LOG(FATAL) << "Unreachable type " << type;
   6512       UNREACHABLE();
   6513   }
   6514 
   6515   if (is_volatile && load_type == kLoadDoubleword) {
   6516     InvokeRuntimeCallingConvention calling_convention;
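             // Temp 0 was allocated to the first runtime argument register (A0); form
             // the field address obj + offset in it for the kQuickA64Load call.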
   6517     __ Addiu32(locations->GetTemp(0).AsRegister<Register>(), obj, offset);
    6518     // Do an implicit null check: load into ZERO (result discarded, faults on null).
   6519     __ LoadFromOffset(kLoadWord,
   6520                       ZERO,
   6521                       locations->GetTemp(0).AsRegister<Register>(),
   6522                       0,
   6523                       null_checker);
   6524     codegen_->InvokeRuntime(kQuickA64Load, instruction, dex_pc);
   6525     CheckEntrypointTypes<kQuickA64Load, int64_t, volatile const int64_t*>();
   6526     if (type == DataType::Type::kFloat64) {
   6527       // FP results are returned in core registers. Need to move them.
   6528       if (dst_loc.IsFpuRegister()) {
   6529         __ Mtc1(locations->GetTemp(1).AsRegister<Register>(), dst_loc.AsFpuRegister<FRegister>());
   6530         __ MoveToFpuHigh(locations->GetTemp(2).AsRegister<Register>(),
   6531                          dst_loc.AsFpuRegister<FRegister>());
   6532       } else {
   6533         DCHECK(dst_loc.IsDoubleStackSlot());
   6534         __ StoreToOffset(kStoreWord,
   6535                          locations->GetTemp(1).AsRegister<Register>(),
   6536                          SP,
   6537                          dst_loc.GetStackIndex());
   6538         __ StoreToOffset(kStoreWord,
   6539                          locations->GetTemp(2).AsRegister<Register>(),
   6540                          SP,
   6541                          dst_loc.GetStackIndex() + 4);
   6542       }
   6543     }
   6544   } else {
   6545     if (type == DataType::Type::kReference) {
   6546       // /* HeapReference<Object> */ dst = *(obj + offset)
   6547       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
   6548         Location temp_loc =
   6549             kBakerReadBarrierThunksEnableForFields ? Location::NoLocation() : locations->GetTemp(0);
   6550         // Note that a potential implicit null check is handled in this
   6551         // CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier call.
   6552         codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
   6553                                                         dst_loc,
   6554                                                         obj,
   6555                                                         offset,
   6556                                                         temp_loc,
   6557                                                         /* needs_null_check */ true);
   6558         if (is_volatile) {
   6559           GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
   6560         }
   6561       } else {
   6562         __ LoadFromOffset(kLoadWord, dst_loc.AsRegister<Register>(), obj, offset, null_checker);
   6563         if (is_volatile) {
   6564           GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
   6565         }
   6566         // If read barriers are enabled, emit read barriers other than
   6567         // Baker's using a slow path (and also unpoison the loaded
   6568         // reference, if heap poisoning is enabled).
   6569         codegen_->MaybeGenerateReadBarrierSlow(instruction, dst_loc, dst_loc, obj_loc, offset);
   6570       }
   6571     } else if (!DataType::IsFloatingPointType(type)) {
   6572       Register dst;
   6573       if (type == DataType::Type::kInt64) {
   6574         DCHECK(dst_loc.IsRegisterPair());
   6575         dst = dst_loc.AsRegisterPairLow<Register>();
   6576       } else {
   6577         DCHECK(dst_loc.IsRegister());
   6578         dst = dst_loc.AsRegister<Register>();
   6579       }
   6580       __ LoadFromOffset(load_type, dst, obj, offset, null_checker);
   6581     } else {
   6582       DCHECK(dst_loc.IsFpuRegister());
   6583       FRegister dst = dst_loc.AsFpuRegister<FRegister>();
   6584       if (type == DataType::Type::kFloat32) {
   6585         __ LoadSFromOffset(dst, obj, offset, null_checker);
   6586       } else {
   6587         __ LoadDFromOffset(dst, obj, offset, null_checker);
   6588       }
   6589     }
   6590   }
   6591 
   6592   // Memory barriers, in the case of references, are handled in the
   6593   // previous switch statement.
   6594   if (is_volatile && (type != DataType::Type::kReference)) {
   6595     GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
   6596   }
   6597 }
   6598 
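         // Field sets mirror the gets above: volatile 64-bit stores go through the
         // kQuickA64Store entrypoint, with the value passed in core registers (calling
         // convention registers 2 and 3) even when it is a double.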
   6599 void LocationsBuilderMIPS::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
   6600   DataType::Type field_type = field_info.GetFieldType();
   6601   bool is_wide = (field_type == DataType::Type::kInt64) || (field_type == DataType::Type::kFloat64);
   6602   bool generate_volatile = field_info.IsVolatile() && is_wide;
   6603   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
   6604       instruction, generate_volatile ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall);
   6605 
   6606   locations->SetInAt(0, Location::RequiresRegister());
   6607   if (generate_volatile) {
   6608     InvokeRuntimeCallingConvention calling_convention;
    6609     // Need A0 to hold base + offset.
   6610     locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   6611     if (field_type == DataType::Type::kInt64) {
   6612       locations->SetInAt(1, Location::RegisterPairLocation(
   6613           calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
   6614     } else {
    6615       // Use Location::Any() to avoid running out of available FP registers.
   6616       locations->SetInAt(1, Location::Any());
   6617       // Pass FP parameters in core registers.
   6618       locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
   6619       locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
   6620     }
   6621   } else {
   6622     if (DataType::IsFloatingPointType(field_type)) {
   6623       locations->SetInAt(1, FpuRegisterOrConstantForStore(instruction->InputAt(1)));
   6624     } else {
   6625       locations->SetInAt(1, RegisterOrZeroConstant(instruction->InputAt(1)));
   6626     }
   6627   }
   6628 }
   6629 
   6630 void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
   6631                                                   const FieldInfo& field_info,
   6632                                                   uint32_t dex_pc,
   6633                                                   bool value_can_be_null) {
   6634   DataType::Type type = field_info.GetFieldType();
   6635   LocationSummary* locations = instruction->GetLocations();
   6636   Register obj = locations->InAt(0).AsRegister<Register>();
   6637   Location value_location = locations->InAt(1);
   6638   StoreOperandType store_type = kStoreByte;
   6639   bool is_volatile = field_info.IsVolatile();
   6640   uint32_t offset = field_info.GetFieldOffset().Uint32Value();
   6641   bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1));
   6642   auto null_checker = GetImplicitNullChecker(instruction, codegen_);
   6643 
   6644   switch (type) {
   6645     case DataType::Type::kBool:
   6646     case DataType::Type::kUint8:
   6647     case DataType::Type::kInt8:
   6648       store_type = kStoreByte;
   6649       break;
   6650     case DataType::Type::kUint16:
   6651     case DataType::Type::kInt16:
   6652       store_type = kStoreHalfword;
   6653       break;
   6654     case DataType::Type::kInt32:
   6655     case DataType::Type::kFloat32:
   6656     case DataType::Type::kReference:
   6657       store_type = kStoreWord;
   6658       break;
   6659     case DataType::Type::kInt64:
   6660     case DataType::Type::kFloat64:
   6661       store_type = kStoreDoubleword;
   6662       break;
   6663     case DataType::Type::kUint32:
   6664     case DataType::Type::kUint64:
   6665     case DataType::Type::kVoid:
   6666       LOG(FATAL) << "Unreachable type " << type;
   6667       UNREACHABLE();
   6668   }
   6669 
   6670   if (is_volatile) {
   6671     GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
   6672   }
   6673 
   6674   if (is_volatile && store_type == kStoreDoubleword) {
   6675     InvokeRuntimeCallingConvention calling_convention;
   6676     __ Addiu32(locations->GetTemp(0).AsRegister<Register>(), obj, offset);
    6677     // Do an implicit null check: load into ZERO (result discarded, faults on null).
   6678     __ LoadFromOffset(kLoadWord,
   6679                       ZERO,
   6680                       locations->GetTemp(0).AsRegister<Register>(),
   6681                       0,
   6682                       null_checker);
   6683     if (type == DataType::Type::kFloat64) {
   6684       // Pass FP parameters in core registers.
   6685       if (value_location.IsFpuRegister()) {
   6686         __ Mfc1(locations->GetTemp(1).AsRegister<Register>(),
   6687                 value_location.AsFpuRegister<FRegister>());
   6688         __ MoveFromFpuHigh(locations->GetTemp(2).AsRegister<Register>(),
   6689                            value_location.AsFpuRegister<FRegister>());
   6690       } else if (value_location.IsDoubleStackSlot()) {
   6691         __ LoadFromOffset(kLoadWord,
   6692                           locations->GetTemp(1).AsRegister<Register>(),
   6693                           SP,
   6694                           value_location.GetStackIndex());
   6695         __ LoadFromOffset(kLoadWord,
   6696                           locations->GetTemp(2).AsRegister<Register>(),
   6697                           SP,
   6698                           value_location.GetStackIndex() + 4);
   6699       } else {
   6700         DCHECK(value_location.IsConstant());
   6701         DCHECK(value_location.GetConstant()->IsDoubleConstant());
   6702         int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
   6703         __ LoadConst64(locations->GetTemp(2).AsRegister<Register>(),
   6704                        locations->GetTemp(1).AsRegister<Register>(),
   6705                        value);
   6706       }
   6707     }
   6708     codegen_->InvokeRuntime(kQuickA64Store, instruction, dex_pc);
   6709     CheckEntrypointTypes<kQuickA64Store, void, volatile int64_t *, int64_t>();
   6710   } else {
   6711     if (value_location.IsConstant()) {
   6712       int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
   6713       __ StoreConstToOffset(store_type, value, obj, offset, TMP, null_checker);
   6714     } else if (!DataType::IsFloatingPointType(type)) {
   6715       Register src;
   6716       if (type == DataType::Type::kInt64) {
   6717         src = value_location.AsRegisterPairLow<Register>();
   6718       } else {
   6719         src = value_location.AsRegister<Register>();
   6720       }
   6721       if (kPoisonHeapReferences && needs_write_barrier) {
   6722         // Note that in the case where `value` is a null reference,
   6723         // we do not enter this block, as a null reference does not
   6724         // need poisoning.
   6725         DCHECK_EQ(type, DataType::Type::kReference);
   6726         __ PoisonHeapReference(TMP, src);
   6727         __ StoreToOffset(store_type, TMP, obj, offset, null_checker);
   6728       } else {
   6729         __ StoreToOffset(store_type, src, obj, offset, null_checker);
   6730       }
   6731     } else {
   6732       FRegister src = value_location.AsFpuRegister<FRegister>();
   6733       if (type == DataType::Type::kFloat32) {
   6734         __ StoreSToOffset(src, obj, offset, null_checker);
   6735       } else {
   6736         __ StoreDToOffset(src, obj, offset, null_checker);
   6737       }
   6738     }
   6739   }
   6740 
   6741   if (needs_write_barrier) {
   6742     Register src = value_location.AsRegister<Register>();
   6743     codegen_->MarkGCCard(obj, src, value_can_be_null);
   6744   }
   6745 
   6746   if (is_volatile) {
   6747     GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
   6748   }
   6749 }
   6750 
   6751 void LocationsBuilderMIPS::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
   6752   HandleFieldGet(instruction, instruction->GetFieldInfo());
   6753 }
   6754 
   6755 void InstructionCodeGeneratorMIPS::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
   6756   HandleFieldGet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
   6757 }
   6758 
   6759 void LocationsBuilderMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
   6760   HandleFieldSet(instruction, instruction->GetFieldInfo());
   6761 }
   6762 
   6763 void InstructionCodeGeneratorMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
   6764   HandleFieldSet(instruction,
   6765                  instruction->GetFieldInfo(),
   6766                  instruction->GetDexPc(),
   6767                  instruction->GetValueCanBeNull());
   6768 }
   6769 
   6770 void InstructionCodeGeneratorMIPS::GenerateReferenceLoadOneRegister(
   6771     HInstruction* instruction,
   6772     Location out,
   6773     uint32_t offset,
   6774     Location maybe_temp,
   6775     ReadBarrierOption read_barrier_option) {
   6776   Register out_reg = out.AsRegister<Register>();
   6777   if (read_barrier_option == kWithReadBarrier) {
   6778     CHECK(kEmitCompilerReadBarrier);
   6779     if (!kUseBakerReadBarrier || !kBakerReadBarrierThunksEnableForFields) {
   6780       DCHECK(maybe_temp.IsRegister()) << maybe_temp;
   6781     }
   6782     if (kUseBakerReadBarrier) {
   6783       // Load with fast path based Baker's read barrier.
   6784       // /* HeapReference<Object> */ out = *(out + offset)
   6785       codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
   6786                                                       out,
   6787                                                       out_reg,
   6788                                                       offset,
   6789                                                       maybe_temp,
   6790                                                       /* needs_null_check */ false);
   6791     } else {
   6792       // Load with slow path based read barrier.
   6793       // Save the value of `out` into `maybe_temp` before overwriting it
   6794       // in the following move operation, as we will need it for the
   6795       // read barrier below.
   6796       __ Move(maybe_temp.AsRegister<Register>(), out_reg);
   6797       // /* HeapReference<Object> */ out = *(out + offset)
   6798       __ LoadFromOffset(kLoadWord, out_reg, out_reg, offset);
   6799       codegen_->GenerateReadBarrierSlow(instruction, out, out, maybe_temp, offset);
   6800     }
   6801   } else {
   6802     // Plain load with no read barrier.
   6803     // /* HeapReference<Object> */ out = *(out + offset)
   6804     __ LoadFromOffset(kLoadWord, out_reg, out_reg, offset);
   6805     __ MaybeUnpoisonHeapReference(out_reg);
   6806   }
   6807 }
   6808 
   6809 void InstructionCodeGeneratorMIPS::GenerateReferenceLoadTwoRegisters(
   6810     HInstruction* instruction,
   6811     Location out,
   6812     Location obj,
   6813     uint32_t offset,
   6814     Location maybe_temp,
   6815     ReadBarrierOption read_barrier_option) {
   6816   Register out_reg = out.AsRegister<Register>();
   6817   Register obj_reg = obj.AsRegister<Register>();
   6818   if (read_barrier_option == kWithReadBarrier) {
   6819     CHECK(kEmitCompilerReadBarrier);
   6820     if (kUseBakerReadBarrier) {
   6821       if (!kBakerReadBarrierThunksEnableForFields) {
   6822         DCHECK(maybe_temp.IsRegister()) << maybe_temp;
   6823       }
   6824       // Load with fast path based Baker's read barrier.
   6825       // /* HeapReference<Object> */ out = *(obj + offset)
   6826       codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
   6827                                                       out,
   6828                                                       obj_reg,
   6829                                                       offset,
   6830                                                       maybe_temp,
   6831                                                       /* needs_null_check */ false);
   6832     } else {
   6833       // Load with slow path based read barrier.
   6834       // /* HeapReference<Object> */ out = *(obj + offset)
   6835       __ LoadFromOffset(kLoadWord, out_reg, obj_reg, offset);
   6836       codegen_->GenerateReadBarrierSlow(instruction, out, out, obj, offset);
   6837     }
   6838   } else {
   6839     // Plain load with no read barrier.
   6840     // /* HeapReference<Object> */ out = *(obj + offset)
   6841     __ LoadFromOffset(kLoadWord, out_reg, obj_reg, offset);
   6842     __ MaybeUnpoisonHeapReference(out_reg);
   6843   }
   6844 }
   6845 
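         // Maps a register to its Baker mark thunk index: V0..T7 -> 0..13,
         // S2..S7 -> 14..19, FP -> 20, i.e. BAKER_MARK_INTROSPECTION_REGISTER_COUNT == 21
         // thunks in total.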
   6846 static inline int GetBakerMarkThunkNumber(Register reg) {
   6847   static_assert(BAKER_MARK_INTROSPECTION_REGISTER_COUNT == 21, "Expecting equal");
    6848   if (reg >= V0 && reg <= T7) {  // 14 consecutive regs.
   6849     return reg - V0;
    6850   } else if (reg >= S2 && reg <= S7) {  // 6 consecutive regs.
   6851     return 14 + (reg - S2);
   6852   } else if (reg == FP) {  // One more.
   6853     return 20;
   6854   }
   6855   LOG(FATAL) << "Unexpected register " << reg;
   6856   UNREACHABLE();
   6857 }
   6858 
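         // Each register has two field/array mark thunks: one for loads whose offset fits
         // the signed 16-bit lw immediate (short) and one for loads through a pre-adjusted
         // base; the short-offset variants are laid out
         // BAKER_MARK_INTROSPECTION_REGISTER_COUNT entries after the others.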
   6859 static inline int GetBakerMarkFieldArrayThunkDisplacement(Register reg, bool short_offset) {
   6860   int num = GetBakerMarkThunkNumber(reg) +
   6861       (short_offset ? BAKER_MARK_INTROSPECTION_REGISTER_COUNT : 0);
   6862   return num * BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE;
   6863 }
   6864 
   6865 static inline int GetBakerMarkGcRootThunkDisplacement(Register reg) {
   6866   return GetBakerMarkThunkNumber(reg) * BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE +
   6867       BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET;
   6868 }
   6869 
   6870 void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(HInstruction* instruction,
   6871                                                            Location root,
   6872                                                            Register obj,
   6873                                                            uint32_t offset,
   6874                                                            ReadBarrierOption read_barrier_option,
   6875                                                            MipsLabel* label_low) {
   6876   bool reordering;
   6877   if (label_low != nullptr) {
   6878     DCHECK_EQ(offset, 0x5678u);
   6879   }
   6880   Register root_reg = root.AsRegister<Register>();
   6881   if (read_barrier_option == kWithReadBarrier) {
   6882     DCHECK(kEmitCompilerReadBarrier);
   6883     if (kUseBakerReadBarrier) {
   6884       // Fast path implementation of art::ReadBarrier::BarrierForRoot when
    6885       // Baker's read barriers are used:
   6886       if (kBakerReadBarrierThunksEnableForGcRoots) {
   6887         // Note that we do not actually check the value of `GetIsGcMarking()`
   6888         // to decide whether to mark the loaded GC root or not.  Instead, we
   6889         // load into `temp` (T9) the read barrier mark introspection entrypoint.
   6890         // If `temp` is null, it means that `GetIsGcMarking()` is false, and
   6891         // vice versa.
   6892         //
   6893         // We use thunks for the slow path. That thunk checks the reference
   6894         // and jumps to the entrypoint if needed.
   6895         //
   6896         //     temp = Thread::Current()->pReadBarrierMarkReg00
   6897         //     // AKA &art_quick_read_barrier_mark_introspection.
   6898         //     GcRoot<mirror::Object> root = *(obj+offset);  // Original reference load.
   6899         //     if (temp != nullptr) {
   6900         //        temp = &gc_root_thunk<root_reg>
   6901         //        root = temp(root)
   6902         //     }
   6903 
   6904         bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   6905         const int32_t entry_point_offset =
   6906             Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(0);
   6907         const int thunk_disp = GetBakerMarkGcRootThunkDisplacement(root_reg);
   6908         int16_t offset_low = Low16Bits(offset);
   6909         int16_t offset_high = High16Bits(offset - offset_low);  // Accounts for sign
   6910                                                                 // extension in lw.
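                 // E.g. offset 0x12348000: offset_low = -0x8000, offset_high = 0x1235,
                 // and (obj + 0x12350000) + sign_extend(-0x8000) == obj + 0x12348000 as
                 // required.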
   6911         bool short_offset = IsInt<16>(static_cast<int32_t>(offset));
   6912         Register base = short_offset ? obj : TMP;
   6913         // Loading the entrypoint does not require a load acquire since it is only changed when
   6914         // threads are suspended or running a checkpoint.
   6915         __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
   6916         reordering = __ SetReorder(false);
   6917         if (!short_offset) {
   6918           DCHECK(!label_low);
   6919           __ AddUpper(base, obj, offset_high);
   6920         }
   6921         MipsLabel skip_call;
   6922         __ Beqz(T9, &skip_call, /* is_bare */ true);
   6923         if (label_low != nullptr) {
   6924           DCHECK(short_offset);
   6925           __ Bind(label_low);
   6926         }
   6927         // /* GcRoot<mirror::Object> */ root = *(obj + offset)
   6928         __ LoadFromOffset(kLoadWord, root_reg, base, offset_low);  // Single instruction
   6929                                                                    // in delay slot.
   6930         if (isR6) {
   6931           __ Jialc(T9, thunk_disp);
   6932         } else {
   6933           __ Addiu(T9, T9, thunk_disp);
   6934           __ Jalr(T9);
   6935           __ Nop();
   6936         }
   6937         __ Bind(&skip_call);
   6938         __ SetReorder(reordering);
   6939       } else {
   6940         // Note that we do not actually check the value of `GetIsGcMarking()`
   6941         // to decide whether to mark the loaded GC root or not.  Instead, we
   6942         // load into `temp` (T9) the read barrier mark entry point corresponding
   6943         // to register `root`. If `temp` is null, it means that `GetIsGcMarking()`
   6944         // is false, and vice versa.
   6945         //
   6946         //     GcRoot<mirror::Object> root = *(obj+offset);  // Original reference load.
   6947         //     temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
   6948         //     if (temp != null) {
   6949         //       root = temp(root)
   6950         //     }
   6951 
   6952         if (label_low != nullptr) {
   6953           reordering = __ SetReorder(false);
   6954           __ Bind(label_low);
   6955         }
   6956         // /* GcRoot<mirror::Object> */ root = *(obj + offset)
   6957         __ LoadFromOffset(kLoadWord, root_reg, obj, offset);
   6958         if (label_low != nullptr) {
   6959           __ SetReorder(reordering);
   6960         }
   6961         static_assert(
   6962             sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
   6963             "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
   6964             "have different sizes.");
   6965         static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
   6966                       "art::mirror::CompressedReference<mirror::Object> and int32_t "
   6967                       "have different sizes.");
   6968 
   6969         // Slow path marking the GC root `root`.
   6970         Location temp = Location::RegisterLocation(T9);
   6971         SlowPathCodeMIPS* slow_path =
   6972             new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(
   6973                 instruction,
   6974                 root,
   6975                 /*entrypoint*/ temp);
   6976         codegen_->AddSlowPath(slow_path);
   6977 
   6978         const int32_t entry_point_offset =
   6979             Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(root.reg() - 1);
   6980         // Loading the entrypoint does not require a load acquire since it is only changed when
   6981         // threads are suspended or running a checkpoint.
   6982         __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), TR, entry_point_offset);
   6983         __ Bnez(temp.AsRegister<Register>(), slow_path->GetEntryLabel());
   6984         __ Bind(slow_path->GetExitLabel());
   6985       }
   6986     } else {
   6987       if (label_low != nullptr) {
   6988         reordering = __ SetReorder(false);
   6989         __ Bind(label_low);
   6990       }
   6991       // GC root loaded through a slow path for read barriers other
   6992       // than Baker's.
   6993       // /* GcRoot<mirror::Object>* */ root = obj + offset
   6994       __ Addiu32(root_reg, obj, offset);
   6995       if (label_low != nullptr) {
   6996         __ SetReorder(reordering);
   6997       }
   6998       // /* mirror::Object* */ root = root->Read()
   6999       codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
   7000     }
   7001   } else {
   7002     if (label_low != nullptr) {
   7003       reordering = __ SetReorder(false);
   7004       __ Bind(label_low);
   7005     }
   7006     // Plain GC root load with no read barrier.
   7007     // /* GcRoot<mirror::Object> */ root = *(obj + offset)
   7008     __ LoadFromOffset(kLoadWord, root_reg, obj, offset);
   7009     // Note that GC roots are not affected by heap poisoning, thus we
   7010     // do not have to unpoison `root_reg` here.
   7011     if (label_low != nullptr) {
   7012       __ SetReorder(reordering);
   7013     }
   7014   }
   7015 }
   7016 
   7017 void CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
   7018                                                               Location ref,
   7019                                                               Register obj,
   7020                                                               uint32_t offset,
   7021                                                               Location temp,
   7022                                                               bool needs_null_check) {
   7023   DCHECK(kEmitCompilerReadBarrier);
   7024   DCHECK(kUseBakerReadBarrier);
   7025 
   7026   if (kBakerReadBarrierThunksEnableForFields) {
   7027     // Note that we do not actually check the value of `GetIsGcMarking()`
   7028     // to decide whether to mark the loaded reference or not.  Instead, we
   7029     // load into `temp` (T9) the read barrier mark introspection entrypoint.
   7030     // If `temp` is null, it means that `GetIsGcMarking()` is false, and
   7031     // vice versa.
   7032     //
   7033     // We use thunks for the slow path. That thunk checks the reference
   7034     // and jumps to the entrypoint if needed. If the holder is not gray,
   7035     // it issues a load-load memory barrier and returns to the original
   7036     // reference load.
   7037     //
   7038     //     temp = Thread::Current()->pReadBarrierMarkReg00
   7039     //     // AKA &art_quick_read_barrier_mark_introspection.
   7040     //     if (temp != nullptr) {
   7041     //        temp = &field_array_thunk<holder_reg>
   7042     //        temp()
   7043     //     }
   7044     //   not_gray_return_address:
   7045     //     // If the offset is too large to fit into the lw instruction, we
   7046     //     // use an adjusted base register (TMP) here. This register
   7047     //     // receives bits 16 ... 31 of the offset before the thunk invocation
   7048     //     // and the thunk benefits from it.
   7049     //     HeapReference<mirror::Object> reference = *(obj+offset);  // Original reference load.
   7050     //   gray_return_address:
   7051 
   7052     DCHECK(temp.IsInvalid());
   7053     bool isR6 = GetInstructionSetFeatures().IsR6();
   7054     int16_t offset_low = Low16Bits(offset);
   7055     int16_t offset_high = High16Bits(offset - offset_low);  // Accounts for sign extension in lw.
   7056     bool short_offset = IsInt<16>(static_cast<int32_t>(offset));
   7057     bool reordering = __ SetReorder(false);
   7058     const int32_t entry_point_offset =
   7059         Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(0);
    7060     // There may or may not have been a null check already if the field offset
    7061     // is smaller than the page size.
    7062     // There must have been a null check if this is actually a load from an array.
    7063     // We will, however, perform an explicit null check in the thunk, as it is
    7064     // easier to do so than not.
   7065     if (instruction->IsArrayGet()) {
   7066       DCHECK(!needs_null_check);
   7067     }
   7068     const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, short_offset);
   7069     // Loading the entrypoint does not require a load acquire since it is only changed when
   7070     // threads are suspended or running a checkpoint.
   7071     __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
   7072     Register ref_reg = ref.AsRegister<Register>();
   7073     Register base = short_offset ? obj : TMP;
   7074     MipsLabel skip_call;
   7075     if (short_offset) {
   7076       if (isR6) {
   7077         __ Beqzc(T9, &skip_call, /* is_bare */ true);
   7078         __ Nop();  // In forbidden slot.
   7079         __ Jialc(T9, thunk_disp);
   7080       } else {
   7081         __ Beqz(T9, &skip_call, /* is_bare */ true);
   7082         __ Addiu(T9, T9, thunk_disp);  // In delay slot.
   7083         __ Jalr(T9);
   7084         __ Nop();  // In delay slot.
   7085       }
   7086       __ Bind(&skip_call);
   7087     } else {
   7088       if (isR6) {
   7089         __ Beqz(T9, &skip_call, /* is_bare */ true);
   7090         __ Aui(base, obj, offset_high);  // In delay slot.
   7091         __ Jialc(T9, thunk_disp);
   7092         __ Bind(&skip_call);
   7093       } else {
   7094         __ Lui(base, offset_high);
   7095         __ Beqz(T9, &skip_call, /* is_bare */ true);
   7096         __ Addiu(T9, T9, thunk_disp);  // In delay slot.
   7097         __ Jalr(T9);
   7098         __ Bind(&skip_call);
   7099         __ Addu(base, base, obj);  // In delay slot.
   7100       }
   7101     }
   7102     // /* HeapReference<Object> */ ref = *(obj + offset)
   7103     __ LoadFromOffset(kLoadWord, ref_reg, base, offset_low);  // Single instruction.
   7104     if (needs_null_check) {
   7105       MaybeRecordImplicitNullCheck(instruction);
   7106     }
   7107     __ MaybeUnpoisonHeapReference(ref_reg);
   7108     __ SetReorder(reordering);
   7109     return;
   7110   }
   7111 
   7112   // /* HeapReference<Object> */ ref = *(obj + offset)
   7113   Location no_index = Location::NoLocation();
   7114   ScaleFactor no_scale_factor = TIMES_1;
   7115   GenerateReferenceLoadWithBakerReadBarrier(instruction,
   7116                                             ref,
   7117                                             obj,
   7118                                             offset,
   7119                                             no_index,
   7120                                             no_scale_factor,
   7121                                             temp,
   7122                                             needs_null_check);
   7123 }
   7124 
   7125 void CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
   7126                                                               Location ref,
   7127                                                               Register obj,
   7128                                                               uint32_t data_offset,
   7129                                                               Location index,
   7130                                                               Location temp,
   7131                                                               bool needs_null_check) {
   7132   DCHECK(kEmitCompilerReadBarrier);
   7133   DCHECK(kUseBakerReadBarrier);
   7134 
   7135   static_assert(
   7136       sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
   7137       "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
   7138   ScaleFactor scale_factor = TIMES_4;
   7139 
   7140   if (kBakerReadBarrierThunksEnableForArrays) {
   7141     // Note that we do not actually check the value of `GetIsGcMarking()`
   7142     // to decide whether to mark the loaded reference or not.  Instead, we
   7143     // load into `temp` (T9) the read barrier mark introspection entrypoint.
   7144     // If `temp` is null, it means that `GetIsGcMarking()` is false, and
   7145     // vice versa.
   7146     //
   7147     // We use thunks for the slow path. That thunk checks the reference
   7148     // and jumps to the entrypoint if needed. If the holder is not gray,
   7149     // it issues a load-load memory barrier and returns to the original
   7150     // reference load.
   7151     //
   7152     //     temp = Thread::Current()->pReadBarrierMarkReg00
   7153     //     // AKA &art_quick_read_barrier_mark_introspection.
   7154     //     if (temp != nullptr) {
   7155     //        temp = &field_array_thunk<holder_reg>
   7156     //        temp()
   7157     //     }
   7158     //   not_gray_return_address:
   7159     //     // The element address is pre-calculated in the TMP register before the
   7160     //     // thunk invocation and the thunk benefits from it.
   7161     //     HeapReference<mirror::Object> reference = data[index];  // Original reference load.
   7162     //   gray_return_address:
   7163 
   7164     DCHECK(temp.IsInvalid());
   7165     DCHECK(index.IsValid());
   7166     bool reordering = __ SetReorder(false);
   7167     const int32_t entry_point_offset =
   7168         Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(0);
    7169     // We will not do the explicit null check in the thunk as some form of a null check
    7170     // must have been done earlier.
   7171     DCHECK(!needs_null_check);
   7172     const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset */ false);
   7173     // Loading the entrypoint does not require a load acquire since it is only changed when
   7174     // threads are suspended or running a checkpoint.
   7175     __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
   7176     Register ref_reg = ref.AsRegister<Register>();
   7177     Register index_reg = index.IsRegisterPair()
   7178         ? index.AsRegisterPairLow<Register>()
   7179         : index.AsRegister<Register>();
   7180     MipsLabel skip_call;
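             // Precompute the element base TMP = obj + (index << scale_factor) in the
             // delay slot(s): R6 does it with a single lsa, while pre-R6 splits it into
             // sll before the branch and addu in the jalr delay slot.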
   7181     if (GetInstructionSetFeatures().IsR6()) {
   7182       __ Beqz(T9, &skip_call, /* is_bare */ true);
   7183       __ Lsa(TMP, index_reg, obj, scale_factor);  // In delay slot.
   7184       __ Jialc(T9, thunk_disp);
   7185       __ Bind(&skip_call);
   7186     } else {
   7187       __ Sll(TMP, index_reg, scale_factor);
   7188       __ Beqz(T9, &skip_call, /* is_bare */ true);
   7189       __ Addiu(T9, T9, thunk_disp);  // In delay slot.
   7190       __ Jalr(T9);
   7191       __ Bind(&skip_call);
   7192       __ Addu(TMP, TMP, obj);  // In delay slot.
   7193     }
   7194     // /* HeapReference<Object> */ ref = *(obj + data_offset + (index << scale_factor))
   7195     DCHECK(IsInt<16>(static_cast<int32_t>(data_offset))) << data_offset;
   7196     __ LoadFromOffset(kLoadWord, ref_reg, TMP, data_offset);  // Single instruction.
   7197     __ MaybeUnpoisonHeapReference(ref_reg);
   7198     __ SetReorder(reordering);
   7199     return;
   7200   }
   7201 
   7202   // /* HeapReference<Object> */ ref =
   7203   //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
   7204   GenerateReferenceLoadWithBakerReadBarrier(instruction,
   7205                                             ref,
   7206                                             obj,
   7207                                             data_offset,
   7208                                             index,
   7209                                             scale_factor,
   7210                                             temp,
   7211                                             needs_null_check);
   7212 }
   7213 
   7214 void CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
   7215                                                                   Location ref,
   7216                                                                   Register obj,
   7217                                                                   uint32_t offset,
   7218                                                                   Location index,
   7219                                                                   ScaleFactor scale_factor,
   7220                                                                   Location temp,
   7221                                                                   bool needs_null_check,
   7222                                                                   bool always_update_field) {
   7223   DCHECK(kEmitCompilerReadBarrier);
   7224   DCHECK(kUseBakerReadBarrier);
   7225 
   7226   // In slow path based read barriers, the read barrier call is
   7227   // inserted after the original load. However, in fast path based
   7228   // Baker's read barriers, we need to perform the load of
   7229   // mirror::Object::monitor_ *before* the original reference load.
   7230   // This load-load ordering is required by the read barrier.
   7231   // The fast path/slow path (for Baker's algorithm) should look like:
   7232   //
   7233   //   uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
   7234   //   lfence;  // Load fence or artificial data dependency to prevent load-load reordering
   7235   //   HeapReference<Object> ref = *src;  // Original reference load.
   7236   //   bool is_gray = (rb_state == ReadBarrier::GrayState());
   7237   //   if (is_gray) {
   7238   //     ref = ReadBarrier::Mark(ref);  // Performed by runtime entrypoint slow path.
   7239   //   }
   7240   //
   7241   // Note: the original implementation in ReadBarrier::Barrier is
   7242   // slightly more complex as it performs additional checks that we do
   7243   // not do here for performance reasons.
   7244 
   7245   Register ref_reg = ref.AsRegister<Register>();
   7246   Register temp_reg = temp.AsRegister<Register>();
   7247   uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
   7248 
   7249   // /* int32_t */ monitor = obj->monitor_
   7250   __ LoadFromOffset(kLoadWord, temp_reg, obj, monitor_offset);
   7251   if (needs_null_check) {
   7252     MaybeRecordImplicitNullCheck(instruction);
   7253   }
   7254   // /* LockWord */ lock_word = LockWord(monitor)
   7255   static_assert(sizeof(LockWord) == sizeof(int32_t),
   7256                 "art::LockWord and int32_t have different sizes.");
   7257 
   7258   __ Sync(0);  // Barrier to prevent load-load reordering.
   7259 
   7260   // The actual reference load.
   7261   if (index.IsValid()) {
   7262     // Load types involving an "index": ArrayGet,
   7263     // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
   7264     // intrinsics.
   7265     // /* HeapReference<Object> */ ref = *(obj + offset + (index << scale_factor))
   7266     if (index.IsConstant()) {
   7267       size_t computed_offset =
   7268           (index.GetConstant()->AsIntConstant()->GetValue() << scale_factor) + offset;
   7269       __ LoadFromOffset(kLoadWord, ref_reg, obj, computed_offset);
   7270     } else {
   7271       // Handle the special case of the
   7272       // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
   7273       // intrinsics, which use a register pair as index ("long
   7274       // offset"), of which only the low part contains data.
   7275       Register index_reg = index.IsRegisterPair()
   7276           ? index.AsRegisterPairLow<Register>()
   7277           : index.AsRegister<Register>();
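               // TMP = obj + (index << scale_factor); the lw below adds `offset` on top.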
   7278       __ ShiftAndAdd(TMP, index_reg, obj, scale_factor, TMP);
   7279       __ LoadFromOffset(kLoadWord, ref_reg, TMP, offset);
   7280     }
   7281   } else {
   7282     // /* HeapReference<Object> */ ref = *(obj + offset)
   7283     __ LoadFromOffset(kLoadWord, ref_reg, obj, offset);
   7284   }
   7285 
   7286   // Object* ref = ref_addr->AsMirrorPtr()
   7287   __ MaybeUnpoisonHeapReference(ref_reg);
   7288 
   7289   // Slow path marking the object `ref` when it is gray.
   7290   SlowPathCodeMIPS* slow_path;
   7291   if (always_update_field) {
   7292     // ReadBarrierMarkAndUpdateFieldSlowPathMIPS only supports address
   7293     // of the form `obj + field_offset`, where `obj` is a register and
   7294     // `field_offset` is a register pair (of which only the lower half
    7295     // is used). Thus `offset` is expected to be zero and `scale_factor` to be
    7296     // TIMES_1 (a shift of zero) in this code path.
   7297     DCHECK_EQ(offset, 0u);
   7298     DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
   7299     slow_path = new (GetScopedAllocator())
   7300         ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction,
   7301                                                   ref,
   7302                                                   obj,
   7303                                                   /* field_offset */ index,
   7304                                                   temp_reg);
   7305   } else {
   7306     slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
   7307   }
   7308   AddSlowPath(slow_path);
   7309 
   7310   // if (rb_state == ReadBarrier::GrayState())
   7311   //   ref = ReadBarrier::Mark(ref);
   7312   // Given the numeric representation, it's enough to check the low bit of the
   7313   // rb_state. We do that by shifting the bit into the sign bit (31) and
   7314   // performing a branch on less than zero.
   7315   static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
   7316   static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
   7317   static_assert(LockWord::kReadBarrierStateSize == 1, "Expecting 1-bit read barrier state size");
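          // For example, with LockWord::kReadBarrierStateShift == 28 (its value
          // in lock_word.h), the Sll below shifts left by 31 - 28 = 3, moving
          // the rb_state bit from bit 28 into the sign bit; Bltz then branches
          // to the slow path exactly when the state is gray.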
   7318   __ Sll(temp_reg, temp_reg, 31 - LockWord::kReadBarrierStateShift);
   7319   __ Bltz(temp_reg, slow_path->GetEntryLabel());
   7320   __ Bind(slow_path->GetExitLabel());
   7321 }
   7322 
   7323 void CodeGeneratorMIPS::GenerateReadBarrierSlow(HInstruction* instruction,
   7324                                                 Location out,
   7325                                                 Location ref,
   7326                                                 Location obj,
   7327                                                 uint32_t offset,
   7328                                                 Location index) {
   7329   DCHECK(kEmitCompilerReadBarrier);
   7330 
   7331   // Insert a slow path based read barrier *after* the reference load.
   7332   //
   7333   // If heap poisoning is enabled, the unpoisoning of the loaded
   7334   // reference will be carried out by the runtime within the slow
   7335   // path.
   7336   //
   7337   // Note that `ref` currently does not get unpoisoned (when heap
   7338   // poisoning is enabled), which is alright as the `ref` argument is
   7339   // not used by the artReadBarrierSlow entry point.
   7340   //
   7341   // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
   7342   SlowPathCodeMIPS* slow_path = new (GetScopedAllocator())
   7343       ReadBarrierForHeapReferenceSlowPathMIPS(instruction, out, ref, obj, offset, index);
   7344   AddSlowPath(slow_path);
   7345 
   7346   __ B(slow_path->GetEntryLabel());
   7347   __ Bind(slow_path->GetExitLabel());
   7348 }
   7349 
   7350 void CodeGeneratorMIPS::MaybeGenerateReadBarrierSlow(HInstruction* instruction,
   7351                                                      Location out,
   7352                                                      Location ref,
   7353                                                      Location obj,
   7354                                                      uint32_t offset,
   7355                                                      Location index) {
   7356   if (kEmitCompilerReadBarrier) {
   7357     // Baker's read barriers shall be handled by the fast path
   7358     // (CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier).
   7359     DCHECK(!kUseBakerReadBarrier);
   7360     // If heap poisoning is enabled, unpoisoning will be taken care of
   7361     // by the runtime within the slow path.
   7362     GenerateReadBarrierSlow(instruction, out, ref, obj, offset, index);
   7363   } else if (kPoisonHeapReferences) {
   7364     __ UnpoisonHeapReference(out.AsRegister<Register>());
   7365   }
   7366 }
   7367 
   7368 void CodeGeneratorMIPS::GenerateReadBarrierForRootSlow(HInstruction* instruction,
   7369                                                        Location out,
   7370                                                        Location root) {
   7371   DCHECK(kEmitCompilerReadBarrier);
   7372 
   7373   // Insert a slow path based read barrier *after* the GC root load.
   7374   //
   7375   // Note that GC roots are not affected by heap poisoning, so we do
   7376   // not need to do anything special for this here.
   7377   SlowPathCodeMIPS* slow_path =
   7378       new (GetScopedAllocator()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
   7379   AddSlowPath(slow_path);
   7380 
   7381   __ B(slow_path->GetEntryLabel());
   7382   __ Bind(slow_path->GetExitLabel());
   7383 }
   7384 
   7385 void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
   7386   LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
   7387   TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
   7388   bool baker_read_barrier_slow_path = false;
   7389   switch (type_check_kind) {
   7390     case TypeCheckKind::kExactCheck:
   7391     case TypeCheckKind::kAbstractClassCheck:
   7392     case TypeCheckKind::kClassHierarchyCheck:
   7393     case TypeCheckKind::kArrayObjectCheck: {
   7394       bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
   7395       call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
   7396       baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
   7397       break;
   7398     }
   7399     case TypeCheckKind::kArrayCheck:
   7400     case TypeCheckKind::kUnresolvedCheck:
   7401     case TypeCheckKind::kInterfaceCheck:
   7402       call_kind = LocationSummary::kCallOnSlowPath;
   7403       break;
   7404   }
   7405 
   7406   LocationSummary* locations =
   7407       new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
   7408   if (baker_read_barrier_slow_path) {
   7409     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
   7410   }
   7411   locations->SetInAt(0, Location::RequiresRegister());
   7412   locations->SetInAt(1, Location::RequiresRegister());
   7413   // The output overlaps the inputs.
   7414   // Note that TypeCheckSlowPathMIPS uses this register too.
   7415   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
   7416   locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
   7417 }
   7418 
   7419 void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
   7420   TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
   7421   LocationSummary* locations = instruction->GetLocations();
   7422   Location obj_loc = locations->InAt(0);
   7423   Register obj = obj_loc.AsRegister<Register>();
   7424   Register cls = locations->InAt(1).AsRegister<Register>();
   7425   Location out_loc = locations->Out();
   7426   Register out = out_loc.AsRegister<Register>();
   7427   const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
   7428   DCHECK_LE(num_temps, 1u);
   7429   Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
   7430   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   7431   uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
   7432   uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
   7433   uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
   7434   MipsLabel done;
   7435   SlowPathCodeMIPS* slow_path = nullptr;
   7436 
   7437   // Return 0 if `obj` is null.
   7438   // Avoid this check if we know `obj` is not null.
   7439   if (instruction->MustDoNullCheck()) {
   7440     __ Move(out, ZERO);
   7441     __ Beqz(obj, &done);
   7442   }
   7443 
   7444   switch (type_check_kind) {
   7445     case TypeCheckKind::kExactCheck: {
   7446       ReadBarrierOption read_barrier_option =
   7447           CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
   7448       // /* HeapReference<Class> */ out = obj->klass_
   7449       GenerateReferenceLoadTwoRegisters(instruction,
   7450                                         out_loc,
   7451                                         obj_loc,
   7452                                         class_offset,
   7453                                         maybe_temp_loc,
   7454                                         read_barrier_option);
   7455       // Classes must be equal for the instanceof to succeed.
   7456       __ Xor(out, out, cls);
   7457       __ Sltiu(out, out, 1);
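              // The Xor/Sltiu pair above computes `out = ((out ^ cls) < 1)`,
              // i.e. out = 1 iff the two class pointers are equal (the XOR of
              // equal values is 0, and only 0 is unsigned-less-than 1).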
   7458       break;
   7459     }
   7460 
   7461     case TypeCheckKind::kAbstractClassCheck: {
   7462       ReadBarrierOption read_barrier_option =
   7463           CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
   7464       // /* HeapReference<Class> */ out = obj->klass_
   7465       GenerateReferenceLoadTwoRegisters(instruction,
   7466                                         out_loc,
   7467                                         obj_loc,
   7468                                         class_offset,
   7469                                         maybe_temp_loc,
   7470                                         read_barrier_option);
   7471       // If the class is abstract, we eagerly fetch the super class of the
   7472       // object to avoid doing a comparison we know will fail.
   7473       MipsLabel loop;
   7474       __ Bind(&loop);
   7475       // /* HeapReference<Class> */ out = out->super_class_
   7476       GenerateReferenceLoadOneRegister(instruction,
   7477                                        out_loc,
   7478                                        super_offset,
   7479                                        maybe_temp_loc,
   7480                                        read_barrier_option);
   7481       // If `out` is null, we use it for the result, and jump to `done`.
   7482       __ Beqz(out, &done);
   7483       __ Bne(out, cls, &loop);
   7484       __ LoadConst32(out, 1);
   7485       break;
   7486     }
   7487 
   7488     case TypeCheckKind::kClassHierarchyCheck: {
   7489       ReadBarrierOption read_barrier_option =
   7490           CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
   7491       // /* HeapReference<Class> */ out = obj->klass_
   7492       GenerateReferenceLoadTwoRegisters(instruction,
   7493                                         out_loc,
   7494                                         obj_loc,
   7495                                         class_offset,
   7496                                         maybe_temp_loc,
   7497                                         read_barrier_option);
   7498       // Walk over the class hierarchy to find a match.
   7499       MipsLabel loop, success;
   7500       __ Bind(&loop);
   7501       __ Beq(out, cls, &success);
   7502       // /* HeapReference<Class> */ out = out->super_class_
   7503       GenerateReferenceLoadOneRegister(instruction,
   7504                                        out_loc,
   7505                                        super_offset,
   7506                                        maybe_temp_loc,
   7507                                        read_barrier_option);
   7508       __ Bnez(out, &loop);
   7509       // If `out` is null, we use it for the result, and jump to `done`.
   7510       __ B(&done);
   7511       __ Bind(&success);
   7512       __ LoadConst32(out, 1);
   7513       break;
   7514     }
   7515 
   7516     case TypeCheckKind::kArrayObjectCheck: {
   7517       ReadBarrierOption read_barrier_option =
   7518           CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
   7519       // /* HeapReference<Class> */ out = obj->klass_
   7520       GenerateReferenceLoadTwoRegisters(instruction,
   7521                                         out_loc,
   7522                                         obj_loc,
   7523                                         class_offset,
   7524                                         maybe_temp_loc,
   7525                                         read_barrier_option);
   7526       // Do an exact check.
   7527       MipsLabel success;
   7528       __ Beq(out, cls, &success);
   7529       // Otherwise, we need to check that the object's class is a non-primitive array.
   7530       // /* HeapReference<Class> */ out = out->component_type_
   7531       GenerateReferenceLoadOneRegister(instruction,
   7532                                        out_loc,
   7533                                        component_offset,
   7534                                        maybe_temp_loc,
   7535                                        read_barrier_option);
   7536       // If `out` is null, we use it for the result, and jump to `done`.
   7537       __ Beqz(out, &done);
   7538       __ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
   7539       static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
   7540       __ Sltiu(out, out, 1);
   7541       __ B(&done);
   7542       __ Bind(&success);
   7543       __ LoadConst32(out, 1);
   7544       break;
   7545     }
   7546 
   7547     case TypeCheckKind::kArrayCheck: {
   7548       // No read barrier since the slow path will retry upon failure.
   7549       // /* HeapReference<Class> */ out = obj->klass_
   7550       GenerateReferenceLoadTwoRegisters(instruction,
   7551                                         out_loc,
   7552                                         obj_loc,
   7553                                         class_offset,
   7554                                         maybe_temp_loc,
   7555                                         kWithoutReadBarrier);
   7556       DCHECK(locations->OnlyCallsOnSlowPath());
   7557       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
   7558           instruction, /* is_fatal */ false);
   7559       codegen_->AddSlowPath(slow_path);
   7560       __ Bne(out, cls, slow_path->GetEntryLabel());
   7561       __ LoadConst32(out, 1);
   7562       break;
   7563     }
   7564 
   7565     case TypeCheckKind::kUnresolvedCheck:
   7566     case TypeCheckKind::kInterfaceCheck: {
   7567       // Note that we indeed only call on slow path, but we always go
   7568       // into the slow path for the unresolved and interface check
   7569       // cases.
   7570       //
   7571       // We cannot directly call the InstanceofNonTrivial runtime
   7572       // entry point without resorting to a type checking slow path
   7573       // here (i.e. by calling InvokeRuntime directly), as it would
   7574       // require us to assign fixed registers for the inputs of this
   7575       // HInstanceOf instruction (following the runtime calling
   7576       // convention), which might be cluttered by the potential first
   7577       // read barrier emission at the beginning of this method.
   7578       //
   7579       // TODO: Introduce a new runtime entry point taking the object
   7580       // to test (instead of its class) as argument, and let it deal
   7581       // with the read barrier issues. This will let us refactor this
   7582       // case of the `switch` code as it was previously (with a direct
   7583       // call to the runtime not using a type checking slow path).
   7584       // This should also be beneficial for the other cases above.
   7585       DCHECK(locations->OnlyCallsOnSlowPath());
   7586       slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
   7587           instruction, /* is_fatal */ false);
   7588       codegen_->AddSlowPath(slow_path);
   7589       __ B(slow_path->GetEntryLabel());
   7590       break;
   7591     }
   7592   }
   7593 
   7594   __ Bind(&done);
   7595 
   7596   if (slow_path != nullptr) {
   7597     __ Bind(slow_path->GetExitLabel());
   7598   }
   7599 }
   7600 
   7601 void LocationsBuilderMIPS::VisitIntConstant(HIntConstant* constant) {
   7602   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
   7603   locations->SetOut(Location::ConstantLocation(constant));
   7604 }
   7605 
   7606 void InstructionCodeGeneratorMIPS::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
   7607   // Will be generated at use site.
   7608 }
   7609 
   7610 void LocationsBuilderMIPS::VisitNullConstant(HNullConstant* constant) {
   7611   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
   7612   locations->SetOut(Location::ConstantLocation(constant));
   7613 }
   7614 
   7615 void InstructionCodeGeneratorMIPS::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
   7616   // Will be generated at use site.
   7617 }
   7618 
   7619 void LocationsBuilderMIPS::HandleInvoke(HInvoke* invoke) {
   7620   InvokeDexCallingConventionVisitorMIPS calling_convention_visitor;
   7621   CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
   7622 }
   7623 
   7624 void LocationsBuilderMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
   7625   HandleInvoke(invoke);
   7626   // The register T7 is required to be used for the hidden argument in
   7627   // art_quick_imt_conflict_trampoline, so add the hidden argument.
   7628   invoke->GetLocations()->AddTemp(Location::RegisterLocation(T7));
   7629 }
   7630 
   7631 void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
   7632   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   7633   Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
   7634   Location receiver = invoke->GetLocations()->InAt(0);
   7635   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   7636   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
   7637 
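          // Dispatch overview (a summary of the code below):
          //   temp = receiver->klass_; temp = temp->imt_;
          //   temp = imt[OffsetOfElement(imt_index)];
          //   T9 = temp->entry_point_from_quick_compiled_code_;
          // then call T9 with the dex method index in T7 as the hidden argument
          // consumed by art_quick_imt_conflict_trampoline on an IMT conflict.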
   7638   // temp = object->GetClass();
   7639   if (receiver.IsStackSlot()) {
   7640     __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
   7641     __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
   7642   } else {
   7643     __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
   7644   }
   7645   codegen_->MaybeRecordImplicitNullCheck(invoke);
   7646   // Instead of simply (possibly) unpoisoning `temp` here, we should
   7647   // emit a read barrier for the previous class reference load.
   7648   // However this is not required in practice, as this is an
   7649   // intermediate/temporary reference and because the current
   7650   // concurrent copying collector keeps the from-space memory
   7651   // intact/accessible until the end of the marking phase (future
   7652   // collectors may not preserve this property).
   7653   __ MaybeUnpoisonHeapReference(temp);
   7654   __ LoadFromOffset(kLoadWord, temp, temp,
   7655       mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
   7656   uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
   7657       invoke->GetImtIndex(), kMipsPointerSize));
   7658   // temp = temp->GetImtEntryAt(method_offset);
   7659   __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   7660   // T9 = temp->GetEntryPoint();
   7661   __ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
   7662   // Set the hidden argument.
   7663   __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
   7664                  invoke->GetDexMethodIndex());
   7665   // T9();
   7666   __ Jalr(T9);
   7667   __ NopIfNoReordering();
   7668   DCHECK(!codegen_->IsLeafMethod());
   7669   codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
   7670 }
   7671 
   7672 void LocationsBuilderMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   7673   IntrinsicLocationsBuilderMIPS intrinsic(codegen_);
   7674   if (intrinsic.TryDispatch(invoke)) {
   7675     return;
   7676   }
   7677 
   7678   HandleInvoke(invoke);
   7679 }
   7680 
   7681 void LocationsBuilderMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
   7682   // Explicit clinit checks triggered by static invokes must have been pruned by
   7683   // art::PrepareForRegisterAllocation.
   7684   DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
   7685 
   7686   bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
   7687   bool has_irreducible_loops = codegen_->GetGraph()->HasIrreducibleLoops();
   7688   bool has_extra_input = invoke->HasPcRelativeMethodLoadKind() && !is_r6 && !has_irreducible_loops;
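          // Background: on R6 PC-relative addresses can be formed directly
          // (auipc/addiupc), so no extra input is needed. On R2 a PC-relative
          // base register (computed once per method) is passed as an extra
          // input instead, except when the graph has irreducible loops, where
          // keeping that base live across loop headers is problematic.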
   7689 
   7690   IntrinsicLocationsBuilderMIPS intrinsic(codegen_);
   7691   if (intrinsic.TryDispatch(invoke)) {
   7692     if (invoke->GetLocations()->CanCall() && has_extra_input) {
   7693       invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::Any());
   7694     }
   7695     return;
   7696   }
   7697 
   7698   HandleInvoke(invoke);
   7699 
   7700   // Add the extra input register if either the dex cache array base register
   7701   // or the PC-relative base register for accessing literals is needed.
   7702   if (has_extra_input) {
   7703     invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::RequiresRegister());
   7704   }
   7705 }
   7706 
   7707 void LocationsBuilderMIPS::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
   7708   HandleInvoke(invoke);
   7709 }
   7710 
   7711 void InstructionCodeGeneratorMIPS::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
   7712   codegen_->GenerateInvokePolymorphicCall(invoke);
   7713 }
   7714 
   7715 static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS* codegen) {
   7716   if (invoke->GetLocations()->Intrinsified()) {
   7717     IntrinsicCodeGeneratorMIPS intrinsic(codegen);
   7718     intrinsic.Dispatch(invoke);
   7719     return true;
   7720   }
   7721   return false;
   7722 }
   7723 
   7724 HLoadString::LoadKind CodeGeneratorMIPS::GetSupportedLoadStringKind(
   7725     HLoadString::LoadKind desired_string_load_kind) {
   7726   switch (desired_string_load_kind) {
   7727     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
   7728     case HLoadString::LoadKind::kBootImageInternTable:
   7729     case HLoadString::LoadKind::kBssEntry:
   7730       DCHECK(!Runtime::Current()->UseJitCompilation());
   7731       break;
   7732     case HLoadString::LoadKind::kJitTableAddress:
   7733       DCHECK(Runtime::Current()->UseJitCompilation());
   7734       break;
   7735     case HLoadString::LoadKind::kBootImageAddress:
   7736     case HLoadString::LoadKind::kRuntimeCall:
   7737       break;
   7738   }
   7739   return desired_string_load_kind;
   7740 }
   7741 
   7742 HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
   7743     HLoadClass::LoadKind desired_class_load_kind) {
   7744   switch (desired_class_load_kind) {
   7745     case HLoadClass::LoadKind::kInvalid:
   7746       LOG(FATAL) << "UNREACHABLE";
   7747       UNREACHABLE();
   7748     case HLoadClass::LoadKind::kReferrersClass:
   7749       break;
   7750     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
   7751     case HLoadClass::LoadKind::kBootImageClassTable:
   7752     case HLoadClass::LoadKind::kBssEntry:
   7753       DCHECK(!Runtime::Current()->UseJitCompilation());
   7754       break;
   7755     case HLoadClass::LoadKind::kJitTableAddress:
   7756       DCHECK(Runtime::Current()->UseJitCompilation());
   7757       break;
   7758     case HLoadClass::LoadKind::kBootImageAddress:
   7759     case HLoadClass::LoadKind::kRuntimeCall:
   7760       break;
   7761   }
   7762   return desired_class_load_kind;
   7763 }
   7764 
   7765 Register CodeGeneratorMIPS::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
   7766                                                                   Register temp) {
   7767   CHECK(!GetInstructionSetFeatures().IsR6());
   7768   CHECK(!GetGraph()->HasIrreducibleLoops());
   7769   CHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
   7770   Location location = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
   7771   if (!invoke->GetLocations()->Intrinsified()) {
   7772     return location.AsRegister<Register>();
   7773   }
   7774   // For intrinsics we allow any location, so it may be on the stack.
   7775   if (!location.IsRegister()) {
   7776     __ LoadFromOffset(kLoadWord, temp, SP, location.GetStackIndex());
   7777     return temp;
   7778   }
   7779   // For register locations, check if the register was saved. If so, get it from the stack.
   7780   // Note: There is a chance that the register was saved but not overwritten, so we could
   7781   // save one load. However, since this is just an intrinsic slow path we prefer this
   7782   // simple and more robust approach rather than trying to determine if that's the case.
   7783   SlowPathCode* slow_path = GetCurrentSlowPath();
   7784   DCHECK(slow_path != nullptr);  // For intrinsified invokes the call is emitted on the slow path.
   7785   if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
   7786     int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
   7787     __ LoadFromOffset(kLoadWord, temp, SP, stack_offset);
   7788     return temp;
   7789   }
   7790   return location.AsRegister<Register>();
   7791 }
   7792 
   7793 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS::GetSupportedInvokeStaticOrDirectDispatch(
   7794       const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
   7795       HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
   7796   return desired_dispatch_info;
   7797 }
   7798 
   7799 void CodeGeneratorMIPS::GenerateStaticOrDirectCall(
   7800     HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
   7801   // All registers are assumed to be correctly set up per the calling convention.
   7802   Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
   7803   HInvokeStaticOrDirect::MethodLoadKind method_load_kind = invoke->GetMethodLoadKind();
   7804   HInvokeStaticOrDirect::CodePtrLocation code_ptr_location = invoke->GetCodePtrLocation();
   7805   bool is_r6 = GetInstructionSetFeatures().IsR6();
   7806   bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
   7807   Register base_reg = (invoke->HasPcRelativeMethodLoadKind() && !is_r6 && !has_irreducible_loops)
   7808       ? GetInvokeStaticOrDirectExtraParameter(invoke, temp.AsRegister<Register>())
   7809       : ZERO;
   7810 
   7811   switch (method_load_kind) {
   7812     case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
   7813       // temp = thread->string_init_entrypoint
   7814       uint32_t offset =
   7815           GetThreadOffset<kMipsPointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
   7816       __ LoadFromOffset(kLoadWord,
   7817                         temp.AsRegister<Register>(),
   7818                         TR,
   7819                         offset);
   7820       break;
   7821     }
   7822     case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
   7823       callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
   7824       break;
   7825     case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: {
   7826       DCHECK(GetCompilerOptions().IsBootImage());
   7827       PcRelativePatchInfo* info_high = NewBootImageMethodPatch(invoke->GetTargetMethod());
   7828       PcRelativePatchInfo* info_low =
   7829           NewBootImageMethodPatch(invoke->GetTargetMethod(), info_high);
   7830       Register temp_reg = temp.AsRegister<Register>();
   7831       EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
   7832       __ Addiu(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
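              // Sketch of the emitted sequence: the placeholder high half lands
              // in TMP (auipc on R6; lui plus an addu with base_reg on R2) and
              // the addiu above adds the low half. The linker later patches the
              // 0x1234/0x5678 placeholders with the method's boot image address.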
   7833       break;
   7834     }
   7835     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
   7836       __ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
   7837       break;
   7838     case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
   7839       PcRelativePatchInfo* info_high = NewMethodBssEntryPatch(
   7840           MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()));
   7841       PcRelativePatchInfo* info_low = NewMethodBssEntryPatch(
   7842           MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()), info_high);
   7843       Register temp_reg = temp.AsRegister<Register>();
   7844       EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
   7845       __ Lw(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
   7846       break;
   7847     }
   7848     case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: {
   7849       GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
   7850       return;  // No code pointer retrieval; the runtime performs the call directly.
   7851     }
   7852   }
   7853 
   7854   switch (code_ptr_location) {
   7855     case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
   7856       __ Bal(&frame_entry_label_);
   7857       break;
   7858     case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
   7859       // T9 = callee_method->entry_point_from_quick_compiled_code_;
   7860       __ LoadFromOffset(kLoadWord,
   7861                         T9,
   7862                         callee_method.AsRegister<Register>(),
   7863                         ArtMethod::EntryPointFromQuickCompiledCodeOffset(
   7864                             kMipsPointerSize).Int32Value());
   7865       // T9()
   7866       __ Jalr(T9);
   7867       __ NopIfNoReordering();
   7868       break;
   7869   }
   7870   RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
   7871 
   7872   DCHECK(!IsLeafMethod());
   7873 }
   7874 
   7875 void InstructionCodeGeneratorMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
   7876   // Explicit clinit checks triggered by static invokes must have been pruned by
   7877   // art::PrepareForRegisterAllocation.
   7878   DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
   7879 
   7880   if (TryGenerateIntrinsicCode(invoke, codegen_)) {
   7881     return;
   7882   }
   7883 
   7884   LocationSummary* locations = invoke->GetLocations();
   7885   codegen_->GenerateStaticOrDirectCall(invoke,
   7886                                        locations->HasTemps()
   7887                                            ? locations->GetTemp(0)
   7888                                            : Location::NoLocation());
   7889 }
   7890 
   7891 void CodeGeneratorMIPS::GenerateVirtualCall(
   7892     HInvokeVirtual* invoke, Location temp_location, SlowPathCode* slow_path) {
   7893   // Use the calling convention instead of the location of the receiver, as
   7894   // intrinsics may have put the receiver in a different register. In the intrinsics
   7895   // slow path, the arguments have been moved to the right place, so here we are
   7896   // guaranteed that the receiver is the first register of the calling convention.
   7897   InvokeDexCallingConvention calling_convention;
   7898   Register receiver = calling_convention.GetRegisterAt(0);
   7899 
   7900   Register temp = temp_location.AsRegister<Register>();
   7901   size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
   7902       invoke->GetVTableIndex(), kMipsPointerSize).SizeValue();
   7903   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
   7904   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
   7905 
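          // Dispatch overview: temp = receiver->klass_;
          // temp = klass->embedded_vtable_[vtable_index];
          // T9 = temp->entry_point_from_quick_compiled_code_; then call T9.
          // Unlike the interface dispatch above, no hidden argument is needed.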
   7906   // temp = object->GetClass();
   7907   __ LoadFromOffset(kLoadWord, temp, receiver, class_offset);
   7908   MaybeRecordImplicitNullCheck(invoke);
   7909   // Instead of simply (possibly) unpoisoning `temp` here, we should
   7910   // emit a read barrier for the previous class reference load.
   7911   // However this is not required in practice, as this is an
   7912   // intermediate/temporary reference and because the current
   7913   // concurrent copying collector keeps the from-space memory
   7914   // intact/accessible until the end of the marking phase (future
   7915   // collectors may not preserve this property).
   7916   __ MaybeUnpoisonHeapReference(temp);
   7917   // temp = temp->GetMethodAt(method_offset);
   7918   __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
   7919   // T9 = temp->GetEntryPoint();
   7920   __ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
   7921   // T9();
   7922   __ Jalr(T9);
   7923   __ NopIfNoReordering();
   7924   RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
   7925 }
   7926 
   7927 void InstructionCodeGeneratorMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   7928   if (TryGenerateIntrinsicCode(invoke, codegen_)) {
   7929     return;
   7930   }
   7931 
   7932   codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
   7933   DCHECK(!codegen_->IsLeafMethod());
   7934 }
   7935 
   7936 void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
   7937   HLoadClass::LoadKind load_kind = cls->GetLoadKind();
   7938   if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
   7939     InvokeRuntimeCallingConvention calling_convention;
   7940     Location loc = Location::RegisterLocation(calling_convention.GetRegisterAt(0));
   7941     CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(cls, loc, loc);
   7942     return;
   7943   }
   7944   DCHECK(!cls->NeedsAccessCheck());
   7945   const bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   7946   const bool has_irreducible_loops = codegen_->GetGraph()->HasIrreducibleLoops();
   7947   const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
   7948   LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
   7949       ? LocationSummary::kCallOnSlowPath
   7950       : LocationSummary::kNoCall;
   7951   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
   7952   if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
   7953     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
   7954   }
   7955   switch (load_kind) {
   7956     // We need an extra register for PC-relative literals on R2.
   7957     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
   7958     case HLoadClass::LoadKind::kBootImageAddress:
   7959     case HLoadClass::LoadKind::kBootImageClassTable:
   7960     case HLoadClass::LoadKind::kBssEntry:
   7961       if (isR6) {
   7962         break;
   7963       }
   7964       if (has_irreducible_loops) {
   7965         if (load_kind != HLoadClass::LoadKind::kBootImageAddress) {
   7966           codegen_->ClobberRA();
   7967         }
   7968         break;
   7969       }
   7970       FALLTHROUGH_INTENDED;
   7971     case HLoadClass::LoadKind::kReferrersClass:
   7972       locations->SetInAt(0, Location::RequiresRegister());
   7973       break;
   7974     default:
   7975       break;
   7976   }
   7977   locations->SetOut(Location::RequiresRegister());
   7978   if (load_kind == HLoadClass::LoadKind::kBssEntry) {
   7979     if (!kUseReadBarrier || kUseBakerReadBarrier) {
   7980       // Rely on the type resolution or initialization and marking to save everything we need.
   7981       RegisterSet caller_saves = RegisterSet::Empty();
   7982       InvokeRuntimeCallingConvention calling_convention;
   7983       caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   7984       locations->SetCustomSlowPathCallerSaves(caller_saves);
   7985     } else {
   7986       // For non-Baker read barriers we have a temp-clobbering call.
   7987     }
   7988   }
   7989 }
   7990 
   7991 // NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
   7992 // move.
   7993 void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
   7994   HLoadClass::LoadKind load_kind = cls->GetLoadKind();
   7995   if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
   7996     codegen_->GenerateLoadClassRuntimeCall(cls);
   7997     return;
   7998   }
   7999   DCHECK(!cls->NeedsAccessCheck());
   8000 
   8001   LocationSummary* locations = cls->GetLocations();
   8002   Location out_loc = locations->Out();
   8003   Register out = out_loc.AsRegister<Register>();
   8004   Register base_or_current_method_reg;
   8005   bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   8006   bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
   8007   switch (load_kind) {
   8008     // We need an extra register for PC-relative literals on R2.
   8009     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
   8010     case HLoadClass::LoadKind::kBootImageAddress:
   8011     case HLoadClass::LoadKind::kBootImageClassTable:
   8012     case HLoadClass::LoadKind::kBssEntry:
   8013       base_or_current_method_reg =
   8014           (isR6 || has_irreducible_loops) ? ZERO : locations->InAt(0).AsRegister<Register>();
   8015       break;
   8016     case HLoadClass::LoadKind::kReferrersClass:
   8017     case HLoadClass::LoadKind::kRuntimeCall:
   8018       base_or_current_method_reg = locations->InAt(0).AsRegister<Register>();
   8019       break;
   8020     default:
   8021       base_or_current_method_reg = ZERO;
   8022       break;
   8023   }
   8024 
   8025   const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
   8026       ? kWithoutReadBarrier
   8027       : kCompilerReadBarrierOption;
   8028   bool generate_null_check = false;
   8029   switch (load_kind) {
   8030     case HLoadClass::LoadKind::kReferrersClass: {
   8031       DCHECK(!cls->CanCallRuntime());
   8032       DCHECK(!cls->MustGenerateClinitCheck());
   8033       // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
   8034       GenerateGcRootFieldLoad(cls,
   8035                               out_loc,
   8036                               base_or_current_method_reg,
   8037                               ArtMethod::DeclaringClassOffset().Int32Value(),
   8038                               read_barrier_option);
   8039       break;
   8040     }
   8041     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
   8042       DCHECK(codegen_->GetCompilerOptions().IsBootImage());
   8043       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
   8044       CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
   8045           codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
   8046       CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
   8047           codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex(), info_high);
   8048       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
   8049                                                      out,
   8050                                                      base_or_current_method_reg);
   8051       __ Addiu(out, out, /* placeholder */ 0x5678, &info_low->label);
   8052       break;
   8053     }
   8054     case HLoadClass::LoadKind::kBootImageAddress: {
   8055       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
   8056       uint32_t address = dchecked_integral_cast<uint32_t>(
   8057           reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
   8058       DCHECK_NE(address, 0u);
   8059       if (isR6 || !has_irreducible_loops) {
   8060         __ LoadLiteral(out,
   8061                        base_or_current_method_reg,
   8062                        codegen_->DeduplicateBootImageAddressLiteral(address));
   8063       } else {
   8064         __ LoadConst32(out, address);
   8065       }
   8066       break;
   8067     }
   8068     case HLoadClass::LoadKind::kBootImageClassTable: {
   8069       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
   8070       CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
   8071           codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
   8072       CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
   8073           codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex(), info_high);
   8074       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
   8075                                                      out,
   8076                                                      base_or_current_method_reg);
   8077       __ Lw(out, out, /* placeholder */ 0x5678, &info_low->label);
   8078       // Extract the reference from the slot data, i.e. clear the hash bits.
   8079       int32_t masked_hash = ClassTable::TableSlot::MaskHash(
   8080           ComputeModifiedUtf8Hash(cls->GetDexFile().StringByTypeIdx(cls->GetTypeIndex())));
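              // The slot stores `reference | masked_hash`, and the low
              // (alignment) bits of the reference itself are zero, so
              // subtracting the compile-time-known masked hash recovers the
              // pointer.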
   8081       if (masked_hash != 0) {
   8082         __ Addiu(out, out, -masked_hash);
   8083       }
   8084       break;
   8085     }
   8086     case HLoadClass::LoadKind::kBssEntry: {
   8087       CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high =
   8088           codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
   8089       CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
   8090           codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex(), bss_info_high);
   8091       codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high,
   8092                                                      out,
   8093                                                      base_or_current_method_reg);
   8094       GenerateGcRootFieldLoad(cls,
   8095                               out_loc,
   8096                               out,
   8097                               /* placeholder */ 0x5678,
   8098                               read_barrier_option,
   8099                               &info_low->label);
   8100       generate_null_check = true;
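              // The .bss slot is initially null; the null check emitted below
              // diverts the first execution to the slow path, which resolves
              // the class and writes it back for subsequent executions.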
   8101       break;
   8102     }
   8103     case HLoadClass::LoadKind::kJitTableAddress: {
   8104       CodeGeneratorMIPS::JitPatchInfo* info = codegen_->NewJitRootClassPatch(cls->GetDexFile(),
   8105                                                                              cls->GetTypeIndex(),
   8106                                                                              cls->GetClass());
   8107       bool reordering = __ SetReorder(false);
   8108       __ Bind(&info->high_label);
   8109       __ Lui(out, /* placeholder */ 0x1234);
   8110       __ SetReorder(reordering);
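              // When the code is committed, the JIT patches the lui above and
              // the root load below (placeholders 0x1234/0x5678) with the
              // address of this class's entry in the JIT root table.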
   8111       GenerateGcRootFieldLoad(cls,
   8112                               out_loc,
   8113                               out,
   8114                               /* placeholder */ 0x5678,
   8115                               read_barrier_option,
   8116                               &info->low_label);
   8117       break;
   8118     }
   8119     case HLoadClass::LoadKind::kRuntimeCall:
   8120     case HLoadClass::LoadKind::kInvalid:
   8121       LOG(FATAL) << "UNREACHABLE";
   8122       UNREACHABLE();
   8123   }
   8124 
   8125   if (generate_null_check || cls->MustGenerateClinitCheck()) {
   8126     DCHECK(cls->CanCallRuntime());
   8127     SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
   8128         cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
   8129     codegen_->AddSlowPath(slow_path);
   8130     if (generate_null_check) {
   8131       __ Beqz(out, slow_path->GetEntryLabel());
   8132     }
   8133     if (cls->MustGenerateClinitCheck()) {
   8134       GenerateClassInitializationCheck(slow_path, out);
   8135     } else {
   8136       __ Bind(slow_path->GetExitLabel());
   8137     }
   8138   }
   8139 }
   8140 
   8141 static int32_t GetExceptionTlsOffset() {
   8142   return Thread::ExceptionOffset<kMipsPointerSize>().Int32Value();
   8143 }
   8144 
   8145 void LocationsBuilderMIPS::VisitLoadException(HLoadException* load) {
   8146   LocationSummary* locations =
   8147       new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
   8148   locations->SetOut(Location::RequiresRegister());
   8149 }
   8150 
   8151 void InstructionCodeGeneratorMIPS::VisitLoadException(HLoadException* load) {
   8152   Register out = load->GetLocations()->Out().AsRegister<Register>();
   8153   __ LoadFromOffset(kLoadWord, out, TR, GetExceptionTlsOffset());
   8154 }
   8155 
   8156 void LocationsBuilderMIPS::VisitClearException(HClearException* clear) {
   8157   new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
   8158 }
   8159 
   8160 void InstructionCodeGeneratorMIPS::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
   8161   __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
   8162 }
   8163 
   8164 void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
   8165   LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
   8166   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
   8167   HLoadString::LoadKind load_kind = load->GetLoadKind();
   8168   const bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   8169   const bool has_irreducible_loops = codegen_->GetGraph()->HasIrreducibleLoops();
   8170   switch (load_kind) {
   8171     // We need an extra register for PC-relative literals on R2.
   8172     case HLoadString::LoadKind::kBootImageAddress:
   8173     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
   8174     case HLoadString::LoadKind::kBootImageInternTable:
   8175     case HLoadString::LoadKind::kBssEntry:
   8176       if (isR6) {
   8177         break;
   8178       }
   8179       if (has_irreducible_loops) {
   8180         if (load_kind != HLoadString::LoadKind::kBootImageAddress) {
   8181           codegen_->ClobberRA();
   8182         }
   8183         break;
   8184       }
   8185       FALLTHROUGH_INTENDED;
   8186     // We need an extra register for PC-relative dex cache accesses.
   8187     case HLoadString::LoadKind::kRuntimeCall:
   8188       locations->SetInAt(0, Location::RequiresRegister());
   8189       break;
   8190     default:
   8191       break;
   8192   }
   8193   if (load_kind == HLoadString::LoadKind::kRuntimeCall) {
   8194     InvokeRuntimeCallingConvention calling_convention;
   8195     locations->SetOut(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   8196   } else {
   8197     locations->SetOut(Location::RequiresRegister());
   8198     if (load_kind == HLoadString::LoadKind::kBssEntry) {
   8199       if (!kUseReadBarrier || kUseBakerReadBarrier) {
   8200         // Rely on the pResolveString and marking to save everything we need.
   8201         RegisterSet caller_saves = RegisterSet::Empty();
   8202         InvokeRuntimeCallingConvention calling_convention;
   8203         caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   8204         locations->SetCustomSlowPathCallerSaves(caller_saves);
   8205       } else {
   8206         // For non-Baker read barriers we have a temp-clobbering call.
   8207       }
   8208     }
   8209   }
   8210 }
   8211 
   8212 // NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
   8213 // move.
   8214 void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
   8215   HLoadString::LoadKind load_kind = load->GetLoadKind();
   8216   LocationSummary* locations = load->GetLocations();
   8217   Location out_loc = locations->Out();
   8218   Register out = out_loc.AsRegister<Register>();
   8219   Register base_or_current_method_reg;
   8220   bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   8221   bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
   8222   switch (load_kind) {
   8223     // We need an extra register for PC-relative literals on R2.
   8224     case HLoadString::LoadKind::kBootImageAddress:
   8225     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
   8226     case HLoadString::LoadKind::kBootImageInternTable:
   8227     case HLoadString::LoadKind::kBssEntry:
   8228       base_or_current_method_reg =
   8229           (isR6 || has_irreducible_loops) ? ZERO : locations->InAt(0).AsRegister<Register>();
   8230       break;
   8231     default:
   8232       base_or_current_method_reg = ZERO;
   8233       break;
   8234   }
   8235 
   8236   switch (load_kind) {
   8237     case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
   8238       DCHECK(codegen_->GetCompilerOptions().IsBootImage());
   8239       CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
   8240           codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex());
   8241       CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
   8242           codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
   8243       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
   8244                                                      out,
   8245                                                      base_or_current_method_reg);
   8246       __ Addiu(out, out, /* placeholder */ 0x5678, &info_low->label);
   8247       return;
   8248     }
   8249     case HLoadString::LoadKind::kBootImageAddress: {
   8250       uint32_t address = dchecked_integral_cast<uint32_t>(
   8251           reinterpret_cast<uintptr_t>(load->GetString().Get()));
   8252       DCHECK_NE(address, 0u);
   8253       if (isR6 || !has_irreducible_loops) {
   8254         __ LoadLiteral(out,
   8255                        base_or_current_method_reg,
   8256                        codegen_->DeduplicateBootImageAddressLiteral(address));
   8257       } else {
   8258         __ LoadConst32(out, address);
   8259       }
   8260       return;
   8261     }
   8262     case HLoadString::LoadKind::kBootImageInternTable: {
   8263       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
   8264       CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
   8265           codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex());
   8266       CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
   8267           codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
   8268       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
   8269                                                      out,
   8270                                                      base_or_current_method_reg);
   8271       __ Lw(out, out, /* placeholder */ 0x5678, &info_low->label);
   8272       return;
   8273     }
   8274     case HLoadString::LoadKind::kBssEntry: {
   8275       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
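              // Same lazy-resolution scheme as class .bss entries: the slot
              // starts out null, and the Beqz below sends the first execution
              // to LoadStringSlowPathMIPS (pResolveString) to fill it.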
   8276       CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
   8277           codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
   8278       CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
   8279           codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
   8280       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
   8281                                                      out,
   8282                                                      base_or_current_method_reg);
   8283       GenerateGcRootFieldLoad(load,
   8284                               out_loc,
   8285                               out,
   8286                               /* placeholder */ 0x5678,
   8287                               kCompilerReadBarrierOption,
   8288                               &info_low->label);
   8289       SlowPathCodeMIPS* slow_path =
   8290           new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS(load);
   8291       codegen_->AddSlowPath(slow_path);
   8292       __ Beqz(out, slow_path->GetEntryLabel());
   8293       __ Bind(slow_path->GetExitLabel());
   8294       return;
   8295     }
   8296     case HLoadString::LoadKind::kJitTableAddress: {
   8297       CodeGeneratorMIPS::JitPatchInfo* info =
   8298           codegen_->NewJitRootStringPatch(load->GetDexFile(),
   8299                                           load->GetStringIndex(),
   8300                                           load->GetString());
   8301       bool reordering = __ SetReorder(false);
   8302       __ Bind(&info->high_label);
   8303       __ Lui(out, /* placeholder */ 0x1234);
   8304       __ SetReorder(reordering);
   8305       GenerateGcRootFieldLoad(load,
   8306                               out_loc,
   8307                               out,
   8308                               /* placeholder */ 0x5678,
   8309                               kCompilerReadBarrierOption,
   8310                               &info->low_label);
   8311       return;
   8312     }
   8313     default:
   8314       break;
   8315   }
   8316 
   8317   // TODO: Re-add the compiler code to do the string dex cache lookup.
   8318   DCHECK(load_kind == HLoadString::LoadKind::kRuntimeCall);
   8319   InvokeRuntimeCallingConvention calling_convention;
   8320   DCHECK_EQ(calling_convention.GetRegisterAt(0), out);
   8321   __ LoadConst32(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
   8322   codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
   8323   CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
   8324 }
   8325 
   8326 void LocationsBuilderMIPS::VisitLongConstant(HLongConstant* constant) {
   8327   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
   8328   locations->SetOut(Location::ConstantLocation(constant));
   8329 }
   8330 
   8331 void InstructionCodeGeneratorMIPS::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
   8332   // Will be generated at use site.
   8333 }
   8334 
   8335 void LocationsBuilderMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
   8336   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
   8337       instruction, LocationSummary::kCallOnMainOnly);
   8338   InvokeRuntimeCallingConvention calling_convention;
   8339   locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
   8340 }
   8341 
   8342 void InstructionCodeGeneratorMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
   8343   if (instruction->IsEnter()) {
   8344     codegen_->InvokeRuntime(kQuickLockObject, instruction, instruction->GetDexPc());
   8345     CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
   8346   } else {
   8347     codegen_->InvokeRuntime(kQuickUnlockObject, instruction, instruction->GetDexPc());
   8348     CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
   8349   }
   8350 }
   8351 
   8352 void LocationsBuilderMIPS::VisitMul(HMul* mul) {
   8353   LocationSummary* locations =
   8354       new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
   8355   switch (mul->GetResultType()) {
   8356     case DataType::Type::kInt32:
   8357     case DataType::Type::kInt64:
   8358       locations->SetInAt(0, Location::RequiresRegister());
   8359       locations->SetInAt(1, Location::RequiresRegister());
   8360       locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
   8361       break;
   8362 
   8363     case DataType::Type::kFloat32:
   8364     case DataType::Type::kFloat64:
   8365       locations->SetInAt(0, Location::RequiresFpuRegister());
   8366       locations->SetInAt(1, Location::RequiresFpuRegister());
   8367       locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
   8368       break;
   8369 
   8370     default:
   8371       LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
   8372   }
   8373 }
   8374 
   8375 void InstructionCodeGeneratorMIPS::VisitMul(HMul* instruction) {
   8376   DataType::Type type = instruction->GetType();
   8377   LocationSummary* locations = instruction->GetLocations();
   8378   bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
   8379 
   8380   switch (type) {
   8381     case DataType::Type::kInt32: {
   8382       Register dst = locations->Out().AsRegister<Register>();
   8383       Register lhs = locations->InAt(0).AsRegister<Register>();
   8384       Register rhs = locations->InAt(1).AsRegister<Register>();
   8385 
   8386       if (isR6) {
   8387         __ MulR6(dst, lhs, rhs);
   8388       } else {
   8389         __ MulR2(dst, lhs, rhs);
   8390       }
   8391       break;
   8392     }
   8393     case DataType::Type::kInt64: {
   8394       Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
   8395       Register dst_low = locations->Out().AsRegisterPairLow<Register>();
   8396       Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
   8397       Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
   8398       Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
   8399       Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>();
   8400 
   8401       // Extra checks to protect caused by the existance of A1_A2.
   8402       // The algorithm is wrong if dst_high is either lhs_lo or rhs_lo:
   8403       // (e.g. lhs=a0_a1, rhs=a2_a3 and dst=a1_a2).
   8404       DCHECK_NE(dst_high, lhs_low);
   8405       DCHECK_NE(dst_high, rhs_low);
   8406 
   8407       // A_B * C_D
   8408       // dst_hi:  [ low(A*D) + low(B*C) + hi(B*D) ]
   8409       // dst_lo:  [ low(B*D) ]
   8410       // Note: R2 and R6 MUL produce the low 32 bit of the multiplication result.
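      //
      // This is the schoolbook decomposition of the product kept modulo 2^64:
      //   (A*2^32 + B) * (C*2^32 + D)
      //       = A*C*2^64 + (A*D + B*C)*2^32 + B*D
      //       = (A*D + B*C + hi(B*D))*2^32 + low(B*D)   (mod 2^64),
      // so A*C and the high halves of A*D and B*C never need to be computed.
      // E.g. 0x00000001_00000000 * 0x00000000_00000002 (A=1, B=0, C=0, D=2):
      //   dst_hi = low(1*2) + low(0*0) + hi(0*2) = 2, dst_lo = low(0*2) = 0,
      // which is the expected 0x00000002_00000000.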

      if (isR6) {
        __ MulR6(TMP, lhs_high, rhs_low);
        __ MulR6(dst_high, lhs_low, rhs_high);
        __ Addu(dst_high, dst_high, TMP);
        __ MuhuR6(TMP, lhs_low, rhs_low);
        __ Addu(dst_high, dst_high, TMP);
        __ MulR6(dst_low, lhs_low, rhs_low);
      } else {
        __ MulR2(TMP, lhs_high, rhs_low);
        __ MulR2(dst_high, lhs_low, rhs_high);
        __ Addu(dst_high, dst_high, TMP);
        __ MultuR2(lhs_low, rhs_low);
        __ Mfhi(TMP);
        __ Addu(dst_high, dst_high, TMP);
        __ Mflo(dst_low);
      }
      break;
    }
    case DataType::Type::kFloat32:
    case DataType::Type::kFloat64: {
      FRegister dst = locations->Out().AsFpuRegister<FRegister>();
      FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
      FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
      if (type == DataType::Type::kFloat32) {
        __ MulS(dst, lhs, rhs);
      } else {
        __ MulD(dst, lhs, rhs);
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected mul type " << type;
  }
}

void LocationsBuilderMIPS::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case DataType::Type::kInt32:
    case DataType::Type::kInt64:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case DataType::Type::kFloat32:
    case DataType::Type::kFloat64:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

void InstructionCodeGeneratorMIPS::VisitNeg(HNeg* instruction) {
  DataType::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case DataType::Type::kInt32: {
      Register dst = locations->Out().AsRegister<Register>();
      Register src = locations->InAt(0).AsRegister<Register>();
      __ Subu(dst, ZERO, src);
      break;
    }
    case DataType::Type::kInt64: {
      Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
      Register dst_low = locations->Out().AsRegisterPairLow<Register>();
      Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
      Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
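      // Negate as 0 - src with manual borrow propagation: after negating the low word,
      // SLTU materializes the borrow (1 iff dst_low != 0, i.e. iff src_low != 0), which
      // is then subtracted from the negated high word.
      // E.g. -(0x00000000_00000001): dst_low = 0xFFFFFFFF, borrow = 1,
      // dst_high = 0 - 0 - 1 = 0xFFFFFFFF, i.e. the expected -1.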
      __ Subu(dst_low, ZERO, src_low);
      __ Sltu(TMP, ZERO, dst_low);
      __ Subu(dst_high, ZERO, src_high);
      __ Subu(dst_high, dst_high, TMP);
      break;
    }
    case DataType::Type::kFloat32:
    case DataType::Type::kFloat64: {
      FRegister dst = locations->Out().AsFpuRegister<FRegister>();
      FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
      if (type == DataType::Type::kFloat32) {
        __ NegS(dst, src);
      } else {
        __ NegD(dst, src);
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected neg type " << type;
  }
}

void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
      instruction, LocationSummary::kCallOnMainOnly);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}

void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
  // Note: if heap poisoning is enabled, the entry point takes care
  // of poisoning the reference.
  QuickEntrypointEnum entrypoint =
      CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
  codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
  CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
  DCHECK(!codegen_->IsLeafMethod());
}

void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
      instruction, LocationSummary::kCallOnMainOnly);
  InvokeRuntimeCallingConvention calling_convention;
  if (instruction->IsStringAlloc()) {
    locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
  } else {
    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  }
  locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference));
}

void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) {
  // Note: if heap poisoning is enabled, the entry point takes care
  // of poisoning the reference.
  if (instruction->IsStringAlloc()) {
    // String is allocated through StringFactory. Call NewEmptyString entry point.
    Register temp = instruction->GetLocations()->GetTemp(0).AsRegister<Register>();
    MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
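    // Load the StringFactory method (an ArtMethod*) from the thread-local entrypoint
    // slot, then its quick-compiled-code entry, and call through T9, where the MIPS
    // calling convention expects the callee's address.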
    __ LoadFromOffset(kLoadWord, temp, TR, QUICK_ENTRY_POINT(pNewEmptyString));
    __ LoadFromOffset(kLoadWord, T9, temp, code_offset.Int32Value());
    __ Jalr(T9);
    __ NopIfNoReordering();
    codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
  } else {
    codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
    CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
  }
}

void LocationsBuilderMIPS::VisitNot(HNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorMIPS::VisitNot(HNot* instruction) {
  DataType::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case DataType::Type::kInt32: {
      Register dst = locations->Out().AsRegister<Register>();
      Register src = locations->InAt(0).AsRegister<Register>();
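      // MIPS has no single-operand NOT instruction; NOR with ZERO computes it,
      // since ~(src | 0) == ~src.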
      __ Nor(dst, src, ZERO);
      break;
    }

    case DataType::Type::kInt64: {
      Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
      Register dst_low = locations->Out().AsRegisterPairLow<Register>();
      Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
      Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
      __ Nor(dst_high, src_high, ZERO);
      __ Nor(dst_low, src_low, ZERO);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
  }
}

void LocationsBuilderMIPS::VisitBooleanNot(HBooleanNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorMIPS::VisitBooleanNot(HBooleanNot* instruction) {
  LocationSummary* locations = instruction->GetLocations();
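  // Booleans are materialized as 0/1, so XORing with 1 flips the value.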
  __ Xori(locations->Out().AsRegister<Register>(),
          locations->InAt(0).AsRegister<Register>(),
          1);
}

void LocationsBuilderMIPS::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
}

void CodeGeneratorMIPS::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (CanMoveNullCheckToUser(instruction)) {
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

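  // Load from the object into ZERO: the loaded value is discarded, but if the reference
  // is null the access faults, and the runtime's fault handler turns the fault into a
  // NullPointerException at the PC recorded below.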
  __ Lw(ZERO, obj.AsRegister<Register>(), 0);
  RecordPcInfo(instruction, instruction->GetDexPc());
}

void CodeGeneratorMIPS::GenerateExplicitNullCheck(HNullCheck* instruction) {
  SlowPathCodeMIPS* slow_path = new (GetScopedAllocator()) NullCheckSlowPathMIPS(instruction);
  AddSlowPath(slow_path);

  Location obj = instruction->GetLocations()->InAt(0);

  __ Beqz(obj.AsRegister<Register>(), slow_path->GetEntryLabel());
}

void InstructionCodeGeneratorMIPS::VisitNullCheck(HNullCheck* instruction) {
  codegen_->GenerateNullCheck(instruction);
}

void LocationsBuilderMIPS::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS::VisitParallelMove(HParallelMove* instruction) {
  if (instruction->GetNext()->IsSuspendCheck() &&
      instruction->GetBlock()->GetLoopInformation() != nullptr) {
    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
    // The back edge will generate the suspend check.
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
  }

  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}

void LocationsBuilderMIPS::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}

void InstructionCodeGeneratorMIPS::VisitParameterValue(HParameterValue* instruction
                                                         ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}

void LocationsBuilderMIPS::VisitCurrentMethod(HCurrentMethod* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}

void InstructionCodeGeneratorMIPS::VisitCurrentMethod(HCurrentMethod* instruction
                                                        ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}

void LocationsBuilderMIPS::VisitPhi(HPhi* instruction) {
  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
  for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}

void InstructionCodeGeneratorMIPS::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS::VisitRem(HRem* rem) {
  DataType::Type type = rem->GetResultType();
  bool call_rem;
  if ((type == DataType::Type::kInt64) && rem->InputAt(1)->IsConstant()) {
    int64_t imm = CodeGenerator::GetInt64ValueOf(rem->InputAt(1)->AsConstant());
    call_rem = (imm != 0) && !IsPowerOfTwo(static_cast<uint64_t>(AbsOrMin(imm)));
  } else {
    call_rem = (type != DataType::Type::kInt32);
  }
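  // E.g. a long remainder by a constant power of two (x % 8L) or by 0, 1 or -1 is
  // generated inline; a long remainder by any other value (x % 10L) calls kQuickLmod.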
  LocationSummary::CallKind call_kind = call_rem
      ? LocationSummary::kCallOnMainOnly
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);

  switch (type) {
    case DataType::Type::kInt32:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case DataType::Type::kInt64: {
      if (call_rem) {
        InvokeRuntimeCallingConvention calling_convention;
        locations->SetInAt(0, Location::RegisterPairLocation(
            calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
        locations->SetInAt(1, Location::RegisterPairLocation(
            calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
        locations->SetOut(calling_convention.GetReturnLocation(type));
      } else {
        locations->SetInAt(0, Location::RequiresRegister());
        locations->SetInAt(1, Location::ConstantLocation(rem->InputAt(1)->AsConstant()));
        locations->SetOut(Location::RequiresRegister());
      }
      break;
    }

    case DataType::Type::kFloat32:
    case DataType::Type::kFloat64: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(type));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void InstructionCodeGeneratorMIPS::VisitRem(HRem* instruction) {
  DataType::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case DataType::Type::kInt32:
      GenerateDivRemIntegral(instruction);
      break;
    case DataType::Type::kInt64: {
      if (locations->InAt(1).IsConstant()) {
        int64_t imm = locations->InAt(1).GetConstant()->AsLongConstant()->GetValue();
        if (imm == 0) {
          // Do not generate anything. DivZeroCheck would prevent any code from being executed.
        } else if (imm == 1 || imm == -1) {
          DivRemOneOrMinusOne(instruction);
        } else {
          DCHECK(IsPowerOfTwo(static_cast<uint64_t>(AbsOrMin(imm))));
          DivRemByPowerOfTwo(instruction);
        }
      } else {
        codegen_->InvokeRuntime(kQuickLmod, instruction, instruction->GetDexPc());
        CheckEntrypointTypes<kQuickLmod, int64_t, int64_t, int64_t>();
      }
      break;
    }
    case DataType::Type::kFloat32: {
      codegen_->InvokeRuntime(kQuickFmodf, instruction, instruction->GetDexPc());
      CheckEntrypointTypes<kQuickFmodf, float, float, float>();
      break;
    }
    case DataType::Type::kFloat64: {
      codegen_->InvokeRuntime(kQuickFmod, instruction, instruction->GetDexPc());
      CheckEntrypointTypes<kQuickFmod, double, double, double>();
      break;
    }
    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void LocationsBuilderMIPS::VisitConstructorFence(HConstructorFence* constructor_fence) {
  constructor_fence->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS::VisitConstructorFence(
    HConstructorFence* constructor_fence ATTRIBUTE_UNUSED) {
  GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
}

void LocationsBuilderMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}

void LocationsBuilderMIPS::VisitReturn(HReturn* ret) {
  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(ret);
  DataType::Type return_type = ret->InputAt(0)->GetType();
  locations->SetInAt(0, MipsReturnLocation(return_type));
}

void InstructionCodeGeneratorMIPS::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS::VisitRor(HRor* ror) {
  HandleShift(ror);
}

void InstructionCodeGeneratorMIPS::VisitRor(HRor* ror) {
  HandleShift(ror);
}

void LocationsBuilderMIPS::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorMIPS::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderMIPS::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorMIPS::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderMIPS::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
}

void LocationsBuilderMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction,
                 instruction->GetFieldInfo(),
                 instruction->GetDexPc(),
                 instruction->GetValueCanBeNull());
}

void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldGet(
    HUnresolvedInstanceFieldGet* instruction) {
  FieldAccessCallingConventionMIPS calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(instruction,
                                                 instruction->GetFieldType(),
                                                 calling_convention);
}

void InstructionCodeGeneratorMIPS::VisitUnresolvedInstanceFieldGet(
    HUnresolvedInstanceFieldGet* instruction) {
  FieldAccessCallingConventionMIPS calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldSet(
    HUnresolvedInstanceFieldSet* instruction) {
  FieldAccessCallingConventionMIPS calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(instruction,
                                                 instruction->GetFieldType(),
                                                 calling_convention);
}

void InstructionCodeGeneratorMIPS::VisitUnresolvedInstanceFieldSet(
    HUnresolvedInstanceFieldSet* instruction) {
  FieldAccessCallingConventionMIPS calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS::VisitUnresolvedStaticFieldGet(
    HUnresolvedStaticFieldGet* instruction) {
  FieldAccessCallingConventionMIPS calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(instruction,
                                                 instruction->GetFieldType(),
                                                 calling_convention);
}

void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldGet(
    HUnresolvedStaticFieldGet* instruction) {
  FieldAccessCallingConventionMIPS calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS::VisitUnresolvedStaticFieldSet(
    HUnresolvedStaticFieldSet* instruction) {
  FieldAccessCallingConventionMIPS calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(instruction,
                                                 instruction->GetFieldType(),
                                                 calling_convention);
}

void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldSet(
    HUnresolvedStaticFieldSet* instruction) {
  FieldAccessCallingConventionMIPS calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
      instruction, LocationSummary::kCallOnSlowPath);
  // In the suspend check slow path there are usually no caller-save registers at all.
  // If SIMD instructions are present, however, we force spilling all live SIMD
  // registers in full width (since the runtime only saves/restores the lower part).
  locations->SetCustomSlowPathCallerSaves(
      GetGraph()->HasSIMD() ? RegisterSet::AllFpu() : RegisterSet::Empty());
}

void InstructionCodeGeneratorMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

void LocationsBuilderMIPS::VisitThrow(HThrow* instruction) {
  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
      instruction, LocationSummary::kCallOnMainOnly);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorMIPS::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(kQuickDeliverException, instruction, instruction->GetDexPc());
  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}

void LocationsBuilderMIPS::VisitTypeConversion(HTypeConversion* conversion) {
  DataType::Type input_type = conversion->GetInputType();
  DataType::Type result_type = conversion->GetResultType();
  DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
      << input_type << " -> " << result_type;
  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();

  if ((input_type == DataType::Type::kReference) || (input_type == DataType::Type::kVoid) ||
      (result_type == DataType::Type::kReference) || (result_type == DataType::Type::kVoid)) {
    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
  }

  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  if (!isR6 &&
      ((DataType::IsFloatingPointType(result_type) && input_type == DataType::Type::kInt64) ||
       (result_type == DataType::Type::kInt64 && DataType::IsFloatingPointType(input_type)))) {
    call_kind = LocationSummary::kCallOnMainOnly;
  }
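  // On R2, conversions between long and floating point cannot be done inline; they go
  // through runtime entrypoints (kQuickL2f/kQuickL2d and kQuickF2l/kQuickD2l below),
  // hence the kCallOnMainOnly classification.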

  LocationSummary* locations =
      new (GetGraph()->GetAllocator()) LocationSummary(conversion, call_kind);

  if (call_kind == LocationSummary::kNoCall) {
    if (DataType::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::RequiresFpuRegister());
    } else {
      locations->SetInAt(0, Location::RequiresRegister());
    }

    if (DataType::IsFloatingPointType(result_type)) {
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
    } else {
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
    }
  } else {
    InvokeRuntimeCallingConvention calling_convention;

    if (DataType::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
    } else {
      DCHECK_EQ(input_type, DataType::Type::kInt64);
      locations->SetInAt(0, Location::RegisterPairLocation(
          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
    }

    locations->SetOut(calling_convention.GetReturnLocation(result_type));
  }
}

void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  DataType::Type result_type = conversion->GetResultType();
  DataType::Type input_type = conversion->GetInputType();
  bool has_sign_extension = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();

  DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
      << input_type << " -> " << result_type;

  if (result_type == DataType::Type::kInt64 && DataType::IsIntegralType(input_type)) {
    Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
    Register dst_low = locations->Out().AsRegisterPairLow<Register>();
    Register src = locations->InAt(0).AsRegister<Register>();

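    // Widen to 64 bits: the value goes into the low word and the high word is filled
    // with copies of the sign bit (arithmetic shift right by 31).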
    if (dst_low != src) {
      __ Move(dst_low, src);
    }
    __ Sra(dst_high, src, 31);
  } else if (DataType::IsIntegralType(result_type) && DataType::IsIntegralType(input_type)) {
    Register dst = locations->Out().AsRegister<Register>();
    Register src = (input_type == DataType::Type::kInt64)
        ? locations->InAt(0).AsRegisterPairLow<Register>()
        : locations->InAt(0).AsRegister<Register>();

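    // Narrowing conversions: unsigned results are zero-extended with ANDI; signed
    // results use seb/seh where available (R2 and later), otherwise a shift-left/
    // shift-right-arithmetic pair emulates the sign extension.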
    switch (result_type) {
      case DataType::Type::kUint8:
        __ Andi(dst, src, 0xFF);
        break;
      case DataType::Type::kInt8:
        if (has_sign_extension) {
          __ Seb(dst, src);
        } else {
          __ Sll(dst, src, 24);
          __ Sra(dst, dst, 24);
        }
        break;
      case DataType::Type::kUint16:
        __ Andi(dst, src, 0xFFFF);
        break;
      case DataType::Type::kInt16:
        if (has_sign_extension) {
          __ Seh(dst, src);
        } else {
          __ Sll(dst, src, 16);
          __ Sra(dst, dst, 16);
        }
        break;
      case DataType::Type::kInt32:
        if (dst != src) {
          __ Move(dst, src);
        }
        break;

      default:
        LOG(FATAL) << "Unexpected type conversion from " << input_type
                   << " to " << result_type;
    }
  } else if (DataType::IsFloatingPointType(result_type) && DataType::IsIntegralType(input_type)) {
    if (input_type == DataType::Type::kInt64) {
      if (isR6) {
        // cvt.s.l/cvt.d.l requires MIPSR2+ with FR=1. MIPS32R6 is implemented as a secondary
        // architecture on top of MIPS64R6, which has FR=1, and therefore can use the instruction.
        Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
        Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
        FRegister dst = locations->Out().AsFpuRegister<FRegister>();
        __ Mtc1(src_low, FTMP);
        __ Mthc1(src_high, FTMP);
        if (result_type == DataType::Type::kFloat32) {
          __ Cvtsl(dst, FTMP);
        } else {
          __ Cvtdl(dst, FTMP);
        }
      } else {
        QuickEntrypointEnum entrypoint =
            (result_type == DataType::Type::kFloat32) ? kQuickL2f : kQuickL2d;
        codegen_->InvokeRuntime(entrypoint, conversion, conversion->GetDexPc());
        if (result_type == DataType::Type::kFloat32) {
          CheckEntrypointTypes<kQuickL2f, float, int64_t>();
        } else {
          CheckEntrypointTypes<kQuickL2d, double, int64_t>();
        }
      }
    } else {
      Register src = locations->InAt(0).AsRegister<Register>();
      FRegister dst = locations->Out().AsFpuRegister<FRegister>();
      __ Mtc1(src, FTMP);
      if (result_type == DataType::Type::kFloat32) {
        __ Cvtsw(dst, FTMP);
      } else {
        __ Cvtdw(dst, FTMP);
      }
    }
  } else if (DataType::IsIntegralType(result_type) && DataType::IsFloatingPointType(input_type)) {
    CHECK(result_type == DataType::Type::kInt32 || result_type == DataType::Type::kInt64);

    // When NAN2008=1 (R6), the truncate instruction caps the output at the minimum/maximum
    // value of the output type if the input is outside of the range after the truncation or
    // produces 0 when the input is a NaN. IOW, the three special cases produce three distinct
    // results. This matches the desired float/double-to-int/long conversion exactly.
    //
    // When NAN2008=0 (R2 and before), the truncate instruction produces the maximum positive
    // value when the input is either a NaN or is outside of the range of the output type
    // after the truncation. IOW, the three special cases (NaN, too small, too big) produce
    // the same result.
    //
    // The code takes care of the different behaviors by first comparing the input to the
    // minimum output value (-2**63 for truncating to long, -2**31 for truncating to int).
    // If the input is greater than or equal to the minimum, it proceeds to the truncate
    // instruction, which will handle such an input the same way irrespective of NAN2008.
    // Otherwise the input is compared to itself to determine whether it is a NaN or not
    // in order to return either zero or the minimum value.
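    // E.g. a float-to-int conversion must yield: NaN -> 0, -1e20f -> INT32_MIN,
    // +1e20f -> INT32_MAX. A single trunc.w.s gives all three on R6, while on R2 the
    // compare-and-select sequence below handles the NaN and too-small inputs that
    // trunc.w.s would otherwise map to the maximum positive value.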
    if (result_type == DataType::Type::kInt64) {
      if (isR6) {
        // trunc.l.s/trunc.l.d requires MIPSR2+ with FR=1. MIPS32R6 is implemented as a secondary
        // architecture on top of MIPS64R6, which has FR=1, and therefore can use the instruction.
        FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
        Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
        Register dst_low = locations->Out().AsRegisterPairLow<Register>();

        if (input_type == DataType::Type::kFloat32) {
          __ TruncLS(FTMP, src);
        } else {
          __ TruncLD(FTMP, src);
        }
        __ Mfc1(dst_low, FTMP);
        __ Mfhc1(dst_high, FTMP);
      } else {
        QuickEntrypointEnum entrypoint =
            (input_type == DataType::Type::kFloat32) ? kQuickF2l : kQuickD2l;
        codegen_->InvokeRuntime(entrypoint, conversion, conversion->GetDexPc());
        if (input_type == DataType::Type::kFloat32) {
          CheckEntrypointTypes<kQuickF2l, int64_t, float>();
        } else {
          CheckEntrypointTypes<kQuickD2l, int64_t, double>();
        }
      }
    } else {
      FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
      Register dst = locations->Out().AsRegister<Register>();
      MipsLabel truncate;
      MipsLabel done;

      if (!isR6) {
        if (input_type == DataType::Type::kFloat32) {
          uint32_t min_val = bit_cast<uint32_t, float>(std::numeric_limits<int32_t>::min());
          __ LoadConst32(TMP, min_val);
          __ Mtc1(TMP, FTMP);
        } else {
          uint64_t min_val = bit_cast<uint64_t, double>(std::numeric_limits<int32_t>::min());
          __ LoadConst32(TMP, High32Bits(min_val));
          __ Mtc1(ZERO, FTMP);
          __ MoveToFpuHigh(TMP, FTMP);
        }

        if (input_type == DataType::Type::kFloat32) {
          __ ColeS(0, FTMP, src);
        } else {
          __ ColeD(0, FTMP, src);
        }
        __ Bc1t(0, &truncate);

        if (input_type == DataType::Type::kFloat32) {
          __ CeqS(0, src, src);
        } else {
          __ CeqD(0, src, src);
        }
        __ LoadConst32(dst, std::numeric_limits<int32_t>::min());
        __ Movf(dst, ZERO, 0);

        __ B(&done);

        __ Bind(&truncate);
      }

      if (input_type == DataType::Type::kFloat32) {
        __ TruncWS(FTMP, src);
      } else {
        __ TruncWD(FTMP, src);
      }
      __ Mfc1(dst, FTMP);

      if (!isR6) {
        __ Bind(&done);
      }
    }
  } else if (DataType::IsFloatingPointType(result_type) &&
             DataType::IsFloatingPointType(input_type)) {
    FRegister dst = locations->Out().AsFpuRegister<FRegister>();
    FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
    if (result_type == DataType::Type::kFloat32) {
      __ Cvtsd(dst, src);
    } else {
      __ Cvtds(dst, src);
    }
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}

void LocationsBuilderMIPS::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorMIPS::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderMIPS::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS::VisitEqual(HEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitEqual(HEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitNotEqual(HNotEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitNotEqual(HNotEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitLessThan(HLessThan* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitLessThan(HLessThan* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitGreaterThan(HGreaterThan* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitGreaterThan(HGreaterThan* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitBelow(HBelow* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitBelow(HBelow* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitAbove(HAbove* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitAbove(HAbove* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
  HandleCondition(comp);
}

void InstructionCodeGeneratorMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
  HandleCondition(comp);
}

void LocationsBuilderMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
  LocationSummary* locations =
      new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (!codegen_->GetInstructionSetFeatures().IsR6()) {
    uint32_t num_entries = switch_instr->GetNumEntries();
    if (num_entries > InstructionCodeGeneratorMIPS::kPackedSwitchJumpTableThreshold) {
      // When there's no HMipsComputeBaseMethodAddress input, R2 uses the NAL
      // instruction to simulate PC-relative addressing when accessing the jump table.
      // NAL clobbers RA. Make sure RA is preserved.
      codegen_->ClobberRA();
    }
  }
}

void InstructionCodeGeneratorMIPS::GenPackedSwitchWithCompares(Register value_reg,
                                                               int32_t lower_bound,
                                                               uint32_t num_entries,
                                                               HBasicBlock* switch_block,
                                                               HBasicBlock* default_block) {
  // Create a set of compare/jumps.
  Register temp_reg = TMP;
  __ Addiu32(temp_reg, value_reg, -lower_bound);
  // Jump to default if the index is negative.
  // Note: We don't check the case where the index is positive while value < lower_bound,
  // because in that case index >= num_entries must hold as well; this saves one branch
  // instruction.
  __ Bltz(temp_reg, codegen_->GetLabelOf(default_block));

  const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
  // Jump to successors[0] if value == lower_bound.
  __ Beqz(temp_reg, codegen_->GetLabelOf(successors[0]));
  int32_t last_index = 0;
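  // Loop invariant: at the top of each iteration temp_reg == value - lower_bound - last_index,
  // and all cases up to and including last_index have already been dispatched.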
  for (; num_entries - last_index > 2; last_index += 2) {
    __ Addiu(temp_reg, temp_reg, -2);
    // Jump to successors[last_index + 1] if value < case_value[last_index + 2].
    __ Bltz(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
    // Jump to successors[last_index + 2] if value == case_value[last_index + 2].
    __ Beqz(temp_reg, codegen_->GetLabelOf(successors[last_index + 2]));
  }
  if (num_entries - last_index == 2) {
    // The last missing case_value.
    __ Addiu(temp_reg, temp_reg, -1);
    __ Beqz(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
  }

  // And the default for any other value.
  if (!codegen_->GoesToNextBlock(switch_block, default_block)) {
    __ B(codegen_->GetLabelOf(default_block));
  }
}

void InstructionCodeGeneratorMIPS::GenTableBasedPackedSwitch(Register value_reg,
                                                             Register constant_area,
                                                             int32_t lower_bound,
                                                             uint32_t num_entries,
                                                             HBasicBlock* switch_block,
                                                             HBasicBlock* default_block) {
  // Create a jump table.
  std::vector<MipsLabel*> labels(num_entries);
  const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
  for (uint32_t i = 0; i < num_entries; i++) {
    labels[i] = codegen_->GetLabelOf(successors[i]);
  }
  JumpTable* table = __ CreateJumpTable(std::move(labels));
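  // Each table entry is a 32-bit offset from the start of the table to the target
  // block's label; the table is addressed below via the constant area base, where
  // the assembler emits it.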

  // Is the value in range?
  __ Addiu32(TMP, value_reg, -lower_bound);
  if (IsInt<16>(static_cast<int32_t>(num_entries))) {
    __ Sltiu(AT, TMP, num_entries);
    __ Beqz(AT, codegen_->GetLabelOf(default_block));
  } else {
    __ LoadConst32(AT, num_entries);
    __ Bgeu(TMP, AT, codegen_->GetLabelOf(default_block));
  }

  // We are in the range of the table.
  // Load the target address from the jump table, indexing by the value.
  __ LoadLabelAddress(AT, constant_area, table->GetLabel());
  __ ShiftAndAdd(TMP, TMP, AT, 2, TMP);
  __ Lw(TMP, TMP, 0);
  // Compute the absolute target address by adding the table start address
  // (the table contains offsets to targets relative to its start).
  __ Addu(TMP, TMP, AT);
  // And jump.
  __ Jr(TMP);
  __ NopIfNoReordering();
}

void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
  int32_t lower_bound = switch_instr->GetStartValue();
  uint32_t num_entries = switch_instr->GetNumEntries();
  LocationSummary* locations = switch_instr->GetLocations();
  Register value_reg = locations->InAt(0).AsRegister<Register>();
  HBasicBlock* switch_block = switch_instr->GetBlock();
  HBasicBlock* default_block = switch_instr->GetDefaultBlock();

  if (num_entries > kPackedSwitchJumpTableThreshold) {
    // R6 uses PC-relative addressing to access the jump table.
    //
    // R2, OTOH, uses an HMipsComputeBaseMethodAddress input (when available)
    // to access the jump table and it is implemented by changing HPackedSwitch to
    // HMipsPackedSwitch, which bears HMipsComputeBaseMethodAddress (see
    // VisitMipsPackedSwitch()).
    //
    // When there's no HMipsComputeBaseMethodAddress input (e.g. in the presence of
    // irreducible loops), R2 uses the NAL instruction to simulate PC-relative
    // addressing.
    GenTableBasedPackedSwitch(value_reg,
                              ZERO,
                              lower_bound,
                              num_entries,
                              switch_block,
                              default_block);
  } else {
    GenPackedSwitchWithCompares(value_reg,
                                lower_bound,
                                num_entries,
                                switch_block,
                                default_block);
  }
}

void LocationsBuilderMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) {
  LocationSummary* locations =
      new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  // Constant area pointer (HMipsComputeBaseMethodAddress).
  locations->SetInAt(1, Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) {
  int32_t lower_bound = switch_instr->GetStartValue();
  uint32_t num_entries = switch_instr->GetNumEntries();
  LocationSummary* locations = switch_instr->GetLocations();
  Register value_reg = locations->InAt(0).AsRegister<Register>();
  Register constant_area = locations->InAt(1).AsRegister<Register>();
  HBasicBlock* switch_block = switch_instr->GetBlock();
  HBasicBlock* default_block = switch_instr->GetDefaultBlock();

  // This is an R2-only path. HPackedSwitch has been changed to
  // HMipsPackedSwitch, which bears HMipsComputeBaseMethodAddress
  // required to address the jump table relative to PC.
  GenTableBasedPackedSwitch(value_reg,
                            constant_area,
                            lower_bound,
                            num_entries,
                            switch_block,
                            default_block);
}

void LocationsBuilderMIPS::VisitMipsComputeBaseMethodAddress(
    HMipsComputeBaseMethodAddress* insn) {
  LocationSummary* locations =
      new (GetGraph()->GetAllocator()) LocationSummary(insn, LocationSummary::kNoCall);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS::VisitMipsComputeBaseMethodAddress(
    HMipsComputeBaseMethodAddress* insn) {
  LocationSummary* locations = insn->GetLocations();
  Register reg = locations->Out().AsRegister<Register>();

  CHECK(!codegen_->GetInstructionSetFeatures().IsR6());

  // Generate a dummy PC-relative call to obtain PC.
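  // NAL ("no-op and link") is a branch-and-link that never branches: it only deposits
  // the return address (the PC past its delay slot) in RA, yielding a known PC value.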
  __ Nal();
  // Grab the return address off RA.
  __ Move(reg, RA);

  // Remember this offset (the obtained PC value) for later use with the constant area.
  __ BindPcRelBaseLabel();
}

void LocationsBuilderMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
  // The trampoline uses the same calling convention as dex calling conventions,
  // except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
  // the method_idx.
  HandleInvoke(invoke);
}

void InstructionCodeGeneratorMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
  codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
}

void LocationsBuilderMIPS::VisitClassTableGet(HClassTableGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS::VisitClassTableGet(HClassTableGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
    uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
        instruction->GetIndex(), kMipsPointerSize).SizeValue();
    __ LoadFromOffset(kLoadWord,
                      locations->Out().AsRegister<Register>(),
                      locations->InAt(0).AsRegister<Register>(),
                      method_offset);
  } else {
    uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
        instruction->GetIndex(), kMipsPointerSize));
    __ LoadFromOffset(kLoadWord,
                      locations->Out().AsRegister<Register>(),
                      locations->InAt(0).AsRegister<Register>(),
                      mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
    __ LoadFromOffset(kLoadWord,
                      locations->Out().AsRegister<Register>(),
                      locations->Out().AsRegister<Register>(),
                      method_offset);
  }
}

void LocationsBuilderMIPS::VisitIntermediateAddress(HIntermediateAddress* instruction
                                                    ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS::VisitIntermediateAddress(HIntermediateAddress* instruction
                                                            ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

#undef __
#undef QUICK_ENTRY_POINT

}  // namespace mips
}  // namespace art