/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "intrinsics_mips64.h"

#include "arch/mips64/instruction_set_features_mips64.h"
#include "art_method.h"
#include "code_generator_mips64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
#include "mirror/string.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "utils/mips64/assembler_mips64.h"
#include "utils/mips64/constants_mips64.h"

namespace art {

namespace mips64 {

IntrinsicLocationsBuilderMIPS64::IntrinsicLocationsBuilderMIPS64(CodeGeneratorMIPS64* codegen)
  : codegen_(codegen), arena_(codegen->GetGraph()->GetArena()) {
}

Mips64Assembler* IntrinsicCodeGeneratorMIPS64::GetAssembler() {
  return reinterpret_cast<Mips64Assembler*>(codegen_->GetAssembler());
}

ArenaAllocator* IntrinsicCodeGeneratorMIPS64::GetAllocator() {
  return codegen_->GetGraph()->GetArena();
}

#define __ codegen->GetAssembler()->

static void MoveFromReturnRegister(Location trg,
                                   Primitive::Type type,
                                   CodeGeneratorMIPS64* codegen) {
  if (!trg.IsValid()) {
    DCHECK_EQ(type, Primitive::kPrimVoid);
    return;
  }

  DCHECK_NE(type, Primitive::kPrimVoid);

  if (Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) {
    GpuRegister trg_reg = trg.AsRegister<GpuRegister>();
    if (trg_reg != V0) {
      __ Move(trg_reg, V0);
    }
  } else {
    FpuRegister trg_reg = trg.AsFpuRegister<FpuRegister>();
    if (trg_reg != F0) {
      if (type == Primitive::kPrimFloat) {
        __ MovS(trg_reg, F0);
      } else {
        __ MovD(trg_reg, F0);
      }
    }
  }
}

static void MoveArguments(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
  InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
  IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
}

// Slow-path for fallback (calling the managed code to handle the
// intrinsic) in an intrinsified call. This will copy the arguments
// into the positions for a regular call.
//
// Note: The actual parameters are required to be in the locations
//       given by the invoke's location summary. If an intrinsic
//       modifies those locations before a slowpath call, they must be
//       restored!
class IntrinsicSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit IntrinsicSlowPathMIPS64(HInvoke* invoke)
     : SlowPathCodeMIPS64(invoke), invoke_(invoke) { }

  void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
    CodeGeneratorMIPS64* codegen = down_cast<CodeGeneratorMIPS64*>(codegen_in);

    __ Bind(GetEntryLabel());

    SaveLiveRegisters(codegen, invoke_->GetLocations());

    MoveArguments(invoke_, codegen);

    if (invoke_->IsInvokeStaticOrDirect()) {
      codegen->GenerateStaticOrDirectCall(
          invoke_->AsInvokeStaticOrDirect(), Location::RegisterLocation(A0), this);
    } else {
      codegen->GenerateVirtualCall(
          invoke_->AsInvokeVirtual(), Location::RegisterLocation(A0), this);
    }

    // Copy the result back to the expected output.
    Location out = invoke_->GetLocations()->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister());  // TODO: Replace this when we support output in memory.
      DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      MoveFromReturnRegister(out, invoke_->GetType(), codegen);
    }

    RestoreLiveRegisters(codegen, invoke_->GetLocations());
    __ Bc(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS64"; }

 private:
  // The instruction where this slow path is happening.
  HInvoke* const invoke_;

  DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathMIPS64);
};

#undef __

bool IntrinsicLocationsBuilderMIPS64::TryDispatch(HInvoke* invoke) {
  Dispatch(invoke);
  LocationSummary* res = invoke->GetLocations();
  return res != nullptr && res->Intrinsified();
}

#define __ assembler->

static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresRegister());
}

static void MoveFPToInt(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) {
  FpuRegister in  = locations->InAt(0).AsFpuRegister<FpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();

  if (is64bit) {
    __ Dmfc1(out, in);
  } else {
    __ Mfc1(out, in);
  }
}

// long java.lang.Double.doubleToRawLongBits(double)
void IntrinsicLocationsBuilderMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
  CreateFPToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}

// int java.lang.Float.floatToRawIntBits(float)
void IntrinsicLocationsBuilderMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
  CreateFPToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}

static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresFpuRegister());
}

static void MoveIntToFP(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) {
  GpuRegister in  = locations->InAt(0).AsRegister<GpuRegister>();
  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();

  if (is64bit) {
    __ Dmtc1(in, out);
  } else {
    __ Mtc1(in, out);
  }
}

// double java.lang.Double.longBitsToDouble(long)
void IntrinsicLocationsBuilderMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
  CreateIntToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}

// float java.lang.Float.intBitsToFloat(int)
void IntrinsicLocationsBuilderMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
  CreateIntToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}

static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

static void GenReverseBytes(LocationSummary* locations,
                            Primitive::Type type,
                            Mips64Assembler* assembler) {
  GpuRegister in  = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();

  switch (type) {
    case Primitive::kPrimShort:
      __ Dsbh(out, in);
      __ Seh(out, out);
      break;
    case Primitive::kPrimInt:
      __ Rotr(out, in, 16);
      __ Wsbh(out, out);
      break;
    case Primitive::kPrimLong:
      __ Dsbh(out, in);
      __ Dshd(out, out);
      break;
    default:
      LOG(FATAL) << "Unexpected size for reverse-bytes: " << type;
      UNREACHABLE();
  }
}

// int java.lang.Integer.reverseBytes(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerReverseBytes(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverseBytes(HInvoke* invoke) {
  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
}

// long java.lang.Long.reverseBytes(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongReverseBytes(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitLongReverseBytes(HInvoke* invoke) {
  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
}

// short java.lang.Short.reverseBytes(short)
void IntrinsicLocationsBuilderMIPS64::VisitShortReverseBytes(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitShortReverseBytes(HInvoke* invoke) {
  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
}

static void GenNumberOfLeadingZeroes(LocationSummary* locations,
                                     bool is64bit,
                                     Mips64Assembler* assembler) {
  GpuRegister in  = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();

  if (is64bit) {
    __ Dclz(out, in);
  } else {
    __ Clz(out, in);
  }
}

// int java.lang.Integer.numberOfLeadingZeros(int i)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}

// int java.lang.Long.numberOfLeadingZeros(long i)
void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}

static void GenNumberOfTrailingZeroes(LocationSummary* locations,
                                      bool is64bit,
                                      Mips64Assembler* assembler) {
  Location in = locations->InAt(0);
  Location out = locations->Out();

  if (is64bit) {
    __ Dsbh(out.AsRegister<GpuRegister>(), in.AsRegister<GpuRegister>());
    __ Dshd(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
    __ Dbitswap(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
    __ Dclz(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
  } else {
    __ Rotr(out.AsRegister<GpuRegister>(), in.AsRegister<GpuRegister>(), 16);
    __ Wsbh(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
    __ Bitswap(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
    __ Clz(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
  }
}

// int java.lang.Integer.numberOfTrailingZeros(int i)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}

// int java.lang.Long.numberOfTrailingZeros(long i)
void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}

static void GenReverse(LocationSummary* locations,
                       Primitive::Type type,
                       Mips64Assembler* assembler) {
  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);

  GpuRegister in  = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();

  if (type == Primitive::kPrimInt) {
    __ Rotr(out, in, 16);
    __ Wsbh(out, out);
    __ Bitswap(out, out);
  } else {
    __ Dsbh(out, in);
    __ Dshd(out, out);
    __ Dbitswap(out, out);
  }
}

// int java.lang.Integer.reverse(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerReverse(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverse(HInvoke* invoke) {
  GenReverse(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
}

// long java.lang.Long.reverse(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongReverse(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitLongReverse(HInvoke* invoke) {
  GenReverse(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
}

static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}

static void GenBitCount(LocationSummary* locations,
                        const Primitive::Type type,
                        Mips64Assembler* assembler) {
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();

  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);

  // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
  //
  // A generalization of the best bit counting method to integers of
  // bit-widths up to 128 (parameterized by type T) is this:
  //
  // v = v - ((v >> 1) & (T)~(T)0/3);                           // temp
  // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3);      // temp
  // v = (v + (v >> 4)) & (T)~(T)0/255*15;                      // temp
  // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; // count
  //
  // For comparison, for 32-bit quantities, this algorithm can be executed
  // using 20 MIPS instructions (the calls to LoadConst32() generate two
  // machine instructions each for the values being used in this algorithm).
  // An unrolled loop-based algorithm requires 25 instructions.
  //
  // For a 64-bit operand this can be performed in 24 instructions compared
  // to an unrolled loop-based algorithm which requires 38 instructions.
  //
  // There are algorithms which are faster in the cases where very few
  // bits are set but the algorithm here attempts to minimize the total
  // number of instructions executed even when a large number of bits
  // are set.
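  //
  // Illustrative only (not emitted): specializing the method above to
  // T = uint32_t yields the constants used in the assembly below:
  //
  //   v = v - ((v >> 1) & 0x55555555);
  //   v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
  //   v = (v + (v >> 4)) & 0x0F0F0F0F;
  //   c = (v * 0x01010101) >> 24;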

  if (type == Primitive::kPrimInt) {
    __ Srl(TMP, in, 1);
    __ LoadConst32(AT, 0x55555555);
    __ And(TMP, TMP, AT);
    __ Subu(TMP, in, TMP);
    __ LoadConst32(AT, 0x33333333);
    __ And(out, TMP, AT);
    __ Srl(TMP, TMP, 2);
    __ And(TMP, TMP, AT);
    __ Addu(TMP, out, TMP);
    __ Srl(out, TMP, 4);
    __ Addu(out, out, TMP);
    __ LoadConst32(AT, 0x0F0F0F0F);
    __ And(out, out, AT);
    __ LoadConst32(TMP, 0x01010101);
    __ MulR6(out, out, TMP);
    __ Srl(out, out, 24);
  } else if (type == Primitive::kPrimLong) {
    __ Dsrl(TMP, in, 1);
    __ LoadConst64(AT, 0x5555555555555555L);
    __ And(TMP, TMP, AT);
    __ Dsubu(TMP, in, TMP);
    __ LoadConst64(AT, 0x3333333333333333L);
    __ And(out, TMP, AT);
    __ Dsrl(TMP, TMP, 2);
    __ And(TMP, TMP, AT);
    __ Daddu(TMP, out, TMP);
    __ Dsrl(out, TMP, 4);
    __ Daddu(out, out, TMP);
    __ LoadConst64(AT, 0x0F0F0F0F0F0F0F0FL);
    __ And(out, out, AT);
    __ LoadConst64(TMP, 0x0101010101010101L);
    __ Dmul(out, out, TMP);
    __ Dsrl32(out, out, 24);
  }
}

// int java.lang.Integer.bitCount(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerBitCount(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitIntegerBitCount(HInvoke* invoke) {
  GenBitCount(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
}

// int java.lang.Long.bitCount(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongBitCount(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitLongBitCount(HInvoke* invoke) {
  GenBitCount(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
}

static void MathAbsFP(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) {
  FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();

  if (is64bit) {
    __ AbsD(out, in);
  } else {
    __ AbsS(out, in);
  }
}

// double java.lang.Math.abs(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathAbsDouble(HInvoke* invoke) {
  CreateFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathAbsDouble(HInvoke* invoke) {
  MathAbsFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}

// float java.lang.Math.abs(float)
void IntrinsicLocationsBuilderMIPS64::VisitMathAbsFloat(HInvoke* invoke) {
  CreateFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathAbsFloat(HInvoke* invoke) {
  MathAbsFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}

static void CreateIntToInt(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

static void GenAbsInteger(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) {
  GpuRegister in  = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();

  if (is64bit) {
    __ Dsra32(AT, in, 31);
    __ Xor(out, in, AT);
    __ Dsubu(out, out, AT);
  } else {
    __ Sra(AT, in, 31);
    __ Xor(out, in, AT);
    __ Subu(out, out, AT);
  }
}

// int java.lang.Math.abs(int)
void IntrinsicLocationsBuilderMIPS64::VisitMathAbsInt(HInvoke* invoke) {
  CreateIntToInt(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathAbsInt(HInvoke* invoke) {
  GenAbsInteger(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}

// long java.lang.Math.abs(long)
void IntrinsicLocationsBuilderMIPS64::VisitMathAbsLong(HInvoke* invoke) {
  CreateIntToInt(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathAbsLong(HInvoke* invoke) {
  GenAbsInteger(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}

static void GenMinMaxFP(LocationSummary* locations,
                        bool is_min,
                        Primitive::Type type,
                        Mips64Assembler* assembler) {
  FpuRegister a = locations->InAt(0).AsFpuRegister<FpuRegister>();
  FpuRegister b = locations->InAt(1).AsFpuRegister<FpuRegister>();
  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();

  Mips64Label noNaNs;
  Mips64Label done;
  FpuRegister ftmp = ((out != a) && (out != b)) ? out : FTMP;

  // When Java computes min/max it prefers a NaN to a number; the
  // behavior of MIPSR6 is to prefer numbers to NaNs, i.e., if one of
  // the inputs is a NaN and the other is a valid number, the MIPS
  // instruction will return the number; Java wants the NaN value
  // returned. This is why there is extra logic preceding the use of
  // the MIPS min.fmt/max.fmt instructions. If either a or b holds a
  // NaN, return the NaN, otherwise return the min/max.
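  //
  // Illustrative only: Java requires Math.min(NaN, 1.0) == NaN, while a
  // bare MIPSR6 min.d would yield 1.0, so the NaN filtering below must
  // run before min.fmt/max.fmt is used.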
  if (type == Primitive::kPrimDouble) {
    __ CmpUnD(FTMP, a, b);
    __ Bc1eqz(FTMP, &noNaNs);

    // One of the inputs is a NaN.
    __ CmpEqD(ftmp, a, a);
    // If a == a then b is the NaN, otherwise a is the NaN.
    __ SelD(ftmp, a, b);

    if (ftmp != out) {
      __ MovD(out, ftmp);
    }

    __ Bc(&done);

    __ Bind(&noNaNs);

    if (is_min) {
      __ MinD(out, a, b);
    } else {
      __ MaxD(out, a, b);
    }
  } else {
    DCHECK_EQ(type, Primitive::kPrimFloat);
    __ CmpUnS(FTMP, a, b);
    __ Bc1eqz(FTMP, &noNaNs);

    // One of the inputs is a NaN.
    __ CmpEqS(ftmp, a, a);
    // If a == a then b is the NaN, otherwise a is the NaN.
    __ SelS(ftmp, a, b);

    if (ftmp != out) {
      __ MovS(out, ftmp);
    }

    __ Bc(&done);

    __ Bind(&noNaNs);

    if (is_min) {
      __ MinS(out, a, b);
    } else {
      __ MaxS(out, a, b);
    }
  }

  __ Bind(&done);
}

static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetInAt(1, Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}

// double java.lang.Math.min(double, double)
void IntrinsicLocationsBuilderMIPS64::VisitMathMinDoubleDouble(HInvoke* invoke) {
  CreateFPFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathMinDoubleDouble(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, Primitive::kPrimDouble, GetAssembler());
}

// float java.lang.Math.min(float, float)
void IntrinsicLocationsBuilderMIPS64::VisitMathMinFloatFloat(HInvoke* invoke) {
  CreateFPFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathMinFloatFloat(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, Primitive::kPrimFloat, GetAssembler());
}

// double java.lang.Math.max(double, double)
void IntrinsicLocationsBuilderMIPS64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
  CreateFPFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, Primitive::kPrimDouble, GetAssembler());
}

// float java.lang.Math.max(float, float)
void IntrinsicLocationsBuilderMIPS64::VisitMathMaxFloatFloat(HInvoke* invoke) {
  CreateFPFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathMaxFloatFloat(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, Primitive::kPrimFloat, GetAssembler());
}

static void GenMinMax(LocationSummary* locations,
                      bool is_min,
                      Mips64Assembler* assembler) {
  GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();

  if (lhs == rhs) {
    if (out != lhs) {
      __ Move(out, lhs);
    }
  } else {
    // Some architectures, such as ARM and MIPS (prior to r6), have a
    // conditional move instruction which only changes the target
    // (output) register if the condition is true (MIPS prior to r6 had
    // MOVF, MOVT, and MOVZ). The SELEQZ and SELNEZ instructions always
    // change the target (output) register.  If the condition is true the
    // output register gets the contents of the "rs" register; otherwise,
    // the output register is set to zero. One consequence of this is
    // that to implement something like "rd = c==0 ? rs : rt" MIPS64r6
    // needs to use a pair of SELEQZ/SELNEZ instructions.  After
    // executing this pair of instructions one of the output registers
    // from the pair will necessarily contain zero. Then the code ORs the
    // output registers from the SELEQZ/SELNEZ instructions to get the
    // final result.
    //
    // The initial test to see if the output register is the same as the
    // first input register is needed to make sure that the value in the
    // first input register isn't clobbered before we've finished
    // computing the output value. The logic in the corresponding else
    // clause performs the same task but makes sure the second input
    // register isn't clobbered in the event that it's the same register
    // as the output register; the else clause also handles the case
    // where the output register is distinct from both the first and the
    // second input registers.
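    //
    // As an illustrative sketch (not emitted verbatim), "rd = c==0 ? rs : rt"
    // maps to the pair:
    //
    //   seleqz rd, rs, c   # rd = (c == 0) ? rs : 0
    //   selnez AT, rt, c   # AT = (c != 0) ? rt : 0
    //   or     rd, rd, AT  # one of rd/AT is zero, so OR keeps the selection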
    if (out == lhs) {
      __ Slt(AT, rhs, lhs);
      if (is_min) {
        __ Seleqz(out, lhs, AT);
        __ Selnez(AT, rhs, AT);
      } else {
        __ Selnez(out, lhs, AT);
        __ Seleqz(AT, rhs, AT);
      }
    } else {
      __ Slt(AT, lhs, rhs);
      if (is_min) {
        __ Seleqz(out, rhs, AT);
        __ Selnez(AT, lhs, AT);
      } else {
        __ Selnez(out, rhs, AT);
        __ Seleqz(AT, lhs, AT);
      }
    }
    __ Or(out, out, AT);
  }
}

static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

// int java.lang.Math.min(int, int)
void IntrinsicLocationsBuilderMIPS64::VisitMathMinIntInt(HInvoke* invoke) {
  CreateIntIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathMinIntInt(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(), /* is_min */ true, GetAssembler());
}

// long java.lang.Math.min(long, long)
void IntrinsicLocationsBuilderMIPS64::VisitMathMinLongLong(HInvoke* invoke) {
  CreateIntIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathMinLongLong(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(), /* is_min */ true, GetAssembler());
}

// int java.lang.Math.max(int, int)
void IntrinsicLocationsBuilderMIPS64::VisitMathMaxIntInt(HInvoke* invoke) {
  CreateIntIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathMaxIntInt(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(), /* is_min */ false, GetAssembler());
}

// long java.lang.Math.max(long, long)
void IntrinsicLocationsBuilderMIPS64::VisitMathMaxLongLong(HInvoke* invoke) {
  CreateIntIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathMaxLongLong(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(), /* is_min */ false, GetAssembler());
}

// double java.lang.Math.sqrt(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathSqrt(HInvoke* invoke) {
  CreateFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathSqrt(HInvoke* invoke) {
  LocationSummary* locations = invoke->GetLocations();
  Mips64Assembler* assembler = GetAssembler();
  FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();

  __ SqrtD(out, in);
}

static void CreateFPToFP(ArenaAllocator* arena,
                         HInvoke* invoke,
                         Location::OutputOverlap overlaps = Location::kOutputOverlap) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresFpuRegister(), overlaps);
}

// double java.lang.Math.rint(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathRint(HInvoke* invoke) {
  CreateFPToFP(arena_, invoke, Location::kNoOutputOverlap);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathRint(HInvoke* invoke) {
  LocationSummary* locations = invoke->GetLocations();
  Mips64Assembler* assembler = GetAssembler();
  FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();

  __ RintD(out, in);
}

// double java.lang.Math.floor(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathFloor(HInvoke* invoke) {
  CreateFPToFP(arena_, invoke);
}

constexpr uint16_t kFPLeaveUnchanged = kPositiveZero |
                                       kPositiveInfinity |
                                       kNegativeZero |
                                       kNegativeInfinity |
                                       kQuietNaN |
                                       kSignalingNaN;

enum FloatRoundingMode {
  kFloor,
  kCeil,
};

static void GenRoundingMode(LocationSummary* locations,
                            FloatRoundingMode mode,
                            Mips64Assembler* assembler) {
  FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();

  DCHECK_NE(in, out);

  Mips64Label done;

  // double floor/ceil(double in) {
  //     if in.isNaN || in.isInfinite || in.isZero {
  //         return in;
  //     }
  __ ClassD(out, in);
  __ Dmfc1(AT, out);
  __ Andi(AT, AT, kFPLeaveUnchanged);   // +0.0 | +Inf | -0.0 | -Inf | qNaN | sNaN
  __ MovD(out, in);
  __ Bnezc(AT, &done);

  //     Long outLong = floor/ceil(in);
  //     if ((outLong == Long.MAX_VALUE) || (outLong == Long.MIN_VALUE)) {
  //         // floor()/ceil() has almost certainly returned a value
  //         // which can't be successfully represented as a signed
  //         // 64-bit number.  Java expects that the input value will
  //         // be returned in these cases.
  //         // There is also a small probability that floor(in)/ceil(in)
  //         // correctly truncates/rounds up the input value to
  //         // Long.MAX_VALUE or Long.MIN_VALUE. In these cases, this
  //         // exception handling code still does the correct thing.
  //         return in;
  //     }
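  //
  //     (Illustrative only: for in = 1e19, which exceeds Long.MAX_VALUE of
  //     roughly 9.22e18, floor.l.d/ceil.l.d produce the saturated value
  //     Long.MAX_VALUE, so the check below returns the original input.)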
  if (mode == kFloor) {
    __ FloorLD(out, in);
  } else if (mode == kCeil) {
    __ CeilLD(out, in);
  }
  __ Dmfc1(AT, out);
  __ MovD(out, in);
  __ Daddiu(TMP, AT, 1);
  __ Dati(TMP, 0x8000);  // TMP = AT + 0x8000 0000 0000 0001
                         // or    AT - 0x7FFF FFFF FFFF FFFF.
                         // IOW, TMP = 1 if AT = Long.MIN_VALUE
                         // or   TMP = 0 if AT = Long.MAX_VALUE.
  __ Dsrl(TMP, TMP, 1);  // TMP = 0 if AT = Long.MIN_VALUE
                         //         or AT = Long.MAX_VALUE.
  __ Beqzc(TMP, &done);

  //     double out = outLong;
  //     return out;
  __ Dmtc1(AT, out);
  __ Cvtdl(out, out);
  __ Bind(&done);
  // }
}

void IntrinsicCodeGeneratorMIPS64::VisitMathFloor(HInvoke* invoke) {
  GenRoundingMode(invoke->GetLocations(), kFloor, GetAssembler());
}

// double java.lang.Math.ceil(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathCeil(HInvoke* invoke) {
  CreateFPToFP(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathCeil(HInvoke* invoke) {
  GenRoundingMode(invoke->GetLocations(), kCeil, GetAssembler());
}

static void GenRound(LocationSummary* locations, Mips64Assembler* assembler, Primitive::Type type) {
  FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
  FpuRegister half = locations->GetTemp(0).AsFpuRegister<FpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();

  DCHECK(type == Primitive::kPrimFloat || type == Primitive::kPrimDouble);

  Mips64Label done;

  // out = floor(in);
  //
  // if (out != MAX_VALUE && out != MIN_VALUE) {
  //   TMP = ((in - out) >= 0.5) ? -1 : 0;
  //   return out -= TMP;
  // }
  // return out;
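  //
  // (Illustrative only: for in = -2.5, floor(in) = -3.0 and in - out = 0.5,
  // so out is bumped to -2, matching Java's round-half-up semantics.)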

  // out = floor(in);
  if (type == Primitive::kPrimDouble) {
    __ FloorLD(FTMP, in);
    __ Dmfc1(out, FTMP);
  } else {
    __ FloorWS(FTMP, in);
    __ Mfc1(out, FTMP);
  }

  // if (out != MAX_VALUE && out != MIN_VALUE)
  if (type == Primitive::kPrimDouble) {
    __ Daddiu(TMP, out, 1);
    __ Dati(TMP, 0x8000);  // TMP = out + 0x8000 0000 0000 0001
                           // or    out - 0x7FFF FFFF FFFF FFFF.
                           // IOW, TMP = 1 if out = Long.MIN_VALUE
                           // or   TMP = 0 if out = Long.MAX_VALUE.
    __ Dsrl(TMP, TMP, 1);  // TMP = 0 if out = Long.MIN_VALUE
                           //         or out = Long.MAX_VALUE.
    __ Beqzc(TMP, &done);
  } else {
    __ Addiu(TMP, out, 1);
    __ Aui(TMP, TMP, 0x8000);  // TMP = out + 0x8000 0001
                               // or    out - 0x7FFF FFFF.
                               // IOW, TMP = 1 if out = Int.MIN_VALUE
                               // or   TMP = 0 if out = Int.MAX_VALUE.
    __ Srl(TMP, TMP, 1);       // TMP = 0 if out = Int.MIN_VALUE
                               //         or out = Int.MAX_VALUE.
    __ Beqzc(TMP, &done);
  }

  // TMP = (0.5 <= (in - out)) ? -1 : 0;
  if (type == Primitive::kPrimDouble) {
    __ Cvtdl(FTMP, FTMP);  // Convert output of floor.l.d back to "double".
    __ LoadConst64(AT, bit_cast<int64_t, double>(0.5));
    __ SubD(FTMP, in, FTMP);
    __ Dmtc1(AT, half);
    __ CmpLeD(FTMP, half, FTMP);
    __ Dmfc1(TMP, FTMP);
  } else {
    __ Cvtsw(FTMP, FTMP);  // Convert output of floor.w.s back to "float".
    __ LoadConst32(AT, bit_cast<int32_t, float>(0.5f));
    __ SubS(FTMP, in, FTMP);
    __ Mtc1(AT, half);
    __ CmpLeS(FTMP, half, FTMP);
    __ Mfc1(TMP, FTMP);
  }

  // Return out -= TMP.
  if (type == Primitive::kPrimDouble) {
    __ Dsubu(out, out, TMP);
  } else {
    __ Subu(out, out, TMP);
  }

  __ Bind(&done);
}

// int java.lang.Math.round(float)
void IntrinsicLocationsBuilderMIPS64::VisitMathRoundFloat(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
                                                            kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->AddTemp(Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorMIPS64::VisitMathRoundFloat(HInvoke* invoke) {
  GenRound(invoke->GetLocations(), GetAssembler(), Primitive::kPrimFloat);
}

// long java.lang.Math.round(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathRoundDouble(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
                                                            kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->AddTemp(Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorMIPS64::VisitMathRoundDouble(HInvoke* invoke) {
  GenRound(invoke->GetLocations(), GetAssembler(), Primitive::kPrimDouble);
}

// byte libcore.io.Memory.peekByte(long address)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekByte(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekByte(HInvoke* invoke) {
  Mips64Assembler* assembler = GetAssembler();
  GpuRegister adr = invoke->GetLocations()->InAt(0).AsRegister<GpuRegister>();
  GpuRegister out = invoke->GetLocations()->Out().AsRegister<GpuRegister>();

  __ Lb(out, adr, 0);
}

// short libcore.io.Memory.peekShort(long address)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekShortNative(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekShortNative(HInvoke* invoke) {
  Mips64Assembler* assembler = GetAssembler();
  GpuRegister adr = invoke->GetLocations()->InAt(0).AsRegister<GpuRegister>();
  GpuRegister out = invoke->GetLocations()->Out().AsRegister<GpuRegister>();

  __ Lh(out, adr, 0);
}

// int libcore.io.Memory.peekInt(long address)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekIntNative(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekIntNative(HInvoke* invoke) {
  Mips64Assembler* assembler = GetAssembler();
  GpuRegister adr = invoke->GetLocations()->InAt(0).AsRegister<GpuRegister>();
  GpuRegister out = invoke->GetLocations()->Out().AsRegister<GpuRegister>();

  __ Lw(out, adr, 0);
}

// long libcore.io.Memory.peekLong(long address)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekLongNative(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekLongNative(HInvoke* invoke) {
  Mips64Assembler* assembler = GetAssembler();
  GpuRegister adr = invoke->GetLocations()->InAt(0).AsRegister<GpuRegister>();
  GpuRegister out = invoke->GetLocations()->Out().AsRegister<GpuRegister>();

  __ Ld(out, adr, 0);
}

static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
}

// void libcore.io.Memory.pokeByte(long address, byte value)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeByte(HInvoke* invoke) {
  CreateIntIntToVoidLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeByte(HInvoke* invoke) {
  Mips64Assembler* assembler = GetAssembler();
  GpuRegister adr = invoke->GetLocations()->InAt(0).AsRegister<GpuRegister>();
  GpuRegister val = invoke->GetLocations()->InAt(1).AsRegister<GpuRegister>();

  __ Sb(val, adr, 0);
}

// void libcore.io.Memory.pokeShort(long address, short value)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeShortNative(HInvoke* invoke) {
  CreateIntIntToVoidLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeShortNative(HInvoke* invoke) {
  Mips64Assembler* assembler = GetAssembler();
  GpuRegister adr = invoke->GetLocations()->InAt(0).AsRegister<GpuRegister>();
  GpuRegister val = invoke->GetLocations()->InAt(1).AsRegister<GpuRegister>();

  __ Sh(val, adr, 0);
}

// void libcore.io.Memory.pokeInt(long address, int value)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeIntNative(HInvoke* invoke) {
  CreateIntIntToVoidLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeIntNative(HInvoke* invoke) {
  Mips64Assembler* assembler = GetAssembler();
  GpuRegister adr = invoke->GetLocations()->InAt(0).AsRegister<GpuRegister>();
  GpuRegister val = invoke->GetLocations()->InAt(1).AsRegister<GpuRegister>();

  __ Sw(val, adr, 0);
   1085 }
   1086 
   1087 // void libcore.io.Memory.pokeLong(long address, long value)
   1088 void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeLongNative(HInvoke* invoke) {
   1089   CreateIntIntToVoidLocations(arena_, invoke);
   1090 }
   1091 
   1092 void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeLongNative(HInvoke* invoke) {
   1093   Mips64Assembler* assembler = GetAssembler();
   1094   GpuRegister adr = invoke->GetLocations()->InAt(0).AsRegister<GpuRegister>();
   1095   GpuRegister val = invoke->GetLocations()->InAt(1).AsRegister<GpuRegister>();
   1096 
   1097   __ Sd(val, adr, 0);
   1098 }
   1099 
   1100 // Thread java.lang.Thread.currentThread()
   1101 void IntrinsicLocationsBuilderMIPS64::VisitThreadCurrentThread(HInvoke* invoke) {
   1102   LocationSummary* locations = new (arena_) LocationSummary(invoke,
   1103                                                             LocationSummary::kNoCall,
   1104                                                             kIntrinsified);
   1105   locations->SetOut(Location::RequiresRegister());
   1106 }
   1107 
   1108 void IntrinsicCodeGeneratorMIPS64::VisitThreadCurrentThread(HInvoke* invoke) {
   1109   Mips64Assembler* assembler = GetAssembler();
   1110   GpuRegister out = invoke->GetLocations()->Out().AsRegister<GpuRegister>();
   1111 
   1112   __ LoadFromOffset(kLoadUnsignedWord,
   1113                     out,
   1114                     TR,
   1115                     Thread::PeerOffset<kMips64PointerSize>().Int32Value());
   1116 }
   1117 
   1118 static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
   1119                                           HInvoke* invoke,
   1120                                           Primitive::Type type) {
   1121   bool can_call = kEmitCompilerReadBarrier &&
   1122       (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
   1123        invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
   1124   LocationSummary* locations = new (arena) LocationSummary(invoke,
   1125                                                            (can_call
   1126                                                                 ? LocationSummary::kCallOnSlowPath
   1127                                                                 : LocationSummary::kNoCall),
   1128                                                            kIntrinsified);
   1129   if (can_call && kUseBakerReadBarrier) {
   1130     locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
   1131   }
   1132   locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
   1133   locations->SetInAt(1, Location::RequiresRegister());
   1134   locations->SetInAt(2, Location::RequiresRegister());
   1135   locations->SetOut(Location::RequiresRegister(),
   1136                     (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
   1137   if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
   1138     // We need a temporary register for the read barrier marking slow
   1139     // path in InstructionCodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier.
   1140     locations->AddTemp(Location::RequiresRegister());
   1141   }
   1142 }
   1143 
   1144 // Note that the caller must supply a properly aligned memory address.
   1145 // If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur).
   1146 static void GenUnsafeGet(HInvoke* invoke,
   1147                          Primitive::Type type,
   1148                          bool is_volatile,
   1149                          CodeGeneratorMIPS64* codegen) {
   1150   LocationSummary* locations = invoke->GetLocations();
   1151   DCHECK((type == Primitive::kPrimInt) ||
   1152          (type == Primitive::kPrimLong) ||
   1153          (type == Primitive::kPrimNot)) << type;
   1154   Mips64Assembler* assembler = codegen->GetAssembler();
   1155   // Target register.
   1156   Location trg_loc = locations->Out();
   1157   GpuRegister trg = trg_loc.AsRegister<GpuRegister>();
   1158   // Object pointer.
   1159   Location base_loc = locations->InAt(1);
   1160   GpuRegister base = base_loc.AsRegister<GpuRegister>();
   1161   // Long offset.
   1162   Location offset_loc = locations->InAt(2);
   1163   GpuRegister offset = offset_loc.AsRegister<GpuRegister>();
   1164 
   1165   if (!(kEmitCompilerReadBarrier && kUseBakerReadBarrier && (type == Primitive::kPrimNot))) {
   1166     __ Daddu(TMP, base, offset);
   1167   }
   1168 
   1169   switch (type) {
   1170     case Primitive::kPrimLong:
   1171       __ Ld(trg, TMP, 0);
   1172       if (is_volatile) {
   1173         __ Sync(0);
   1174       }
   1175       break;
   1176 
   1177     case Primitive::kPrimInt:
   1178       __ Lw(trg, TMP, 0);
   1179       if (is_volatile) {
   1180         __ Sync(0);
   1181       }
   1182       break;
   1183 
   1184     case Primitive::kPrimNot:
   1185       if (kEmitCompilerReadBarrier) {
   1186         if (kUseBakerReadBarrier) {
   1187           Location temp = locations->GetTemp(0);
   1188           codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
   1189                                                              trg_loc,
   1190                                                              base,
   1191                                                              /* offset */ 0U,
   1192                                                              /* index */ offset_loc,
   1193                                                              TIMES_1,
   1194                                                              temp,
   1195                                                              /* needs_null_check */ false);
   1196           if (is_volatile) {
   1197             __ Sync(0);
   1198           }
   1199         } else {
   1200           __ Lwu(trg, TMP, 0);
   1201           if (is_volatile) {
   1202             __ Sync(0);
   1203           }
   1204           codegen->GenerateReadBarrierSlow(invoke,
   1205                                            trg_loc,
   1206                                            trg_loc,
   1207                                            base_loc,
   1208                                            /* offset */ 0U,
   1209                                            /* index */ offset_loc);
   1210         }
   1211       } else {
   1212         __ Lwu(trg, TMP, 0);
   1213         if (is_volatile) {
   1214           __ Sync(0);
   1215         }
   1216         __ MaybeUnpoisonHeapReference(trg);
   1217       }
   1218       break;
   1219 
   1220     default:
   1221       LOG(FATAL) << "Unsupported op size " << type;
   1222       UNREACHABLE();
   1223   }
   1224 }
   1225 
// int sun.misc.Unsafe.getInt(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGet(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) {
  GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ false, codegen_);
}

// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
  GenUnsafeGet(invoke, Primitive::kPrimInt, /* is_volatile */ true, codegen_);
}

// long sun.misc.Unsafe.getLong(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
  GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ false, codegen_);
}

// long sun.misc.Unsafe.getLongVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
  GenUnsafeGet(invoke, Primitive::kPrimLong, /* is_volatile */ true, codegen_);
}

// Object sun.misc.Unsafe.getObject(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
  GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ false, codegen_);
}

// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
  GenUnsafeGet(invoke, Primitive::kPrimNot, /* is_volatile */ true, codegen_);
}

static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetInAt(3, Location::RequiresRegister());
}

// Note that the caller must supply a properly aligned memory address.
// If they do not, the behavior is undefined (atomicity is not guaranteed and an exception may occur).
static void GenUnsafePut(LocationSummary* locations,
                         Primitive::Type type,
                         bool is_volatile,
                         bool is_ordered,
                         CodeGeneratorMIPS64* codegen) {
  DCHECK((type == Primitive::kPrimInt) ||
         (type == Primitive::kPrimLong) ||
         (type == Primitive::kPrimNot));
  Mips64Assembler* assembler = codegen->GetAssembler();
  // Object pointer.
  GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>();
  // Long offset.
  GpuRegister offset = locations->InAt(2).AsRegister<GpuRegister>();
  GpuRegister value = locations->InAt(3).AsRegister<GpuRegister>();

  __ Daddu(TMP, base, offset);
  if (is_volatile || is_ordered) {
    __ Sync(0);
  }
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
      if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
        __ PoisonHeapReference(AT, value);
        __ Sw(AT, TMP, 0);
      } else {
        __ Sw(value, TMP, 0);
      }
      break;

    case Primitive::kPrimLong:
      __ Sd(value, TMP, 0);
      break;

    default:
      LOG(FATAL) << "Unsupported op size " << type;
      UNREACHABLE();
  }
  if (is_volatile) {
    __ Sync(0);
  }

  if (type == Primitive::kPrimNot) {
    bool value_can_be_null = true;  // TODO: Worth finding out this information?
    codegen->MarkGCCard(base, value, value_can_be_null);
  }
}

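// For reference, the barrier placement above mirrors the usual mapping of
// volatile/ordered stores onto fences: a leading fence for both ordered and
// volatile stores, and a trailing fence only for volatile ones. A minimal
// C++ sketch of that contract (names hypothetical):
//
//   #include <atomic>
//   void UnsafePutIntVolatile(std::atomic<int32_t>* field, int32_t value) {
//     std::atomic_thread_fence(std::memory_order_seq_cst);  // leading SYNC(0)
//     field->store(value, std::memory_order_relaxed);
//     std::atomic_thread_fence(std::memory_order_seq_cst);  // trailing SYNC(0)
//   }
//   void UnsafePutIntOrdered(std::atomic<int32_t>* field, int32_t value) {
//     std::atomic_thread_fence(std::memory_order_seq_cst);  // release-style
//     field->store(value, std::memory_order_relaxed);       // no trailing fence
//   }
//
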
// void sun.misc.Unsafe.putInt(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePut(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               Primitive::kPrimInt,
               /* is_volatile */ false,
               /* is_ordered */ false,
               codegen_);
}

// void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               Primitive::kPrimInt,
               /* is_volatile */ false,
               /* is_ordered */ true,
               codegen_);
}

// void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               Primitive::kPrimInt,
               /* is_volatile */ true,
               /* is_ordered */ false,
               codegen_);
}

// void sun.misc.Unsafe.putObject(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               Primitive::kPrimNot,
               /* is_volatile */ false,
               /* is_ordered */ false,
               codegen_);
}

// void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               Primitive::kPrimNot,
               /* is_volatile */ false,
               /* is_ordered */ true,
               codegen_);
}

// void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               Primitive::kPrimNot,
               /* is_volatile */ true,
               /* is_ordered */ false,
               codegen_);
}

// void sun.misc.Unsafe.putLong(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               Primitive::kPrimLong,
               /* is_volatile */ false,
               /* is_ordered */ false,
               codegen_);
}

// void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               Primitive::kPrimLong,
               /* is_volatile */ false,
               /* is_ordered */ true,
               codegen_);
}

// void sun.misc.Unsafe.putLongVolatile(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               Primitive::kPrimLong,
               /* is_volatile */ true,
               /* is_ordered */ false,
               codegen_);
}

static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena, HInvoke* invoke) {
  bool can_call = kEmitCompilerReadBarrier &&
      kUseBakerReadBarrier &&
      (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           (can_call
                                                                ? LocationSummary::kCallOnSlowPath
                                                                : LocationSummary::kNoCall),
                                                           kIntrinsified);
  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetInAt(3, Location::RequiresRegister());
  locations->SetInAt(4, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());

  // Temporary register used in CAS by (Baker) read barrier.
  if (can_call) {
    locations->AddTemp(Location::RequiresRegister());
  }
}

// Note that the caller must supply a properly aligned memory address.
// If they do not, the behavior is undefined (atomicity is not guaranteed and an exception may occur).
static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorMIPS64* codegen) {
  Mips64Assembler* assembler = codegen->GetAssembler();
  LocationSummary* locations = invoke->GetLocations();
  GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>();
  Location offset_loc = locations->InAt(2);
  GpuRegister offset = offset_loc.AsRegister<GpuRegister>();
  GpuRegister expected = locations->InAt(3).AsRegister<GpuRegister>();
  GpuRegister value = locations->InAt(4).AsRegister<GpuRegister>();
  Location out_loc = locations->Out();
  GpuRegister out = out_loc.AsRegister<GpuRegister>();

  DCHECK_NE(base, out);
  DCHECK_NE(offset, out);
  DCHECK_NE(expected, out);

  if (type == Primitive::kPrimNot) {
    // The only read barrier implementation supporting the
    // UnsafeCASObject intrinsic is the Baker-style read barrier.
    DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);

    // Mark card for object assuming new value is stored. Worst case we will mark an unchanged
    // object and scan the receiver at the next GC for nothing.
    bool value_can_be_null = true;  // TODO: Worth finding out this information?
    codegen->MarkGCCard(base, value, value_can_be_null);

    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
      Location temp = locations->GetTemp(0);
      // Need to make sure the reference stored in the field is a to-space
      // one before attempting the CAS, or the CAS could fail incorrectly.
      codegen->GenerateReferenceLoadWithBakerReadBarrier(
          invoke,
          out_loc,  // Unused, used only as a "temporary" within the read barrier.
          base,
          /* offset */ 0u,
          /* index */ offset_loc,
          ScaleFactor::TIMES_1,
          temp,
          /* needs_null_check */ false,
          /* always_update_field */ true);
    }
  }

  Mips64Label loop_head, exit_loop;
  __ Daddu(TMP, base, offset);

  if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
    __ PoisonHeapReference(expected);
    // Do not poison `value`, if it is the same register as
    // `expected`, which has just been poisoned.
    if (value != expected) {
      __ PoisonHeapReference(value);
    }
  }

  // do {
  //   tmp_value = [tmp_ptr] - expected;
  // } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value));
  // result = tmp_value != 0;

  __ Sync(0);
  __ Bind(&loop_head);
  if (type == Primitive::kPrimLong) {
    __ Lld(out, TMP);
  } else {
    // Note: We will need a read barrier here when read barrier
    // support is added to the MIPS64 back end.
    __ Ll(out, TMP);
    if (type == Primitive::kPrimNot) {
      // The LL instruction sign-extends the 32-bit value, but
      // 32-bit references must be zero-extended. Zero-extend `out`.
      __ Dext(out, out, 0, 32);
    }
  }
  __ Dsubu(out, out, expected);         // If we didn't get the 'expected'
  __ Sltiu(out, out, 1);                // value, set 'out' to false, and
  __ Beqzc(out, &exit_loop);            // return.
  __ Move(out, value);  // Use 'out' for the 'store conditional' instruction.
                        // If we use 'value' directly, we would lose 'value'
                        // in the case that the store fails. Whether the
                        // store succeeds or fails, it will write the
                        // correct boolean result into the 'out' register.
  if (type == Primitive::kPrimLong) {
    __ Scd(out, TMP);
  } else {
    __ Sc(out, TMP);
  }
  __ Beqzc(out, &loop_head);    // If we couldn't do the read-modify-write
                                // cycle atomically then retry.
  __ Bind(&exit_loop);
  __ Sync(0);

  if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
    __ UnpoisonHeapReference(expected);
    // Do not unpoison `value`, if it is the same register as
    // `expected`, which has just been unpoisoned.
    if (value != expected) {
      __ UnpoisonHeapReference(value);
    }
  }
}

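// For reference, the LL/SC loop above implements a sequentially consistent
// compare-and-swap. A minimal C++ sketch of the same semantics (names
// hypothetical; the real code operates on a raw base + offset address):
//
//   #include <atomic>
//   bool CompareAndSwapInt(std::atomic<int32_t>* addr,
//                          int32_t expected,
//                          int32_t value) {
//     // compare_exchange_strong retries internally on spurious SC failures,
//     // just like the Beqzc back to loop_head above.
//     return addr->compare_exchange_strong(expected, value,
//                                          std::memory_order_seq_cst);
//   }
//
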
// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASInt(HInvoke* invoke) {
  CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASInt(HInvoke* invoke) {
  GenCas(invoke, Primitive::kPrimInt, codegen_);
}

// boolean sun.misc.Unsafe.compareAndSwapLong(Object o, long offset, long expected, long x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASLong(HInvoke* invoke) {
  CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASLong(HInvoke* invoke) {
  GenCas(invoke, Primitive::kPrimLong, codegen_);
}

// boolean sun.misc.Unsafe.compareAndSwapObject(Object o, long offset, Object expected, Object x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASObject(HInvoke* invoke) {
  // The only read barrier implementation supporting the
  // UnsafeCASObject intrinsic is the Baker-style read barrier.
  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
    return;
  }

  CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASObject(HInvoke* invoke) {
  // The only read barrier implementation supporting the
  // UnsafeCASObject intrinsic is the Baker-style read barrier.
  DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);

  GenCas(invoke, Primitive::kPrimNot, codegen_);
}

// int java.lang.String.compareTo(String anotherString)
void IntrinsicLocationsBuilderMIPS64::VisitStringCompareTo(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCallOnMainAndSlowPath,
                                                            kIntrinsified);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
}

void IntrinsicCodeGeneratorMIPS64::VisitStringCompareTo(HInvoke* invoke) {
  Mips64Assembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  // Note that the null check must have been done earlier.
  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));

  GpuRegister argument = locations->InAt(1).AsRegister<GpuRegister>();
  SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
  codegen_->AddSlowPath(slow_path);
  __ Beqzc(argument, slow_path->GetEntryLabel());

  codegen_->InvokeRuntime(kQuickStringCompareTo, invoke, invoke->GetDexPc(), slow_path);
  __ Bind(slow_path->GetExitLabel());
}

// boolean java.lang.String.equals(Object anObject)
void IntrinsicLocationsBuilderMIPS64::VisitStringEquals(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
                                                            kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());

  // Temporary registers to store lengths of strings and for calculations.
  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorMIPS64::VisitStringEquals(HInvoke* invoke) {
  Mips64Assembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  GpuRegister str = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister arg = locations->InAt(1).AsRegister<GpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();

  GpuRegister temp1 = locations->GetTemp(0).AsRegister<GpuRegister>();
  GpuRegister temp2 = locations->GetTemp(1).AsRegister<GpuRegister>();
  GpuRegister temp3 = locations->GetTemp(2).AsRegister<GpuRegister>();

  Mips64Label loop;
  Mips64Label end;
  Mips64Label return_true;
  Mips64Label return_false;

  // Get offsets of count, value, and class fields within a string object.
  const int32_t count_offset = mirror::String::CountOffset().Int32Value();
  const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
  const int32_t class_offset = mirror::Object::ClassOffset().Int32Value();

  // Note that the null check must have been done earlier.
  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));

  // If the register containing the pointer to "this" and the register
  // containing the pointer to "anObject" are the same register, then
  // "this" and "anObject" are the same object, and we can
  // short-circuit the logic to a true result.
  if (str == arg) {
    __ LoadConst64(out, 1);
    return;
  }

  StringEqualsOptimizations optimizations(invoke);
  if (!optimizations.GetArgumentNotNull()) {
    // Check if input is null; return false if it is.
    __ Beqzc(arg, &return_false);
  }

  // Reference equality check; return true if same reference.
  __ Beqc(str, arg, &return_true);

  if (!optimizations.GetArgumentIsString()) {
    // Instanceof check for the argument by comparing class fields.
    // All string objects must have the same type since String cannot be subclassed.
    // Receiver must be a string object, so its class field is equal to all strings' class fields.
    // If the argument is a string object, its class field must be equal to receiver's class field.
    __ Lw(temp1, str, class_offset);
    __ Lw(temp2, arg, class_offset);
    __ Bnec(temp1, temp2, &return_false);
  }

  // Load `count` fields of this and argument strings.
  __ Lw(temp1, str, count_offset);
  __ Lw(temp2, arg, count_offset);
  // Check if `count` fields are equal; return false if they're not.
  // This also compares the compression style: if it differs, return false.
  __ Bnec(temp1, temp2, &return_false);
  // Return true if both strings are empty. Even with string compression, `count == 0` means empty.
  static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
                "Expecting 0=compressed, 1=uncompressed");
  __ Beqzc(temp1, &return_true);

  // Don't overwrite the input registers.
  __ Move(TMP, str);
  __ Move(temp3, arg);

  // Assertions that must hold in order to compare strings 8 bytes at a time.
  DCHECK_ALIGNED(value_offset, 8);
  static_assert(IsAligned<8>(kObjectAlignment), "String of odd length is not zero padded");

  if (mirror::kUseStringCompression) {
    // For string compression, calculate the number of bytes to compare (not chars).
    __ Dext(temp2, temp1, 0, 1);         // Extract compression flag.
    __ Srl(temp1, temp1, 1);             // Extract length.
    __ Sllv(temp1, temp1, temp2);        // Double the byte count if uncompressed.
  }

  // Loop to compare strings 8 bytes at a time starting at the beginning of the string.
  // Ok to do this because strings are zero-padded to kObjectAlignment.
  __ Bind(&loop);
  __ Ld(out, TMP, value_offset);
  __ Ld(temp2, temp3, value_offset);
  __ Bnec(out, temp2, &return_false);
  __ Daddiu(TMP, TMP, 8);
  __ Daddiu(temp3, temp3, 8);
  // With string compression, we have compared 8 bytes, otherwise 4 chars.
  __ Addiu(temp1, temp1, mirror::kUseStringCompression ? -8 : -4);
  __ Bgtzc(temp1, &loop);

  // Return true and exit the function.
  // If loop does not result in returning false, we return true.
  __ Bind(&return_true);
  __ LoadConst64(out, 1);
  __ Bc(&end);

  // Return false and exit the function.
  __ Bind(&return_false);
  __ LoadConst64(out, 0);
  __ Bind(&end);
}

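// For reference, the fast path above follows the shape below. A minimal C++
// sketch over the raw data (names hypothetical; `count` here is the byte
// count, into which the generated code has already folded the compression
// flag):
//
//   #include <cstdint>
//   #include <cstring>
//   bool StringDataEquals(const uint8_t* a_data, uint32_t a_count,
//                         const uint8_t* b_data, uint32_t b_count) {
//     if (a_data == b_data) return true;      // Same reference.
//     if (a_count != b_count) return false;   // Length or compression differs.
//     if (a_count == 0) return true;          // Both empty.
//     // Compare 8 bytes per iteration, as the Ld/Bnec loop does; reading the
//     // final partial word is safe because strings are zero-padded.
//     for (uint32_t i = 0; i < a_count; i += 8) {
//       if (memcmp(a_data + i, b_data + i, 8) != 0) return false;
//     }
//     return true;
//   }
//
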
static void GenerateStringIndexOf(HInvoke* invoke,
                                  Mips64Assembler* assembler,
                                  CodeGeneratorMIPS64* codegen,
                                  ArenaAllocator* allocator,
                                  bool start_at_zero) {
  LocationSummary* locations = invoke->GetLocations();
  GpuRegister tmp_reg = start_at_zero ? locations->GetTemp(0).AsRegister<GpuRegister>() : TMP;

  // Note that the null check must have been done earlier.
  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));

  // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
  // or directly dispatch for a large constant, or omit slow-path for a small constant or a char.
  SlowPathCodeMIPS64* slow_path = nullptr;
  HInstruction* code_point = invoke->InputAt(1);
  if (code_point->IsIntConstant()) {
    if (!IsUint<16>(code_point->AsIntConstant()->GetValue())) {
      // Always needs the slow-path. We could directly dispatch to it,
      // but this case should be rare, so for simplicity just put the
      // full slow-path down and branch unconditionally.
      slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke);
      codegen->AddSlowPath(slow_path);
      __ Bc(slow_path->GetEntryLabel());
      __ Bind(slow_path->GetExitLabel());
      return;
    }
  } else if (code_point->GetType() != Primitive::kPrimChar) {
    GpuRegister char_reg = locations->InAt(1).AsRegister<GpuRegister>();
    __ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max());
    slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke);
    codegen->AddSlowPath(slow_path);
    __ Bltuc(tmp_reg, char_reg, slow_path->GetEntryLabel());    // UTF-16 required
  }

  if (start_at_zero) {
    DCHECK_EQ(tmp_reg, A2);
    // Start-index = 0.
    __ Clear(tmp_reg);
  }

  codegen->InvokeRuntime(kQuickIndexOf, invoke, invoke->GetDexPc(), slow_path);
  CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();

  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
}

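// For reference, the dispatch logic above separates code points that fit in a
// single UTF-16 code unit from those that do not. A minimal C++ sketch of the
// same decision (names hypothetical):
//
//   #include <cstdint>
//   bool IndexOfNeedsSlowPath(int32_t code_point) {
//     // Values above 0xFFFF (supplementary code points) or negative values
//     // cannot be matched with a single char compare; defer to the runtime.
//     return static_cast<uint32_t>(code_point) > 0xFFFF;
//   }
//
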
// int java.lang.String.indexOf(int ch)
void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCallOnMainAndSlowPath,
                                                            kIntrinsified);
  // We have a hand-crafted assembly stub that follows the runtime
  // calling convention. So it's best to align the inputs accordingly.
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));

  // Need a temp for slow-path codepoint compare, and need to send start-index=0.
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
}

void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) {
  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
}

// int java.lang.String.indexOf(int ch, int fromIndex)
void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCallOnMainAndSlowPath,
                                                            kIntrinsified);
  // We have a hand-crafted assembly stub that follows the runtime
  // calling convention. So it's best to align the inputs accordingly.
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
}

void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
  GenerateStringIndexOf(
      invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
}

// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCallOnMainAndSlowPath,
                                                            kIntrinsified);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
  Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
}

void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
  Mips64Assembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  GpuRegister byte_array = locations->InAt(0).AsRegister<GpuRegister>();
  SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
  codegen_->AddSlowPath(slow_path);
  __ Beqzc(byte_array, slow_path->GetEntryLabel());

  codegen_->InvokeRuntime(kQuickAllocStringFromBytes, invoke, invoke->GetDexPc(), slow_path);
  CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
  __ Bind(slow_path->GetExitLabel());
}

// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCallOnMainOnly,
                                                            kIntrinsified);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
}

void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
  // No need to emit code checking whether `locations->InAt(2)` is a null
  // pointer, as callers of the native method
  //
  //   java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
  //
  // all include a null check on `data` before calling that method.
  codegen_->InvokeRuntime(kQuickAllocStringFromChars, invoke, invoke->GetDexPc());
  CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
}

// java.lang.StringFactory.newStringFromString(String toCopy)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCallOnMainAndSlowPath,
                                                            kIntrinsified);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
}

void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
  Mips64Assembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  GpuRegister string_to_copy = locations->InAt(0).AsRegister<GpuRegister>();
  SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
  codegen_->AddSlowPath(slow_path);
  __ Beqzc(string_to_copy, slow_path->GetEntryLabel());

  codegen_->InvokeRuntime(kQuickAllocStringFromString, invoke, invoke->GetDexPc(), slow_path);
  CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
  __ Bind(slow_path->GetExitLabel());
}

static void GenIsInfinite(LocationSummary* locations,
                          bool is64bit,
                          Mips64Assembler* assembler) {
  FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();

  if (is64bit) {
    __ ClassD(FTMP, in);
  } else {
    __ ClassS(FTMP, in);
  }
  __ Mfc1(out, FTMP);
  __ Andi(out, out, kPositiveInfinity | kNegativeInfinity);
  __ Sltu(out, ZERO, out);
}

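// For reference, CLASS.fmt produces a bit mask classifying the operand (zero,
// subnormal, normal, infinity, NaN, each split by sign); the code above keeps
// only the two infinity bits and converts the mask to a 0/1 boolean. A rough
// C++ sketch of the same predicate, using libm instead of FCLASS:
//
//   #include <cmath>
//   bool IsInfinite(double value) {
//     return std::isinf(value);  // True for both +inf and -inf.
//   }
//
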
// boolean java.lang.Float.isInfinite(float)
void IntrinsicLocationsBuilderMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
  CreateFPToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
  GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}

// boolean java.lang.Double.isInfinite(double)
void IntrinsicLocationsBuilderMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
  CreateFPToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
  GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}

// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
void IntrinsicLocationsBuilderMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
                                                            kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetInAt(3, Location::RequiresRegister());
  locations->SetInAt(4, Location::RequiresRegister());

  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
  Mips64Assembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  // Check assumption that sizeof(Char) is 2 (used in scaling below).
  const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
  DCHECK_EQ(char_size, 2u);
  const size_t char_shift = Primitive::ComponentSizeShift(Primitive::kPrimChar);

  GpuRegister srcObj = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister srcBegin = locations->InAt(1).AsRegister<GpuRegister>();
  GpuRegister srcEnd = locations->InAt(2).AsRegister<GpuRegister>();
  GpuRegister dstObj = locations->InAt(3).AsRegister<GpuRegister>();
  GpuRegister dstBegin = locations->InAt(4).AsRegister<GpuRegister>();

  GpuRegister dstPtr = locations->GetTemp(0).AsRegister<GpuRegister>();
  GpuRegister srcPtr = locations->GetTemp(1).AsRegister<GpuRegister>();
  GpuRegister numChrs = locations->GetTemp(2).AsRegister<GpuRegister>();

  Mips64Label done;
  Mips64Label loop;

  // Location of data in char array buffer.
  const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();

  // Get offset of value field within a string object.
  const int32_t value_offset = mirror::String::ValueOffset().Int32Value();

  __ Beqc(srcEnd, srcBegin, &done);  // No characters to move.

  // Calculate number of characters to be copied.
  __ Dsubu(numChrs, srcEnd, srcBegin);

  // Calculate destination address.
  __ Daddiu(dstPtr, dstObj, data_offset);
  __ Dlsa(dstPtr, dstBegin, dstPtr, char_shift);

  if (mirror::kUseStringCompression) {
    Mips64Label uncompressed_copy, compressed_loop;
    const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
    // Load count field and extract compression flag.
    __ LoadFromOffset(kLoadWord, TMP, srcObj, count_offset);
    __ Dext(TMP, TMP, 0, 1);

    // If string is uncompressed, use uncompressed path.
    __ Bnezc(TMP, &uncompressed_copy);

    // Copy loop for compressed src, copying one 8-bit character to a 16-bit destination at a time.
    __ Daddu(srcPtr, srcObj, srcBegin);
    __ Bind(&compressed_loop);
    __ LoadFromOffset(kLoadUnsignedByte, TMP, srcPtr, value_offset);
    __ StoreToOffset(kStoreHalfword, TMP, dstPtr, 0);
    __ Daddiu(numChrs, numChrs, -1);
    __ Daddiu(srcPtr, srcPtr, 1);
    __ Daddiu(dstPtr, dstPtr, 2);
    __ Bnezc(numChrs, &compressed_loop);

    __ Bc(&done);
    __ Bind(&uncompressed_copy);
  }

  // Calculate source address.
  __ Daddiu(srcPtr, srcObj, value_offset);
  __ Dlsa(srcPtr, srcBegin, srcPtr, char_shift);

  __ Bind(&loop);
  __ Lh(AT, srcPtr, 0);
  __ Daddiu(numChrs, numChrs, -1);
  __ Daddiu(srcPtr, srcPtr, char_size);
  __ Sh(AT, dstPtr, 0);
  __ Daddiu(dstPtr, dstPtr, char_size);
  __ Bnezc(numChrs, &loop);

  __ Bind(&done);
}

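// For reference, the compressed-string path above widens Latin-1 bytes into
// UTF-16 code units while copying. A minimal C++ sketch of both copy loops
// (names hypothetical; the real code works with raw offsets into the object):
//
//   #include <cstdint>
//   void GetChars(const void* src_data, bool compressed,
//                 int32_t begin, int32_t end, uint16_t* dst) {
//     if (compressed) {
//       const uint8_t* s = static_cast<const uint8_t*>(src_data) + begin;
//       for (int32_t i = 0; i < end - begin; ++i) dst[i] = s[i];  // Widen.
//     } else {
//       const uint16_t* s = static_cast<const uint16_t*>(src_data) + begin;
//       for (int32_t i = 0; i < end - begin; ++i) dst[i] = s[i];
//     }
//   }
//
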
// static void java.lang.System.arraycopy(Object src, int srcPos,
//                                        Object dest, int destPos,
//                                        int length)
void IntrinsicLocationsBuilderMIPS64::VisitSystemArrayCopyChar(HInvoke* invoke) {
  HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
  HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
  HIntConstant* length = invoke->InputAt(4)->AsIntConstant();

  // As long as we are checking, we might as well check to see if the src and dest
  // positions are >= 0.
  if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
      (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
    // We will have to fail anyway.
    return;
  }

  // And since we are already checking, check the length too.
  if (length != nullptr) {
    int32_t len = length->GetValue();
    if (len < 0) {
      // Just call as normal.
      return;
    }
  }

  // Okay, it is safe to generate inline code.
  LocationSummary* locations =
      new (arena_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
  // arraycopy(Object src, int srcPos, Object dest, int destPos, int length).
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
  locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));

  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
}

// Utility routine to verify that "length(input) - pos >= length"
static void EnoughItems(Mips64Assembler* assembler,
                        GpuRegister length_input_minus_pos,
                        Location length,
                        SlowPathCodeMIPS64* slow_path) {
  if (length.IsConstant()) {
    int32_t length_constant = length.GetConstant()->AsIntConstant()->GetValue();

    if (IsInt<16>(length_constant)) {
      __ Slti(TMP, length_input_minus_pos, length_constant);
      __ Bnezc(TMP, slow_path->GetEntryLabel());
    } else {
      __ LoadConst32(TMP, length_constant);
      __ Bltc(length_input_minus_pos, TMP, slow_path->GetEntryLabel());
    }
  } else {
    __ Bltc(length_input_minus_pos, length.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
  }
}

static void CheckPosition(Mips64Assembler* assembler,
                          Location pos,
                          GpuRegister input,
                          Location length,
                          SlowPathCodeMIPS64* slow_path,
                          bool length_is_input_length = false) {
  // Where is the length in the Array?
  const uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();

  // Calculate length(input) - pos.
  if (pos.IsConstant()) {
    int32_t pos_const = pos.GetConstant()->AsIntConstant()->GetValue();
    if (pos_const == 0) {
      if (!length_is_input_length) {
        // Check that length(input) >= length.
        __ LoadFromOffset(kLoadWord, AT, input, length_offset);
        EnoughItems(assembler, AT, length, slow_path);
      }
    } else {
      // Check that (length(input) - pos) >= zero.
      __ LoadFromOffset(kLoadWord, AT, input, length_offset);
      DCHECK_GT(pos_const, 0);
      __ Addiu32(AT, AT, -pos_const);
      __ Bltzc(AT, slow_path->GetEntryLabel());

      // Verify that (length(input) - pos) >= length.
      EnoughItems(assembler, AT, length, slow_path);
    }
  } else if (length_is_input_length) {
    // The only way the copy can succeed is if pos is zero.
    GpuRegister pos_reg = pos.AsRegister<GpuRegister>();
    __ Bnezc(pos_reg, slow_path->GetEntryLabel());
  } else {
    // Verify that pos >= 0.
    GpuRegister pos_reg = pos.AsRegister<GpuRegister>();
    __ Bltzc(pos_reg, slow_path->GetEntryLabel());

    // Check that (length(input) - pos) >= zero.
    __ LoadFromOffset(kLoadWord, AT, input, length_offset);
    __ Subu(AT, AT, pos_reg);
    __ Bltzc(AT, slow_path->GetEntryLabel());

    // Verify that (length(input) - pos) >= length.
    EnoughItems(assembler, AT, length, slow_path);
  }
}

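// For reference, CheckPosition and EnoughItems together encode the standard
// arraycopy bounds check. A minimal C++ sketch of the predicate they branch
// on (names hypothetical; the generated code jumps to the slow path instead
// of returning false):
//
//   #include <cstdint>
//   bool PositionOk(int32_t input_length, int32_t pos, int32_t length) {
//     return pos >= 0 &&
//            input_length - pos >= 0 &&
//            input_length - pos >= length;
//   }
//
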
void IntrinsicCodeGeneratorMIPS64::VisitSystemArrayCopyChar(HInvoke* invoke) {
  Mips64Assembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
  Location src_pos = locations->InAt(1);
  GpuRegister dest = locations->InAt(2).AsRegister<GpuRegister>();
  Location dest_pos = locations->InAt(3);
  Location length = locations->InAt(4);

  Mips64Label loop;

  GpuRegister dest_base = locations->GetTemp(0).AsRegister<GpuRegister>();
  GpuRegister src_base = locations->GetTemp(1).AsRegister<GpuRegister>();
  GpuRegister count = locations->GetTemp(2).AsRegister<GpuRegister>();

  SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
  codegen_->AddSlowPath(slow_path);

  // Bail out if the source and destination are the same (to handle overlap).
  __ Beqc(src, dest, slow_path->GetEntryLabel());

  // Bail out if the source is null.
  __ Beqzc(src, slow_path->GetEntryLabel());

  // Bail out if the destination is null.
  __ Beqzc(dest, slow_path->GetEntryLabel());

  // Load length into register for count.
  if (length.IsConstant()) {
    __ LoadConst32(count, length.GetConstant()->AsIntConstant()->GetValue());
  } else {
    // If the length is negative, bail out.
    // We have already checked in the LocationsBuilder for the constant case.
    __ Bltzc(length.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());

    __ Move(count, length.AsRegister<GpuRegister>());
  }

  // Validity checks: source.
  CheckPosition(assembler, src_pos, src, Location::RegisterLocation(count), slow_path);

  // Validity checks: dest.
  CheckPosition(assembler, dest_pos, dest, Location::RegisterLocation(count), slow_path);

  // If count is zero, we're done.
  __ Beqzc(count, slow_path->GetExitLabel());

  // Okay, everything checks out. Finally, time to do the copy.
  // Check assumption that sizeof(Char) is 2 (used in scaling below).
  const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
  DCHECK_EQ(char_size, 2u);

  const size_t char_shift = Primitive::ComponentSizeShift(Primitive::kPrimChar);

  const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();

  // Calculate source and destination addresses.
  if (src_pos.IsConstant()) {
    int32_t src_pos_const = src_pos.GetConstant()->AsIntConstant()->GetValue();

    __ Daddiu64(src_base, src, data_offset + char_size * src_pos_const, TMP);
  } else {
    __ Daddiu64(src_base, src, data_offset, TMP);
    __ Dlsa(src_base, src_pos.AsRegister<GpuRegister>(), src_base, char_shift);
  }
  if (dest_pos.IsConstant()) {
    int32_t dest_pos_const = dest_pos.GetConstant()->AsIntConstant()->GetValue();

    __ Daddiu64(dest_base, dest, data_offset + char_size * dest_pos_const, TMP);
  } else {
    __ Daddiu64(dest_base, dest, data_offset, TMP);
    __ Dlsa(dest_base, dest_pos.AsRegister<GpuRegister>(), dest_base, char_shift);
  }

  __ Bind(&loop);
  __ Lh(TMP, src_base, 0);
  __ Daddiu(src_base, src_base, char_size);
  __ Daddiu(count, count, -1);
  __ Sh(TMP, dest_base, 0);
  __ Daddiu(dest_base, dest_base, char_size);
  __ Bnezc(count, &loop);

  __ Bind(slow_path->GetExitLabel());
}

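// For reference, the element address computation above is the usual
// base + data_offset + pos * sizeof(char), with Dlsa folding the shift and
// add into one instruction. A minimal C++ sketch (names hypothetical):
//
//   #include <cstdint>
//   uint16_t* CharElementAddress(uint8_t* array, uint32_t data_offset,
//                                int32_t pos) {
//     // Pointer arithmetic scales `pos` by sizeof(uint16_t), i.e. char_size.
//     return reinterpret_cast<uint16_t*>(array + data_offset) + pos;
//   }
//
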
static void GenHighestOneBit(LocationSummary* locations,
                             Primitive::Type type,
                             Mips64Assembler* assembler) {
  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong) << PrettyDescriptor(type);

  GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();

  if (type == Primitive::kPrimLong) {
    __ Dclz(TMP, in);
    __ LoadConst64(AT, INT64_C(0x8000000000000000));
    __ Dsrlv(AT, AT, TMP);
  } else {
    __ Clz(TMP, in);
    __ LoadConst32(AT, 0x80000000);
    __ Srlv(AT, AT, TMP);
  }
  // For either value of "type", when "in" is zero, "out" should also
  // be zero. Without this extra "and" operation, when "in" is zero,
  // "out" would be either Integer.MIN_VALUE or Long.MIN_VALUE, because
  // the MIPS logical shift operations "dsrlv" and "srlv" don't use
  // the shift amount (TMP) directly; they use either (TMP % 64) or
  // (TMP % 32), respectively.
  __ And(out, AT, in);
}

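// For reference, the CLZ-based computation above follows the shape below. A
// minimal C++ sketch for the 32-bit case (the explicit zero guard stands in
// for the final "and" with the input, since __builtin_clz(0) is undefined):
//
//   #include <cstdint>
//   uint32_t HighestOneBit(uint32_t in) {
//     if (in == 0) return 0;  // Matches the `and` fix-up above.
//     return UINT32_C(0x80000000) >> __builtin_clz(in);
//   }
//
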
// int java.lang.Integer.highestOneBit(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerHighestOneBit(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitIntegerHighestOneBit(HInvoke* invoke) {
  GenHighestOneBit(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
}

// long java.lang.Long.highestOneBit(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongHighestOneBit(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitLongHighestOneBit(HInvoke* invoke) {
  GenHighestOneBit(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
}

static void GenLowestOneBit(LocationSummary* locations,
                            Primitive::Type type,
                            Mips64Assembler* assembler) {
  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong) << PrettyDescriptor(type);

  GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();

  if (type == Primitive::kPrimLong) {
    __ Dsubu(TMP, ZERO, in);
  } else {
    __ Subu(TMP, ZERO, in);
  }
  __ And(out, TMP, in);
}

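// For reference, the negate-and-mask sequence above is the classic
// two's-complement identity lowestOneBit(x) == x & -x. A one-line C++ sketch
// (using unsigned arithmetic to keep the negation well defined):
//
//   #include <cstdint>
//   uint32_t LowestOneBit(uint32_t in) {
//     return in & (0u - in);  // 0 stays 0; otherwise only the lowest set bit survives.
//   }
//
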
// int java.lang.Integer.lowestOneBit(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerLowestOneBit(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitIntegerLowestOneBit(HInvoke* invoke) {
  GenLowestOneBit(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
}

// long java.lang.Long.lowestOneBit(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongLowestOneBit(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitLongLowestOneBit(HInvoke* invoke) {
  GenLowestOneBit(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
}

static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kCallOnMainOnly,
                                                           kIntrinsified);
  InvokeRuntimeCallingConvention calling_convention;

  locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimDouble));
}

static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kCallOnMainOnly,
                                                           kIntrinsified);
  InvokeRuntimeCallingConvention calling_convention;

  locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
  locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimDouble));
}

static void GenFPToFPCall(HInvoke* invoke,
                          CodeGeneratorMIPS64* codegen,
                          QuickEntrypointEnum entry) {
  LocationSummary* locations = invoke->GetLocations();
  FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
  DCHECK_EQ(in, F12);
  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
  DCHECK_EQ(out, F0);

  codegen->InvokeRuntime(entry, invoke, invoke->GetDexPc());
}

static void GenFPFPToFPCall(HInvoke* invoke,
                            CodeGeneratorMIPS64* codegen,
                            QuickEntrypointEnum entry) {
  LocationSummary* locations = invoke->GetLocations();
  FpuRegister in0 = locations->InAt(0).AsFpuRegister<FpuRegister>();
  DCHECK_EQ(in0, F12);
  FpuRegister in1 = locations->InAt(1).AsFpuRegister<FpuRegister>();
  DCHECK_EQ(in1, F13);
  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
  DCHECK_EQ(out, F0);

  codegen->InvokeRuntime(entry, invoke, invoke->GetDexPc());
}

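// For reference, these helpers only assert that the inputs and output already
// sit in the MIPS64 FP argument/return registers (F12, F13, F0) and then make
// a runtime call; the quick entrypoints are assumed to bottom out in the
// corresponding libm routines. A minimal sketch of the double-to-double shape:
//
//   #include <cmath>
//   double MathCos(double a) {
//     return cos(a);  // What kQuickCos is expected to compute.
//   }
//
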
// static double java.lang.Math.cos(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathCos(HInvoke* invoke) {
  CreateFPToFPCallLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathCos(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickCos);
}

// static double java.lang.Math.sin(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathSin(HInvoke* invoke) {
  CreateFPToFPCallLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathSin(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickSin);
}

// static double java.lang.Math.acos(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathAcos(HInvoke* invoke) {
  CreateFPToFPCallLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathAcos(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickAcos);
}

// static double java.lang.Math.asin(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathAsin(HInvoke* invoke) {
  CreateFPToFPCallLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathAsin(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickAsin);
}

// static double java.lang.Math.atan(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathAtan(HInvoke* invoke) {
  CreateFPToFPCallLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathAtan(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickAtan);
}

// static double java.lang.Math.atan2(double y, double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathAtan2(HInvoke* invoke) {
  CreateFPFPToFPCallLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathAtan2(HInvoke* invoke) {
  GenFPFPToFPCall(invoke, codegen_, kQuickAtan2);
}

// static double java.lang.Math.cbrt(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathCbrt(HInvoke* invoke) {
  CreateFPToFPCallLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathCbrt(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickCbrt);
}

// static double java.lang.Math.cosh(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathCosh(HInvoke* invoke) {
  CreateFPToFPCallLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathCosh(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickCosh);
}

// static double java.lang.Math.exp(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathExp(HInvoke* invoke) {
  CreateFPToFPCallLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathExp(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickExp);
}

// static double java.lang.Math.expm1(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathExpm1(HInvoke* invoke) {
  CreateFPToFPCallLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathExpm1(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickExpm1);
}

// static double java.lang.Math.hypot(double x, double y)
void IntrinsicLocationsBuilderMIPS64::VisitMathHypot(HInvoke* invoke) {
  CreateFPFPToFPCallLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathHypot(HInvoke* invoke) {
  GenFPFPToFPCall(invoke, codegen_, kQuickHypot);
}

// static double java.lang.Math.log(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathLog(HInvoke* invoke) {
  CreateFPToFPCallLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathLog(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickLog);
}

// static double java.lang.Math.log10(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathLog10(HInvoke* invoke) {
  CreateFPToFPCallLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathLog10(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickLog10);
}

// static double java.lang.Math.nextAfter(double start, double direction)
void IntrinsicLocationsBuilderMIPS64::VisitMathNextAfter(HInvoke* invoke) {
  CreateFPFPToFPCallLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorMIPS64::VisitMathNextAfter(HInvoke* invoke) {
   2498   GenFPFPToFPCall(invoke, codegen_, kQuickNextAfter);
   2499 }
   2500 
   2501 // static double java.lang.Math.sinh(double x)
   2502 void IntrinsicLocationsBuilderMIPS64::VisitMathSinh(HInvoke* invoke) {
   2503   CreateFPToFPCallLocations(arena_, invoke);
   2504 }
   2505 
   2506 void IntrinsicCodeGeneratorMIPS64::VisitMathSinh(HInvoke* invoke) {
   2507   GenFPToFPCall(invoke, codegen_, kQuickSinh);
   2508 }
   2509 
   2510 // static double java.lang.Math.tan(double a)
   2511 void IntrinsicLocationsBuilderMIPS64::VisitMathTan(HInvoke* invoke) {
   2512   CreateFPToFPCallLocations(arena_, invoke);
   2513 }
   2514 
   2515 void IntrinsicCodeGeneratorMIPS64::VisitMathTan(HInvoke* invoke) {
   2516   GenFPToFPCall(invoke, codegen_, kQuickTan);
   2517 }
   2518 
   2519 // static double java.lang.Math.tanh(double x)
   2520 void IntrinsicLocationsBuilderMIPS64::VisitMathTanh(HInvoke* invoke) {
   2521   CreateFPToFPCallLocations(arena_, invoke);
   2522 }
   2523 
   2524 void IntrinsicCodeGeneratorMIPS64::VisitMathTanh(HInvoke* invoke) {
   2525   GenFPToFPCall(invoke, codegen_, kQuickTanh);
   2526 }
   2527 
// static java.lang.Integer java.lang.Integer.valueOf(int i)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
  InvokeRuntimeCallingConvention calling_convention;
  IntrinsicVisitor::ComputeIntegerValueOfLocations(
      invoke,
      codegen_,
      calling_convention.GetReturnLocation(Primitive::kPrimNot),
      Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
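
// ComputeIntegerValueOfLocations() decides whether the result can be produced
// without a runtime call; when a call may be needed, it pins the output and the
// class argument to the runtime calling convention locations passed above.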

void IntrinsicCodeGeneratorMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
  LocationSummary* locations = invoke->GetLocations();
  Mips64Assembler* assembler = GetAssembler();
  InstructionCodeGeneratorMIPS64* icodegen =
      down_cast<InstructionCodeGeneratorMIPS64*>(codegen_->GetInstructionVisitor());
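
  // `info` describes the boot image IntegerCache: the [low, high] range of
  // cached values, the cache array, the java.lang.Integer class and the offset
  // of Integer's `value` field.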

  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  InvokeRuntimeCallingConvention calling_convention;
  if (invoke->InputAt(0)->IsConstant()) {
    int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
    if (value >= info.low && value <= info.high) {
      // Just embed the j.l.Integer in the code.
      ScopedObjectAccess soa(Thread::Current());
      mirror::Object* boxed = info.cache->Get(value - info.low);
      DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
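      // Boot image objects live in the low 4GiB of the address space, so the
      // reference fits in 32 bits; the dchecked_integral_cast verifies this.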
      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
      __ LoadConst64(out, address);
    } else {
      // Allocate and initialize a new j.l.Integer.
      // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
      // JIT object table.
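      // `info.integer` is the java.lang.Integer class object in the boot image;
      // the allocation entrypoint takes the class as its only argument.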
      uint32_t address =
          dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
      __ LoadConst64(calling_convention.GetRegisterAt(0), address);
      codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
      CheckEntrypointTypes<kQuickAllocObjectInitialized, void*, mirror::Class*>();
      __ StoreConstToOffset(kStoreWord, value, out, info.value_offset, TMP);
      // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
      // one.
      icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
    }
  } else {
    GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
    Mips64Label allocate, done;
    int32_t count = static_cast<uint32_t>(info.high) - info.low + 1;

    // Is (info.low <= in) && (in <= info.high)?
    __ Addiu32(out, in, -info.low);
    // As unsigned quantities, is out < (info.high - info.low + 1)?
    __ LoadConst32(AT, count);
    // Branch if out >= (info.high - info.low + 1).
    // This means that "in" is outside of the range [info.low, info.high].
    __ Bgeuc(out, AT, &allocate);
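    // E.g. with the default cache bounds info.low = -128 and info.high = 127,
    // count is 256; for in = 200, out = 200 - (-128) = 328 >= 256 (unsigned),
    // so we branch to the allocation slow path.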

    // If the value is within the bounds, load the j.l.Integer directly from the array.
    uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
    uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
    __ LoadConst64(TMP, data_offset + address);
    __ Dlsa(out, out, TMP, TIMES_4);
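    // out = TMP + (out << 2): scale the index by the 4-byte size of a heap
    // reference; the entry itself is then loaded with Lwu below.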
    __ Lwu(out, out, 0);
    __ MaybeUnpoisonHeapReference(out);
    __ Bc(&done);

    __ Bind(&allocate);
    // Otherwise allocate and initialize a new j.l.Integer.
    address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
    __ LoadConst64(calling_convention.GetRegisterAt(0), address);
    codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
    CheckEntrypointTypes<kQuickAllocObjectInitialized, void*, mirror::Class*>();
    __ StoreToOffset(kStoreWord, in, out, info.value_offset);
    // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
    // one.
    icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
    __ Bind(&done);
  }
}

UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)

UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOf)
UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOfAfter)
UNIMPLEMENTED_INTRINSIC(MIPS64, StringBufferAppend)
UNIMPLEMENTED_INTRINSIC(MIPS64, StringBufferLength)
UNIMPLEMENTED_INTRINSIC(MIPS64, StringBufferToString)
UNIMPLEMENTED_INTRINSIC(MIPS64, StringBuilderAppend)
UNIMPLEMENTED_INTRINSIC(MIPS64, StringBuilderLength)
UNIMPLEMENTED_INTRINSIC(MIPS64, StringBuilderToString)

// 1.8.
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndAddInt)
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndAddLong)
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetInt)
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetObject)

UNIMPLEMENTED_INTRINSIC(MIPS64, ThreadInterrupted)

UNREACHABLE_INTRINSICS(MIPS64)

#undef __

}  // namespace mips64
}  // namespace art