/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "intrinsics_mips.h"

#include "arch/mips/instruction_set_features_mips.h"
#include "art_method.h"
#include "code_generator_mips.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "utils/mips/assembler_mips.h"
#include "utils/mips/constants_mips.h"

namespace art {

namespace mips {

IntrinsicLocationsBuilderMIPS::IntrinsicLocationsBuilderMIPS(CodeGeneratorMIPS* codegen)
    : codegen_(codegen), allocator_(codegen->GetGraph()->GetAllocator()) {
}

MipsAssembler* IntrinsicCodeGeneratorMIPS::GetAssembler() {
  return reinterpret_cast<MipsAssembler*>(codegen_->GetAssembler());
}

ArenaAllocator* IntrinsicCodeGeneratorMIPS::GetAllocator() {
  return codegen_->GetGraph()->GetAllocator();
}

inline bool IntrinsicCodeGeneratorMIPS::IsR2OrNewer() const {
  return codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
}

inline bool IntrinsicCodeGeneratorMIPS::IsR6() const {
  return codegen_->GetInstructionSetFeatures().IsR6();
}

inline bool IntrinsicCodeGeneratorMIPS::Is32BitFPU() const {
  return codegen_->GetInstructionSetFeatures().Is32BitFloatingPoint();
}

#define __ codegen->GetAssembler()->

static void MoveFromReturnRegister(Location trg,
                                   DataType::Type type,
                                   CodeGeneratorMIPS* codegen) {
  if (!trg.IsValid()) {
    DCHECK_EQ(type, DataType::Type::kVoid);
    return;
  }

  DCHECK_NE(type, DataType::Type::kVoid);

  if (DataType::IsIntegralType(type) || type == DataType::Type::kReference) {
    Register trg_reg = trg.AsRegister<Register>();
    if (trg_reg != V0) {
      __ Move(V0, trg_reg);
    }
  } else {
    FRegister trg_reg = trg.AsFpuRegister<FRegister>();
    if (trg_reg != F0) {
      if (type == DataType::Type::kFloat32) {
        __ MovS(F0, trg_reg);
      } else {
        __ MovD(F0, trg_reg);
      }
    }
  }
}

static void MoveArguments(HInvoke* invoke, CodeGeneratorMIPS* codegen) {
  InvokeDexCallingConventionVisitorMIPS calling_convention_visitor;
  IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
}

// Slow-path for fallback (calling the managed code to handle the
// intrinsic) in an intrinsified call. This will copy the arguments
// into the positions for a regular call.
//
// Note: The actual parameters are required to be in the locations
//       given by the invoke's location summary. If an intrinsic
//       modifies those locations before a slowpath call, they must be
//       restored!
class IntrinsicSlowPathMIPS : public SlowPathCodeMIPS {
 public:
  explicit IntrinsicSlowPathMIPS(HInvoke* invoke) : SlowPathCodeMIPS(invoke), invoke_(invoke) { }

  void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
    CodeGeneratorMIPS* codegen = down_cast<CodeGeneratorMIPS*>(codegen_in);

    __ Bind(GetEntryLabel());

    SaveLiveRegisters(codegen, invoke_->GetLocations());

    MoveArguments(invoke_, codegen);

    if (invoke_->IsInvokeStaticOrDirect()) {
      codegen->GenerateStaticOrDirectCall(
          invoke_->AsInvokeStaticOrDirect(), Location::RegisterLocation(A0), this);
    } else {
      codegen->GenerateVirtualCall(
          invoke_->AsInvokeVirtual(), Location::RegisterLocation(A0), this);
    }

    // Copy the result back to the expected output.
    Location out = invoke_->GetLocations()->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister());  // TODO: Replace this when we support output in memory.
      DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      MoveFromReturnRegister(out, invoke_->GetType(), codegen);
    }

    RestoreLiveRegisters(codegen, invoke_->GetLocations());
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS"; }

 private:
  // The instruction where this slow path is happening.
  HInvoke* const invoke_;

  DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathMIPS);
};

#undef __

bool IntrinsicLocationsBuilderMIPS::TryDispatch(HInvoke* invoke) {
  Dispatch(invoke);
  LocationSummary* res = invoke->GetLocations();
  return res != nullptr && res->Intrinsified();
}

#define __ assembler->

static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresRegister());
}

static void MoveFPToInt(LocationSummary* locations, bool is64bit, MipsAssembler* assembler) {
  FRegister in = locations->InAt(0).AsFpuRegister<FRegister>();

  if (is64bit) {
    Register out_lo = locations->Out().AsRegisterPairLow<Register>();
    Register out_hi = locations->Out().AsRegisterPairHigh<Register>();

    __ Mfc1(out_lo, in);
    __ MoveFromFpuHigh(out_hi, in);
  } else {
    Register out = locations->Out().AsRegister<Register>();

    __ Mfc1(out, in);
  }
}

// long java.lang.Double.doubleToRawLongBits(double)
void IntrinsicLocationsBuilderMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
  CreateFPToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}

// int java.lang.Float.floatToRawIntBits(float)
void IntrinsicLocationsBuilderMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
  CreateFPToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}

static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresFpuRegister());
}

static void MoveIntToFP(LocationSummary* locations, bool is64bit, MipsAssembler* assembler) {
  FRegister out = locations->Out().AsFpuRegister<FRegister>();

  if (is64bit) {
    Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
    Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();

    __ Mtc1(in_lo, out);
    __ MoveToFpuHigh(in_hi, out);
  } else {
    Register in = locations->InAt(0).AsRegister<Register>();

    __ Mtc1(in, out);
  }
}

// double java.lang.Double.longBitsToDouble(long)
void IntrinsicLocationsBuilderMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
  CreateIntToFPLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}

// float java.lang.Float.intBitsToFloat(int)
void IntrinsicLocationsBuilderMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) {
  CreateIntToFPLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) {
  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}

static void CreateIntToIntLocations(ArenaAllocator* allocator,
                                    HInvoke* invoke,
                                    Location::OutputOverlap overlaps = Location::kNoOutputOverlap) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), overlaps);
}

static void GenReverse(LocationSummary* locations,
                       DataType::Type type,
                       bool isR2OrNewer,
                       bool isR6,
                       bool reverseBits,
                       MipsAssembler* assembler) {
  DCHECK(type == DataType::Type::kInt16 ||
         type == DataType::Type::kInt32 ||
         type == DataType::Type::kInt64);
  DCHECK(type != DataType::Type::kInt16 || !reverseBits);

  if (type == DataType::Type::kInt16) {
    Register in = locations->InAt(0).AsRegister<Register>();
    Register out = locations->Out().AsRegister<Register>();

    if (isR2OrNewer) {
      __ Wsbh(out, in);
      __ Seh(out, out);
    } else {
      __ Sll(TMP, in, 24);
      __ Sra(TMP, TMP, 16);
      __ Sll(out, in, 16);
      __ Srl(out, out, 24);
      __ Or(out, out, TMP);
    }
  } else if (type == DataType::Type::kInt32) {
    Register in = locations->InAt(0).AsRegister<Register>();
    Register out = locations->Out().AsRegister<Register>();

    if (isR2OrNewer) {
      __ Rotr(out, in, 16);
      __ Wsbh(out, out);
    } else {
      // MIPS32r1
      // __ Rotr(out, in, 16);
      __ Sll(TMP, in, 16);
      __ Srl(out, in, 16);
      __ Or(out, out, TMP);
      // __ Wsbh(out, out);
      __ LoadConst32(AT, 0x00FF00FF);
      __ And(TMP, out, AT);
      __ Sll(TMP, TMP, 8);
      __ Srl(out, out, 8);
      __ And(out, out, AT);
      __ Or(out, out, TMP);
    }
    if (reverseBits) {
      if (isR6) {
        __ Bitswap(out, out);
      } else {
        __ LoadConst32(AT, 0x0F0F0F0F);
        __ And(TMP, out, AT);
        __ Sll(TMP, TMP, 4);
        __ Srl(out, out, 4);
        __ And(out, out, AT);
        __ Or(out, TMP, out);
        __ LoadConst32(AT, 0x33333333);
        __ And(TMP, out, AT);
        __ Sll(TMP, TMP, 2);
        __ Srl(out, out, 2);
        __ And(out, out, AT);
        __ Or(out, TMP, out);
        __ LoadConst32(AT, 0x55555555);
        __ And(TMP, out, AT);
        __ Sll(TMP, TMP, 1);
        __ Srl(out, out, 1);
        __ And(out, out, AT);
        __ Or(out, TMP, out);
      }
    }
  } else if (type == DataType::Type::kInt64) {
    Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
    Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
    Register out_lo = locations->Out().AsRegisterPairLow<Register>();
    Register out_hi = locations->Out().AsRegisterPairHigh<Register>();

    if (isR2OrNewer) {
      __ Rotr(AT, in_hi, 16);
      __ Rotr(TMP, in_lo, 16);
      __ Wsbh(out_lo, AT);
      __ Wsbh(out_hi, TMP);
    } else {
      // When calling CreateIntToIntLocations() we promised that the
      // use of the out_lo/out_hi wouldn't overlap with the use of
      // in_lo/in_hi. Be very careful not to write to out_lo/out_hi
      // until we're completely done reading from in_lo/in_hi.
      // __ Rotr(TMP, in_lo, 16);
      __ Sll(TMP, in_lo, 16);
      __ Srl(AT, in_lo, 16);
      __ Or(TMP, TMP, AT);        // Hold in TMP until it's safe
                                  // to write to out_hi.
      // __ Rotr(out_lo, in_hi, 16);
      __ Sll(AT, in_hi, 16);
      __ Srl(out_lo, in_hi, 16);  // Here we are finally done reading
                                  // from in_lo/in_hi so it's okay to
                                  // write to out_lo/out_hi.
      __ Or(out_lo, out_lo, AT);
      // __ Wsbh(out_hi, out_hi);
      __ LoadConst32(AT, 0x00FF00FF);
      __ And(out_hi, TMP, AT);
      __ Sll(out_hi, out_hi, 8);
      __ Srl(TMP, TMP, 8);
      __ And(TMP, TMP, AT);
      __ Or(out_hi, out_hi, TMP);
      // __ Wsbh(out_lo, out_lo);
      __ And(TMP, out_lo, AT);  // AT already holds the correct mask value.
      __ Sll(TMP, TMP, 8);
      __ Srl(out_lo, out_lo, 8);
      __ And(out_lo, out_lo, AT);
      __ Or(out_lo, out_lo, TMP);
    }
    if (reverseBits) {
      if (isR6) {
        __ Bitswap(out_hi, out_hi);
        __ Bitswap(out_lo, out_lo);
      } else {
        __ LoadConst32(AT, 0x0F0F0F0F);
        __ And(TMP, out_hi, AT);
        __ Sll(TMP, TMP, 4);
        __ Srl(out_hi, out_hi, 4);
        __ And(out_hi, out_hi, AT);
        __ Or(out_hi, TMP, out_hi);
        __ And(TMP, out_lo, AT);
        __ Sll(TMP, TMP, 4);
        __ Srl(out_lo, out_lo, 4);
        __ And(out_lo, out_lo, AT);
        __ Or(out_lo, TMP, out_lo);
        __ LoadConst32(AT, 0x33333333);
        __ And(TMP, out_hi, AT);
        __ Sll(TMP, TMP, 2);
        __ Srl(out_hi, out_hi, 2);
        __ And(out_hi, out_hi, AT);
        __ Or(out_hi, TMP, out_hi);
        __ And(TMP, out_lo, AT);
        __ Sll(TMP, TMP, 2);
        __ Srl(out_lo, out_lo, 2);
        __ And(out_lo, out_lo, AT);
        __ Or(out_lo, TMP, out_lo);
        __ LoadConst32(AT, 0x55555555);
        __ And(TMP, out_hi, AT);
        __ Sll(TMP, TMP, 1);
        __ Srl(out_hi, out_hi, 1);
        __ And(out_hi, out_hi, AT);
        __ Or(out_hi, TMP, out_hi);
        __ And(TMP, out_lo, AT);
        __ Sll(TMP, TMP, 1);
        __ Srl(out_lo, out_lo, 1);
        __ And(out_lo, out_lo, AT);
        __ Or(out_lo, TMP, out_lo);
      }
    }
  }
}

// int java.lang.Integer.reverseBytes(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerReverseBytes(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitIntegerReverseBytes(HInvoke* invoke) {
  GenReverse(invoke->GetLocations(),
             DataType::Type::kInt32,
             IsR2OrNewer(),
             IsR6(),
             /* reverseBits */ false,
             GetAssembler());
}

// long java.lang.Long.reverseBytes(long)
void IntrinsicLocationsBuilderMIPS::VisitLongReverseBytes(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitLongReverseBytes(HInvoke* invoke) {
  GenReverse(invoke->GetLocations(),
             DataType::Type::kInt64,
             IsR2OrNewer(),
             IsR6(),
             /* reverseBits */ false,
             GetAssembler());
}

// short java.lang.Short.reverseBytes(short)
void IntrinsicLocationsBuilderMIPS::VisitShortReverseBytes(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitShortReverseBytes(HInvoke* invoke) {
  GenReverse(invoke->GetLocations(),
             DataType::Type::kInt16,
             IsR2OrNewer(),
             IsR6(),
             /* reverseBits */ false,
             GetAssembler());
}

static void GenNumberOfLeadingZeroes(LocationSummary* locations,
                                     bool is64bit,
                                     bool isR6,
                                     MipsAssembler* assembler) {
  Register out = locations->Out().AsRegister<Register>();
  if (is64bit) {
    Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
    Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();

    if (isR6) {
      __ ClzR6(AT, in_hi);
      __ ClzR6(TMP, in_lo);
      __ Seleqz(TMP, TMP, in_hi);
    } else {
      __ ClzR2(AT, in_hi);
      __ ClzR2(TMP, in_lo);
      __ Movn(TMP, ZERO, in_hi);
    }
    __ Addu(out, AT, TMP);
  } else {
    Register in = locations->InAt(0).AsRegister<Register>();

    if (isR6) {
      __ ClzR6(out, in);
    } else {
      __ ClzR2(out, in);
    }
  }
}

// int java.lang.Integer.numberOfLeadingZeros(int i)
void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ false, IsR6(), GetAssembler());
}

// int java.lang.Long.numberOfLeadingZeros(long i)
void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ true, IsR6(), GetAssembler());
}

static void GenNumberOfTrailingZeroes(LocationSummary* locations,
                                      bool is64bit,
                                      bool isR6,
                                      MipsAssembler* assembler) {
  Register out = locations->Out().AsRegister<Register>();
  Register in_lo;
  Register in;

  if (is64bit) {
    Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();

    in_lo = locations->InAt(0).AsRegisterPairLow<Register>();

    // If in_lo is zero then count the number of trailing zeroes in in_hi;
    // otherwise count the number of trailing zeroes in in_lo.
    // out = in_lo ? in_lo : in_hi;
    if (isR6) {
      __ Seleqz(out, in_hi, in_lo);
      __ Selnez(TMP, in_lo, in_lo);
      __ Or(out, out, TMP);
    } else {
      __ Movz(out, in_hi, in_lo);
      __ Movn(out, in_lo, in_lo);
    }

    in = out;
  } else {
    in = locations->InAt(0).AsRegister<Register>();
    // Give in_lo a dummy value to keep the compiler from complaining.
    // Since we only get here in the 32-bit case, this value will never
    // be used.
    in_lo = in;
  }

  if (isR6) {
    // We don't have an instruction to count the number of trailing zeroes.
    // Start by flipping the bits end-for-end so we can count the number of
    // leading zeroes instead.
    __ Rotr(out, in, 16);
    __ Wsbh(out, out);
    __ Bitswap(out, out);
    __ ClzR6(out, out);
  } else {
    // Convert trailing zeroes to trailing ones, and bits to their left
    // to zeroes.
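    // As a worked example of this trick (an illustration, not emitted code):
    // for in = 0b1000, in - 1 = 0b0111 and ((in - 1) ^ in) & (in - 1) =
    // 0b0111, a run of trailing ones exactly as long as the run of trailing
    // zeroes in `in`; clz of that value is 29, and 32 - 29 = 3 ==
    // Integer.numberOfTrailingZeros(8).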
    __ Addiu(TMP, in, -1);
    __ Xor(out, TMP, in);
    __ And(out, out, TMP);
    // Count number of leading zeroes.
    __ ClzR2(out, out);
    // Subtract number of leading zeroes from 32 to get number of trailing ones.
    // Remember that the trailing ones were formerly trailing zeroes.
    __ LoadConst32(TMP, 32);
    __ Subu(out, TMP, out);
  }

  if (is64bit) {
    // If in_lo is zero, then we counted the number of trailing zeroes in in_hi so we must add the
    // number of trailing zeroes in in_lo (32) to get the correct final count.
    __ LoadConst32(TMP, 32);
    if (isR6) {
      __ Seleqz(TMP, TMP, in_lo);
    } else {
      __ Movn(TMP, ZERO, in_lo);
    }
    __ Addu(out, out, TMP);
  }
}

// int java.lang.Integer.numberOfTrailingZeros(int i)
void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}

void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ false, IsR6(), GetAssembler());
}

// int java.lang.Long.numberOfTrailingZeros(long i)
void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}

void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, IsR6(), GetAssembler());
}

// int java.lang.Integer.reverse(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerReverse(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitIntegerReverse(HInvoke* invoke) {
  GenReverse(invoke->GetLocations(),
             DataType::Type::kInt32,
             IsR2OrNewer(),
             IsR6(),
             /* reverseBits */ true,
             GetAssembler());
}

// long java.lang.Long.reverse(long)
void IntrinsicLocationsBuilderMIPS::VisitLongReverse(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitLongReverse(HInvoke* invoke) {
  GenReverse(invoke->GetLocations(),
             DataType::Type::kInt64,
             IsR2OrNewer(),
             IsR6(),
             /* reverseBits */ true,
             GetAssembler());
}

static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}

static void GenBitCount(LocationSummary* locations,
                        DataType::Type type,
                        bool isR6,
                        MipsAssembler* assembler) {
  Register out = locations->Out().AsRegister<Register>();

  // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
  //
  // A generalization of the best bit counting method to integers of
  // bit-widths up to 128 (parameterized by type T) is this:
  //
  // v = v - ((v >> 1) & (T)~(T)0/3);                              // temp
  // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3);         // temp
  // v = (v + (v >> 4)) & (T)~(T)0/255*15;                         // temp
  // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE;  // count
  //
  // For comparison, for 32-bit quantities, this algorithm can be executed
  // using 20 MIPS instructions (the calls to LoadConst32() generate two
  // machine instructions each for the values being used in this algorithm).
  // A(n unrolled) loop-based algorithm required 25 instructions.
  //
  // For 64-bit quantities, this algorithm gets executed twice (once
  // for in_lo, and again for in_hi), but saves a few instructions
  // because the mask values only have to be loaded once. Using this
  // algorithm the count for a 64-bit operand can be performed in 29
  // instructions compared to a loop-based algorithm which required 47
  // instructions.

  if (type == DataType::Type::kInt32) {
    Register in = locations->InAt(0).AsRegister<Register>();

    __ Srl(TMP, in, 1);
    __ LoadConst32(AT, 0x55555555);
    __ And(TMP, TMP, AT);
    __ Subu(TMP, in, TMP);
    __ LoadConst32(AT, 0x33333333);
    __ And(out, TMP, AT);
    __ Srl(TMP, TMP, 2);
    __ And(TMP, TMP, AT);
    __ Addu(TMP, out, TMP);
    __ Srl(out, TMP, 4);
    __ Addu(out, out, TMP);
    __ LoadConst32(AT, 0x0F0F0F0F);
    __ And(out, out, AT);
    __ LoadConst32(TMP, 0x01010101);
    if (isR6) {
      __ MulR6(out, out, TMP);
    } else {
      __ MulR2(out, out, TMP);
    }
    __ Srl(out, out, 24);
  } else {
    DCHECK_EQ(type, DataType::Type::kInt64);
    Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
    Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
    Register tmp_hi = locations->GetTemp(0).AsRegister<Register>();
    Register out_hi = locations->GetTemp(1).AsRegister<Register>();
    Register tmp_lo = TMP;
    Register out_lo = out;

    __ Srl(tmp_lo, in_lo, 1);
    __ Srl(tmp_hi, in_hi, 1);

    __ LoadConst32(AT, 0x55555555);

    __ And(tmp_lo, tmp_lo, AT);
    __ Subu(tmp_lo, in_lo, tmp_lo);

    __ And(tmp_hi, tmp_hi, AT);
    __ Subu(tmp_hi, in_hi, tmp_hi);

    __ LoadConst32(AT, 0x33333333);

    __ And(out_lo, tmp_lo, AT);
    __ Srl(tmp_lo, tmp_lo, 2);
    __ And(tmp_lo, tmp_lo, AT);
    __ Addu(tmp_lo, out_lo, tmp_lo);

    __ And(out_hi, tmp_hi, AT);
    __ Srl(tmp_hi, tmp_hi, 2);
    __ And(tmp_hi, tmp_hi, AT);
    __ Addu(tmp_hi, out_hi, tmp_hi);

    // Here we deviate from the original algorithm a bit. We've reached
    // the stage where the bitfields holding the subtotals are large
    // enough to hold the combined subtotals for both the low word, and
    // the high word. This means that we can add the subtotals for the
    // high, and low words into a single word, and compute the final
    // result for both the high, and low words using fewer instructions.
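    // To see why this is safe: after the 0x33333333 stage each 4-bit field
    // holds a count of at most 4, so adding the high and low words leaves at
    // most 8 per field, which still fits in 4 bits without carrying into a
    // neighboring field; the masked add below then leaves at most 16 per
    // byte, and the final multiply accumulates a total of at most 64 into
    // the top byte, also without overflow.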
    __ LoadConst32(AT, 0x0F0F0F0F);

    __ Addu(TMP, tmp_hi, tmp_lo);

    __ Srl(out, TMP, 4);
    __ And(out, out, AT);
    __ And(TMP, TMP, AT);
    __ Addu(out, out, TMP);

    __ LoadConst32(AT, 0x01010101);

    if (isR6) {
      __ MulR6(out, out, AT);
    } else {
      __ MulR2(out, out, AT);
    }

    __ Srl(out, out, 24);
  }
}

// int java.lang.Integer.bitCount(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerBitCount(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitIntegerBitCount(HInvoke* invoke) {
  GenBitCount(invoke->GetLocations(), DataType::Type::kInt32, IsR6(), GetAssembler());
}

// int java.lang.Long.bitCount(long)
void IntrinsicLocationsBuilderMIPS::VisitLongBitCount(HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorMIPS::VisitLongBitCount(HInvoke* invoke) {
  GenBitCount(invoke->GetLocations(), DataType::Type::kInt64, IsR6(), GetAssembler());
}

static void MathAbsFP(LocationSummary* locations,
                      bool is64bit,
                      bool isR2OrNewer,
                      bool isR6,
                      MipsAssembler* assembler) {
  FRegister in = locations->InAt(0).AsFpuRegister<FRegister>();
  FRegister out = locations->Out().AsFpuRegister<FRegister>();

  // Note, as a "quality of implementation", rather than pure "spec compliance", we require that
  // Math.abs() clears the sign bit (but changes nothing else) for all numbers, including NaN
  // (signaling NaN may become quiet though).
  //
  // The ABS.fmt instructions (abs.s and abs.d) do exactly that when NAN2008=1 (R6). For this case,
  // both regular floating point numbers and NAN values are treated alike, only the sign bit is
  // affected by this instruction.
  // But when NAN2008=0 (R2 and before), the ABS.fmt instructions can't be used. For this case, any
  // NaN operand signals invalid operation. This means that other bits (not just sign bit) might be
  // changed when doing abs(NaN). Because of that, we clear sign bit in a different way.
  if (isR6) {
    if (is64bit) {
      __ AbsD(out, in);
    } else {
      __ AbsS(out, in);
    }
  } else {
    if (is64bit) {
      if (in != out) {
        __ MovD(out, in);
      }
      __ MoveFromFpuHigh(TMP, in);
      // ins instruction is not available for R1.
      if (isR2OrNewer) {
        __ Ins(TMP, ZERO, 31, 1);
      } else {
        __ Sll(TMP, TMP, 1);
        __ Srl(TMP, TMP, 1);
      }
      __ MoveToFpuHigh(TMP, out);
    } else {
      __ Mfc1(TMP, in);
      // ins instruction is not available for R1.
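      // Either variant clears just bit 31. For example, TMP = 0xFFFFFFFF
      // (a NaN pattern with the sign bit set) becomes 0xFFFFFFFE after the
      // Sll and 0x7FFFFFFF after the Srl, the same result that
      // Ins(TMP, ZERO, 31, 1) produces in a single instruction.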
      if (isR2OrNewer) {
        __ Ins(TMP, ZERO, 31, 1);
      } else {
        __ Sll(TMP, TMP, 1);
        __ Srl(TMP, TMP, 1);
      }
      __ Mtc1(TMP, out);
    }
  }
}

// double java.lang.Math.abs(double)
void IntrinsicLocationsBuilderMIPS::VisitMathAbsDouble(HInvoke* invoke) {
  CreateFPToFPLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathAbsDouble(HInvoke* invoke) {
  MathAbsFP(invoke->GetLocations(), /* is64bit */ true, IsR2OrNewer(), IsR6(), GetAssembler());
}

// float java.lang.Math.abs(float)
void IntrinsicLocationsBuilderMIPS::VisitMathAbsFloat(HInvoke* invoke) {
  CreateFPToFPLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathAbsFloat(HInvoke* invoke) {
  MathAbsFP(invoke->GetLocations(), /* is64bit */ false, IsR2OrNewer(), IsR6(), GetAssembler());
}

static void GenAbsInteger(LocationSummary* locations, bool is64bit, MipsAssembler* assembler) {
  if (is64bit) {
    Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
    Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
    Register out_lo = locations->Out().AsRegisterPairLow<Register>();
    Register out_hi = locations->Out().AsRegisterPairHigh<Register>();

    // The comments in this section show the analogous operations which would
    // be performed if we had 64-bit registers "in", and "out".
    // __ Dsra32(AT, in, 31);
    __ Sra(AT, in_hi, 31);
    // __ Xor(out, in, AT);
    __ Xor(TMP, in_lo, AT);
    __ Xor(out_hi, in_hi, AT);
    // __ Dsubu(out, out, AT);
    __ Subu(out_lo, TMP, AT);
    __ Sltu(TMP, out_lo, TMP);
    __ Addu(out_hi, out_hi, TMP);
  } else {
    Register in = locations->InAt(0).AsRegister<Register>();
    Register out = locations->Out().AsRegister<Register>();

    __ Sra(AT, in, 31);
    __ Xor(out, in, AT);
    __ Subu(out, out, AT);
  }
}

// int java.lang.Math.abs(int)
void IntrinsicLocationsBuilderMIPS::VisitMathAbsInt(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathAbsInt(HInvoke* invoke) {
  GenAbsInteger(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}

// long java.lang.Math.abs(long)
void IntrinsicLocationsBuilderMIPS::VisitMathAbsLong(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathAbsLong(HInvoke* invoke) {
  GenAbsInteger(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}

static void GenMinMaxFP(LocationSummary* locations,
                        bool is_min,
                        DataType::Type type,
                        bool is_R6,
                        MipsAssembler* assembler) {
  FRegister out = locations->Out().AsFpuRegister<FRegister>();
  FRegister a = locations->InAt(0).AsFpuRegister<FRegister>();
  FRegister b = locations->InAt(1).AsFpuRegister<FRegister>();

  if (is_R6) {
    MipsLabel noNaNs;
    MipsLabel done;
    FRegister ftmp = ((out != a) && (out != b)) ? out : FTMP;

    // When Java computes min/max it prefers a NaN to a number; the
    // behavior of MIPSR6 is to prefer numbers to NaNs, i.e., if one of
    // the inputs is a NaN and the other is a valid number, the MIPS
    // instruction will return the number; Java wants the NaN value
    // returned. This is why there is extra logic preceding the use of
    // the MIPS min.fmt/max.fmt instructions. If either a, or b holds a
    // NaN, return the NaN, otherwise return the min/max.
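    // For instance, Math.min(1.0, Double.NaN) must return NaN, while a bare
    // min.d would return 1.0; the CMP.UN/SEL sequence below filters out the
    // NaN case before min.fmt/max.fmt is ever reached.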
    if (type == DataType::Type::kFloat64) {
      __ CmpUnD(FTMP, a, b);
      __ Bc1eqz(FTMP, &noNaNs);

      // One of the inputs is a NaN.
      __ CmpEqD(ftmp, a, a);
      // If a == a then b is the NaN, otherwise a is the NaN.
      __ SelD(ftmp, a, b);

      if (ftmp != out) {
        __ MovD(out, ftmp);
      }

      __ B(&done);

      __ Bind(&noNaNs);

      if (is_min) {
        __ MinD(out, a, b);
      } else {
        __ MaxD(out, a, b);
      }
    } else {
      DCHECK_EQ(type, DataType::Type::kFloat32);
      __ CmpUnS(FTMP, a, b);
      __ Bc1eqz(FTMP, &noNaNs);

      // One of the inputs is a NaN.
      __ CmpEqS(ftmp, a, a);
      // If a == a then b is the NaN, otherwise a is the NaN.
      __ SelS(ftmp, a, b);

      if (ftmp != out) {
        __ MovS(out, ftmp);
      }

      __ B(&done);

      __ Bind(&noNaNs);

      if (is_min) {
        __ MinS(out, a, b);
      } else {
        __ MaxS(out, a, b);
      }
    }

    __ Bind(&done);
  } else {
    MipsLabel ordered;
    MipsLabel compare;
    MipsLabel select;
    MipsLabel done;

    if (type == DataType::Type::kFloat64) {
      __ CunD(a, b);
    } else {
      DCHECK_EQ(type, DataType::Type::kFloat32);
      __ CunS(a, b);
    }
    __ Bc1f(&ordered);

    // a or b (or both) is a NaN. Return one, which is a NaN.
    if (type == DataType::Type::kFloat64) {
      __ CeqD(b, b);
    } else {
      __ CeqS(b, b);
    }
    __ B(&select);

    __ Bind(&ordered);

    // Neither is a NaN.
    // a == b? (-0.0 compares equal with +0.0)
    // If equal, handle zeroes, else compare further.
    if (type == DataType::Type::kFloat64) {
      __ CeqD(a, b);
    } else {
      __ CeqS(a, b);
    }
    __ Bc1f(&compare);

    // a == b either bit for bit or one is -0.0 and the other is +0.0.
    if (type == DataType::Type::kFloat64) {
      __ MoveFromFpuHigh(TMP, a);
      __ MoveFromFpuHigh(AT, b);
    } else {
      __ Mfc1(TMP, a);
      __ Mfc1(AT, b);
    }

    if (is_min) {
      // -0.0 prevails over +0.0.
      __ Or(TMP, TMP, AT);
    } else {
      // +0.0 prevails over -0.0.
      __ And(TMP, TMP, AT);
    }

    if (type == DataType::Type::kFloat64) {
      __ Mfc1(AT, a);
      __ Mtc1(AT, out);
      __ MoveToFpuHigh(TMP, out);
    } else {
      __ Mtc1(TMP, out);
    }
    __ B(&done);

    __ Bind(&compare);

    if (type == DataType::Type::kFloat64) {
      if (is_min) {
        // return (a <= b) ? a : b;
        __ ColeD(a, b);
      } else {
        // return (a >= b) ? a : b;
        __ ColeD(b, a);  // b <= a
      }
    } else {
      if (is_min) {
        // return (a <= b) ? a : b;
        __ ColeS(a, b);
      } else {
        // return (a >= b) ? a : b;
        __ ColeS(b, a);  // b <= a
      }
    }

    __ Bind(&select);

    if (type == DataType::Type::kFloat64) {
      __ MovtD(out, a);
      __ MovfD(out, b);
    } else {
      __ MovtS(out, a);
      __ MovfS(out, b);
    }

    __ Bind(&done);
  }
}

static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetInAt(1, Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresFpuRegister(), Location::kOutputOverlap);
}

// double java.lang.Math.min(double, double)
void IntrinsicLocationsBuilderMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) {
  CreateFPFPToFPLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(),
              /* is_min */ true,
              DataType::Type::kFloat64,
              IsR6(),
              GetAssembler());
}

// float java.lang.Math.min(float, float)
void IntrinsicLocationsBuilderMIPS::VisitMathMinFloatFloat(HInvoke* invoke) {
  CreateFPFPToFPLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathMinFloatFloat(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(),
              /* is_min */ true,
              DataType::Type::kFloat32,
              IsR6(),
              GetAssembler());
}

// double java.lang.Math.max(double, double)
void IntrinsicLocationsBuilderMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) {
  CreateFPFPToFPLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(),
              /* is_min */ false,
              DataType::Type::kFloat64,
              IsR6(),
              GetAssembler());
}

// float java.lang.Math.max(float, float)
void IntrinsicLocationsBuilderMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) {
  CreateFPFPToFPLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(),
              /* is_min */ false,
              DataType::Type::kFloat32,
              IsR6(),
              GetAssembler());
}

static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

static void GenMinMax(LocationSummary* locations,
                      bool is_min,
                      DataType::Type type,
                      bool is_R6,
                      MipsAssembler* assembler) {
  if (is_R6) {
    // Some architectures, such as ARM and MIPS (prior to r6), have a
    // conditional move instruction which only changes the target
    // (output) register if the condition is true (MIPS prior to r6 had
    // MOVF, MOVT, MOVN, and MOVZ). The SELEQZ and SELNEZ instructions
    // always change the target (output) register. If the condition is
    // true the output register gets the contents of the "rs" register;
    // otherwise, the output register is set to zero. One consequence
rs : rt" 1106 // MIPS64r6 needs to use a pair of SELEQZ/SELNEZ instructions. 1107 // After executing this pair of instructions one of the output 1108 // registers from the pair will necessarily contain zero. Then the 1109 // code ORs the output registers from the SELEQZ/SELNEZ instructions 1110 // to get the final result. 1111 // 1112 // The initial test to see if the output register is same as the 1113 // first input register is needed to make sure that value in the 1114 // first input register isn't clobbered before we've finished 1115 // computing the output value. The logic in the corresponding else 1116 // clause performs the same task but makes sure the second input 1117 // register isn't clobbered in the event that it's the same register 1118 // as the output register; the else clause also handles the case 1119 // where the output register is distinct from both the first, and the 1120 // second input registers. 1121 if (type == DataType::Type::kInt64) { 1122 Register a_lo = locations->InAt(0).AsRegisterPairLow<Register>(); 1123 Register a_hi = locations->InAt(0).AsRegisterPairHigh<Register>(); 1124 Register b_lo = locations->InAt(1).AsRegisterPairLow<Register>(); 1125 Register b_hi = locations->InAt(1).AsRegisterPairHigh<Register>(); 1126 Register out_lo = locations->Out().AsRegisterPairLow<Register>(); 1127 Register out_hi = locations->Out().AsRegisterPairHigh<Register>(); 1128 1129 MipsLabel compare_done; 1130 1131 if (a_lo == b_lo) { 1132 if (out_lo != a_lo) { 1133 __ Move(out_lo, a_lo); 1134 __ Move(out_hi, a_hi); 1135 } 1136 } else { 1137 __ Slt(TMP, b_hi, a_hi); 1138 __ Bne(b_hi, a_hi, &compare_done); 1139 1140 __ Sltu(TMP, b_lo, a_lo); 1141 1142 __ Bind(&compare_done); 1143 1144 if (is_min) { 1145 __ Seleqz(AT, a_lo, TMP); 1146 __ Selnez(out_lo, b_lo, TMP); // Safe even if out_lo == a_lo/b_lo 1147 // because at this point we're 1148 // done using a_lo/b_lo. 
        } else {
          __ Selnez(AT, a_lo, TMP);
          __ Seleqz(out_lo, b_lo, TMP);  // ditto
        }
        __ Or(out_lo, out_lo, AT);
        if (is_min) {
          __ Seleqz(AT, a_hi, TMP);
          __ Selnez(out_hi, b_hi, TMP);  // ditto but for out_hi & a_hi/b_hi
        } else {
          __ Selnez(AT, a_hi, TMP);
          __ Seleqz(out_hi, b_hi, TMP);  // ditto but for out_hi & a_hi/b_hi
        }
        __ Or(out_hi, out_hi, AT);
      }
    } else {
      DCHECK_EQ(type, DataType::Type::kInt32);
      Register a = locations->InAt(0).AsRegister<Register>();
      Register b = locations->InAt(1).AsRegister<Register>();
      Register out = locations->Out().AsRegister<Register>();

      if (a == b) {
        if (out != a) {
          __ Move(out, a);
        }
      } else {
        __ Slt(AT, b, a);
        if (is_min) {
          __ Seleqz(TMP, a, AT);
          __ Selnez(AT, b, AT);
        } else {
          __ Selnez(TMP, a, AT);
          __ Seleqz(AT, b, AT);
        }
        __ Or(out, TMP, AT);
      }
    }
  } else {
    if (type == DataType::Type::kInt64) {
      Register a_lo = locations->InAt(0).AsRegisterPairLow<Register>();
      Register a_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
      Register b_lo = locations->InAt(1).AsRegisterPairLow<Register>();
      Register b_hi = locations->InAt(1).AsRegisterPairHigh<Register>();
      Register out_lo = locations->Out().AsRegisterPairLow<Register>();
      Register out_hi = locations->Out().AsRegisterPairHigh<Register>();

      MipsLabel compare_done;

      if (a_lo == b_lo) {
        if (out_lo != a_lo) {
          __ Move(out_lo, a_lo);
          __ Move(out_hi, a_hi);
        }
      } else {
        __ Slt(TMP, a_hi, b_hi);
        __ Bne(a_hi, b_hi, &compare_done);

        __ Sltu(TMP, a_lo, b_lo);

        __ Bind(&compare_done);

        if (is_min) {
          if (out_lo != a_lo) {
            __ Movn(out_hi, a_hi, TMP);
            __ Movn(out_lo, a_lo, TMP);
          }
          if (out_lo != b_lo) {
            __ Movz(out_hi, b_hi, TMP);
            __ Movz(out_lo, b_lo, TMP);
          }
        } else {
          if (out_lo != a_lo) {
            __ Movz(out_hi, a_hi, TMP);
            __ Movz(out_lo, a_lo, TMP);
          }
          if (out_lo != b_lo) {
            __ Movn(out_hi, b_hi, TMP);
            __ Movn(out_lo, b_lo, TMP);
          }
        }
      }
    } else {
      DCHECK_EQ(type, DataType::Type::kInt32);
      Register a = locations->InAt(0).AsRegister<Register>();
      Register b = locations->InAt(1).AsRegister<Register>();
      Register out = locations->Out().AsRegister<Register>();

      if (a == b) {
        if (out != a) {
          __ Move(out, a);
        }
      } else {
        __ Slt(AT, a, b);
        if (is_min) {
          if (out != a) {
            __ Movn(out, a, AT);
          }
          if (out != b) {
            __ Movz(out, b, AT);
          }
        } else {
          if (out != a) {
            __ Movz(out, a, AT);
          }
          if (out != b) {
            __ Movn(out, b, AT);
          }
        }
      }
    }
  }
}

// int java.lang.Math.min(int, int)
void IntrinsicLocationsBuilderMIPS::VisitMathMinIntInt(HInvoke* invoke) {
  CreateIntIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathMinIntInt(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(),
            /* is_min */ true,
            DataType::Type::kInt32,
            IsR6(),
            GetAssembler());
}

// long java.lang.Math.min(long, long)
void IntrinsicLocationsBuilderMIPS::VisitMathMinLongLong(HInvoke* invoke) {
  CreateIntIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathMinLongLong(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(),
            /* is_min */ true,
            DataType::Type::kInt64,
            IsR6(),
            GetAssembler());
}

// int java.lang.Math.max(int, int)
void IntrinsicLocationsBuilderMIPS::VisitMathMaxIntInt(HInvoke* invoke) {
  CreateIntIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathMaxIntInt(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(),
            /* is_min */ false,
            DataType::Type::kInt32,
            IsR6(),
            GetAssembler());
}

// long java.lang.Math.max(long, long)
void IntrinsicLocationsBuilderMIPS::VisitMathMaxLongLong(HInvoke* invoke) {
  CreateIntIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathMaxLongLong(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(),
            /* is_min */ false,
            DataType::Type::kInt64,
            IsR6(),
            GetAssembler());
}

// double java.lang.Math.sqrt(double)
void IntrinsicLocationsBuilderMIPS::VisitMathSqrt(HInvoke* invoke) {
  CreateFPToFPLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathSqrt(HInvoke* invoke) {
  LocationSummary* locations = invoke->GetLocations();
  MipsAssembler* assembler = GetAssembler();
  FRegister in = locations->InAt(0).AsFpuRegister<FRegister>();
  FRegister out = locations->Out().AsFpuRegister<FRegister>();

  __ SqrtD(out, in);
}

// byte libcore.io.Memory.peekByte(long address)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekByte(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekByte(HInvoke* invoke) {
  MipsAssembler* assembler = GetAssembler();
  Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
  Register out = invoke->GetLocations()->Out().AsRegister<Register>();

  __ Lb(out, adr, 0);
}

// short libcore.io.Memory.peekShort(long address)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekShortNative(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekShortNative(HInvoke* invoke) {
  MipsAssembler* assembler = GetAssembler();
  Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
  Register out = invoke->GetLocations()->Out().AsRegister<Register>();

  if (IsR6()) {
    __ Lh(out, adr, 0);
  } else if (IsR2OrNewer()) {
    // Unlike for words, there are no lhl/lhr instructions to load
    // unaligned halfwords so the code loads individual bytes, in case
    // the address isn't halfword-aligned, and assembles them into a
    // signed halfword.
    __ Lb(AT, adr, 1);   // This byte must be sign-extended.
    __ Lb(out, adr, 0);  // This byte can be either sign-extended, or
                         // zero-extended because the following
                         // instruction overwrites the sign bits.
    __ Ins(out, AT, 8, 24);
  } else {
    __ Lbu(AT, adr, 0);  // This byte must be zero-extended. If it's not
                         // the "or" instruction below will destroy the upper
                         // 24 bits of the final result.
    __ Lb(out, adr, 1);  // This byte must be sign-extended.
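    // For example, for the little-endian halfword 0xFF80 (-128) the bytes
    // at adr and adr + 1 are 0x80 and 0xFF, so AT = 0x00000080 and
    // out = 0xFFFFFFFF; the shift-and-or below then yields
    // out = 0xFFFFFF80 == -128.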
    __ Sll(out, out, 8);
    __ Or(out, out, AT);
  }
}

// int libcore.io.Memory.peekInt(long address)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekIntNative(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}

void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekIntNative(HInvoke* invoke) {
  MipsAssembler* assembler = GetAssembler();
  Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
  Register out = invoke->GetLocations()->Out().AsRegister<Register>();

  if (IsR6()) {
    __ Lw(out, adr, 0);
  } else {
    __ Lwr(out, adr, 0);
    __ Lwl(out, adr, 3);
  }
}

// long libcore.io.Memory.peekLong(long address)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekLongNative(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}

void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekLongNative(HInvoke* invoke) {
  MipsAssembler* assembler = GetAssembler();
  Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
  Register out_lo = invoke->GetLocations()->Out().AsRegisterPairLow<Register>();
  Register out_hi = invoke->GetLocations()->Out().AsRegisterPairHigh<Register>();

  if (IsR6()) {
    __ Lw(out_lo, adr, 0);
    __ Lw(out_hi, adr, 4);
  } else {
    __ Lwr(out_lo, adr, 0);
    __ Lwl(out_lo, adr, 3);
    __ Lwr(out_hi, adr, 4);
    __ Lwl(out_hi, adr, 7);
  }
}

static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
}

// void libcore.io.Memory.pokeByte(long address, byte value)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeByte(HInvoke* invoke) {
  CreateIntIntToVoidLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeByte(HInvoke* invoke) {
  MipsAssembler* assembler = GetAssembler();
  Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
  Register val = invoke->GetLocations()->InAt(1).AsRegister<Register>();

  __ Sb(val, adr, 0);
}

// void libcore.io.Memory.pokeShort(long address, short value)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeShortNative(HInvoke* invoke) {
  CreateIntIntToVoidLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeShortNative(HInvoke* invoke) {
  MipsAssembler* assembler = GetAssembler();
  Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
  Register val = invoke->GetLocations()->InAt(1).AsRegister<Register>();

  if (IsR6()) {
    __ Sh(val, adr, 0);
  } else {
    // Unlike for words, there are no shl/shr instructions to store
    // unaligned halfwords so the code stores individual bytes, in case
    // the address isn't halfword-aligned.
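    // The bytes are written in little-endian order (low byte at adr), the
    // same layout the peekShort code above reassembles; e.g. val = 0xFF80
    // stores 0x80 at adr and 0xFF at adr + 1.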
    __ Sb(val, adr, 0);
    __ Srl(AT, val, 8);
    __ Sb(AT, adr, 1);
  }
}

// void libcore.io.Memory.pokeInt(long address, int value)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeIntNative(HInvoke* invoke) {
  CreateIntIntToVoidLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeIntNative(HInvoke* invoke) {
  MipsAssembler* assembler = GetAssembler();
  Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
  Register val = invoke->GetLocations()->InAt(1).AsRegister<Register>();

  if (IsR6()) {
    __ Sw(val, adr, 0);
  } else {
    __ Swr(val, adr, 0);
    __ Swl(val, adr, 3);
  }
}

// void libcore.io.Memory.pokeLong(long address, long value)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeLongNative(HInvoke* invoke) {
  CreateIntIntToVoidLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeLongNative(HInvoke* invoke) {
  MipsAssembler* assembler = GetAssembler();
  Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
  Register val_lo = invoke->GetLocations()->InAt(1).AsRegisterPairLow<Register>();
  Register val_hi = invoke->GetLocations()->InAt(1).AsRegisterPairHigh<Register>();

  if (IsR6()) {
    __ Sw(val_lo, adr, 0);
    __ Sw(val_hi, adr, 4);
  } else {
    __ Swr(val_lo, adr, 0);
    __ Swl(val_lo, adr, 3);
    __ Swr(val_hi, adr, 4);
    __ Swl(val_hi, adr, 7);
  }
}

// Thread java.lang.Thread.currentThread()
void IntrinsicLocationsBuilderMIPS::VisitThreadCurrentThread(HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetOut(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorMIPS::VisitThreadCurrentThread(HInvoke* invoke) {
  MipsAssembler* assembler = GetAssembler();
  Register out = invoke->GetLocations()->Out().AsRegister<Register>();

  __ LoadFromOffset(kLoadWord,
                    out,
                    TR,
                    Thread::PeerOffset<kMipsPointerSize>().Int32Value());
}

static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
                                          HInvoke* invoke,
                                          DataType::Type type) {
  bool can_call = kEmitCompilerReadBarrier &&
      (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
       invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke,
                                      can_call
                                          ? LocationSummary::kCallOnSlowPath
                                          : LocationSummary::kNoCall,
                                      kIntrinsified);
  if (can_call && kUseBakerReadBarrier) {
    locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
  }
  locations->SetInAt(0, Location::NoLocation());  // Unused receiver.
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(),
                    (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
  if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
    // We need a temporary register for the read barrier marking slow
    // path in InstructionCodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier.
    locations->AddTemp(Location::RequiresRegister());
  }
}

// Note that the caller must supply a properly aligned memory address.
// If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur).
static void GenUnsafeGet(HInvoke* invoke,
                         DataType::Type type,
                         bool is_volatile,
                         bool is_R6,
                         CodeGeneratorMIPS* codegen) {
  LocationSummary* locations = invoke->GetLocations();
  DCHECK((type == DataType::Type::kInt32) ||
         (type == DataType::Type::kInt64) ||
         (type == DataType::Type::kReference)) << type;
  MipsAssembler* assembler = codegen->GetAssembler();
  // Target register.
  Location trg_loc = locations->Out();
  // Object pointer.
  Location base_loc = locations->InAt(1);
  Register base = base_loc.AsRegister<Register>();
  // The "offset" argument is passed as a "long". Since this code is for
  // a 32-bit processor, we can only use 32-bit addresses, so we only
  // need the low 32-bits of offset.
  Location offset_loc = locations->InAt(2);
  Register offset_lo = offset_loc.AsRegisterPairLow<Register>();

  if (!(kEmitCompilerReadBarrier && kUseBakerReadBarrier && (type == DataType::Type::kReference))) {
    __ Addu(TMP, base, offset_lo);
  }

  switch (type) {
    case DataType::Type::kInt64: {
      Register trg_lo = trg_loc.AsRegisterPairLow<Register>();
      Register trg_hi = trg_loc.AsRegisterPairHigh<Register>();
      CHECK(!is_volatile);  // TODO: support atomic 8-byte volatile loads.
      if (is_R6) {
        __ Lw(trg_lo, TMP, 0);
        __ Lw(trg_hi, TMP, 4);
      } else {
        __ Lwr(trg_lo, TMP, 0);
        __ Lwl(trg_lo, TMP, 3);
        __ Lwr(trg_hi, TMP, 4);
        __ Lwl(trg_hi, TMP, 7);
      }
      break;
    }

    case DataType::Type::kInt32: {
      Register trg = trg_loc.AsRegister<Register>();
      if (is_R6) {
        __ Lw(trg, TMP, 0);
      } else {
        __ Lwr(trg, TMP, 0);
        __ Lwl(trg, TMP, 3);
      }
      if (is_volatile) {
        __ Sync(0);
      }
      break;
    }

    case DataType::Type::kReference: {
      Register trg = trg_loc.AsRegister<Register>();
      if (kEmitCompilerReadBarrier) {
        if (kUseBakerReadBarrier) {
          Location temp = locations->GetTemp(0);
          codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
                                                             trg_loc,
                                                             base,
                                                             /* offset */ 0U,
                                                             /* index */ offset_loc,
                                                             TIMES_1,
                                                             temp,
                                                             /* needs_null_check */ false);
          if (is_volatile) {
            __ Sync(0);
          }
        } else {
          if (is_R6) {
            __ Lw(trg, TMP, 0);
          } else {
            __ Lwr(trg, TMP, 0);
            __ Lwl(trg, TMP, 3);
          }
          if (is_volatile) {
            __ Sync(0);
          }
          codegen->GenerateReadBarrierSlow(invoke,
                                           trg_loc,
                                           trg_loc,
                                           base_loc,
                                           /* offset */ 0U,
                                           /* index */ offset_loc);
        }
      } else {
        if (is_R6) {
          __ Lw(trg, TMP, 0);
        } else {
          __ Lwr(trg, TMP, 0);
          __ Lwl(trg, TMP, 3);
        }
        if (is_volatile) {
          __ Sync(0);
        }
        __ MaybeUnpoisonHeapReference(trg);
      }
      break;
    }

    default:
      LOG(FATAL) << "Unexpected type " << type;
      UNREACHABLE();
  }
}

// int sun.misc.Unsafe.getInt(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGet(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}

void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) {
  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, IsR6(), codegen_);
}

// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}

void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, IsR6(), codegen_);
}

// long sun.misc.Unsafe.getLong(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
}

void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, IsR6(), codegen_);
}

// Object sun.misc.Unsafe.getObject(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}

void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, IsR6(), codegen_);
}

// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}

void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, IsR6(), codegen_);
}

static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::NoLocation());  // Unused receiver.
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetInAt(3, Location::RequiresRegister());
}

// Note that the caller must supply a properly aligned memory address.
// If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur).
static void GenUnsafePut(LocationSummary* locations,
                         DataType::Type type,
                         bool is_volatile,
                         bool is_ordered,
                         bool is_R6,
                         CodeGeneratorMIPS* codegen) {
  DCHECK((type == DataType::Type::kInt32) ||
         (type == DataType::Type::kInt64) ||
         (type == DataType::Type::kReference)) << type;
  MipsAssembler* assembler = codegen->GetAssembler();
  // Object pointer.
  Register base = locations->InAt(1).AsRegister<Register>();
  // The "offset" argument is passed as a "long", i.e., it's 64-bits in
  // size. Since this code is for a 32-bit processor, we can only use
  // 32-bit addresses, so we only need the low 32-bits of offset.
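  // (The high half of the pair is simply never read: the "long" offset
  // arrives in a register pair, and only the low word can contribute to a
  // 32-bit effective address.)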
1721 Register offset_lo = locations->InAt(2).AsRegisterPairLow<Register>(); 1722 1723 __ Addu(TMP, base, offset_lo); 1724 if (is_volatile || is_ordered) { 1725 __ Sync(0); 1726 } 1727 if ((type == DataType::Type::kInt32) || (type == DataType::Type::kReference)) { 1728 Register value = locations->InAt(3).AsRegister<Register>(); 1729 1730 if (kPoisonHeapReferences && type == DataType::Type::kReference) { 1731 __ PoisonHeapReference(AT, value); 1732 value = AT; 1733 } 1734 1735 if (is_R6) { 1736 __ Sw(value, TMP, 0); 1737 } else { 1738 __ Swr(value, TMP, 0); 1739 __ Swl(value, TMP, 3); 1740 } 1741 } else { 1742 Register value_lo = locations->InAt(3).AsRegisterPairLow<Register>(); 1743 Register value_hi = locations->InAt(3).AsRegisterPairHigh<Register>(); 1744 CHECK(!is_volatile); // TODO: support atomic 8-byte volatile stores. 1745 if (is_R6) { 1746 __ Sw(value_lo, TMP, 0); 1747 __ Sw(value_hi, TMP, 4); 1748 } else { 1749 __ Swr(value_lo, TMP, 0); 1750 __ Swl(value_lo, TMP, 3); 1751 __ Swr(value_hi, TMP, 4); 1752 __ Swl(value_hi, TMP, 7); 1753 } 1754 } 1755 1756 if (is_volatile) { 1757 __ Sync(0); 1758 } 1759 1760 if (type == DataType::Type::kReference) { 1761 bool value_can_be_null = true; // TODO: Worth finding out this information? 1762 codegen->MarkGCCard(base, locations->InAt(3).AsRegister<Register>(), value_can_be_null); 1763 } 1764 } 1765 1766 // void sun.misc.Unsafe.putInt(Object o, long offset, int x) 1767 void IntrinsicLocationsBuilderMIPS::VisitUnsafePut(HInvoke* invoke) { 1768 CreateIntIntIntIntToVoidLocations(allocator_, invoke); 1769 } 1770 1771 void IntrinsicCodeGeneratorMIPS::VisitUnsafePut(HInvoke* invoke) { 1772 GenUnsafePut(invoke->GetLocations(), 1773 DataType::Type::kInt32, 1774 /* is_volatile */ false, 1775 /* is_ordered */ false, 1776 IsR6(), 1777 codegen_); 1778 } 1779 1780 // void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x) 1781 void IntrinsicLocationsBuilderMIPS::VisitUnsafePutOrdered(HInvoke* invoke) { 1782 CreateIntIntIntIntToVoidLocations(allocator_, invoke); 1783 } 1784 1785 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutOrdered(HInvoke* invoke) { 1786 GenUnsafePut(invoke->GetLocations(), 1787 DataType::Type::kInt32, 1788 /* is_volatile */ false, 1789 /* is_ordered */ true, 1790 IsR6(), 1791 codegen_); 1792 } 1793 1794 // void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x) 1795 void IntrinsicLocationsBuilderMIPS::VisitUnsafePutVolatile(HInvoke* invoke) { 1796 CreateIntIntIntIntToVoidLocations(allocator_, invoke); 1797 } 1798 1799 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutVolatile(HInvoke* invoke) { 1800 GenUnsafePut(invoke->GetLocations(), 1801 DataType::Type::kInt32, 1802 /* is_volatile */ true, 1803 /* is_ordered */ false, 1804 IsR6(), 1805 codegen_); 1806 } 1807 1808 // void sun.misc.Unsafe.putObject(Object o, long offset, Object x) 1809 void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObject(HInvoke* invoke) { 1810 CreateIntIntIntIntToVoidLocations(allocator_, invoke); 1811 } 1812 1813 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObject(HInvoke* invoke) { 1814 GenUnsafePut(invoke->GetLocations(), 1815 DataType::Type::kReference, 1816 /* is_volatile */ false, 1817 /* is_ordered */ false, 1818 IsR6(), 1819 codegen_); 1820 } 1821 1822 // void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x) 1823 void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) { 1824 CreateIntIntIntIntToVoidLocations(allocator_, invoke); 1825 } 1826 1827 void 
IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) { 1828 GenUnsafePut(invoke->GetLocations(), 1829 DataType::Type::kReference, 1830 /* is_volatile */ false, 1831 /* is_ordered */ true, 1832 IsR6(), 1833 codegen_); 1834 } 1835 1836 // void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x) 1837 void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) { 1838 CreateIntIntIntIntToVoidLocations(allocator_, invoke); 1839 } 1840 1841 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) { 1842 GenUnsafePut(invoke->GetLocations(), 1843 DataType::Type::kReference, 1844 /* is_volatile */ true, 1845 /* is_ordered */ false, 1846 IsR6(), 1847 codegen_); 1848 } 1849 1850 // void sun.misc.Unsafe.putLong(Object o, long offset, long x) 1851 void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLong(HInvoke* invoke) { 1852 CreateIntIntIntIntToVoidLocations(allocator_, invoke); 1853 } 1854 1855 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLong(HInvoke* invoke) { 1856 GenUnsafePut(invoke->GetLocations(), 1857 DataType::Type::kInt64, 1858 /* is_volatile */ false, 1859 /* is_ordered */ false, 1860 IsR6(), 1861 codegen_); 1862 } 1863 1864 // void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x) 1865 void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) { 1866 CreateIntIntIntIntToVoidLocations(allocator_, invoke); 1867 } 1868 1869 void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) { 1870 GenUnsafePut(invoke->GetLocations(), 1871 DataType::Type::kInt64, 1872 /* is_volatile */ false, 1873 /* is_ordered */ true, 1874 IsR6(), 1875 codegen_); 1876 } 1877 1878 static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator, HInvoke* invoke) { 1879 bool can_call = kEmitCompilerReadBarrier && 1880 kUseBakerReadBarrier && 1881 (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject); 1882 LocationSummary* locations = 1883 new (allocator) LocationSummary(invoke, 1884 can_call 1885 ? LocationSummary::kCallOnSlowPath 1886 : LocationSummary::kNoCall, 1887 kIntrinsified); 1888 locations->SetInAt(0, Location::NoLocation()); // Unused receiver. 1889 locations->SetInAt(1, Location::RequiresRegister()); 1890 locations->SetInAt(2, Location::RequiresRegister()); 1891 locations->SetInAt(3, Location::RequiresRegister()); 1892 locations->SetInAt(4, Location::RequiresRegister()); 1893 locations->SetOut(Location::RequiresRegister()); 1894 1895 // Temporary register used in CAS by (Baker) read barrier. 1896 if (can_call) { 1897 locations->AddTemp(Location::RequiresRegister()); 1898 } 1899 } 1900 1901 // Note that the caller must supply a properly aligned memory address. 1902 // If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur). 
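// At the Java level, the sequence emitted by GenCas below behaves like this
// sketch (ignoring read barriers, heap poisoning and memory barriers):
//
//   boolean cas(Object o, long offset, int expected, int x) {
//     if (field at (o + offset) != expected) return false;
//     store x into (o + offset);   // atomically, via LL/SC
//     return true;
//   }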
static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorMIPS* codegen) {
  MipsAssembler* assembler = codegen->GetAssembler();
  LocationSummary* locations = invoke->GetLocations();
  bool isR6 = codegen->GetInstructionSetFeatures().IsR6();
  Register base = locations->InAt(1).AsRegister<Register>();
  Location offset_loc = locations->InAt(2);
  Register offset_lo = offset_loc.AsRegisterPairLow<Register>();
  Register expected = locations->InAt(3).AsRegister<Register>();
  Register value = locations->InAt(4).AsRegister<Register>();
  Location out_loc = locations->Out();
  Register out = out_loc.AsRegister<Register>();

  DCHECK_NE(base, out);
  DCHECK_NE(offset_lo, out);
  DCHECK_NE(expected, out);

  if (type == DataType::Type::kReference) {
    // The only read barrier implementation supporting the
    // UnsafeCASObject intrinsic is the Baker-style read barriers.
    DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);

    // Mark card for object assuming new value is stored. Worst case we will mark an unchanged
    // object and scan the receiver at the next GC for nothing.
    bool value_can_be_null = true;  // TODO: Worth finding out this information?
    codegen->MarkGCCard(base, value, value_can_be_null);

    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
      Location temp = locations->GetTemp(0);
      // Need to make sure the reference stored in the field is a to-space
      // one before attempting the CAS or the CAS could fail incorrectly.
      codegen->GenerateReferenceLoadWithBakerReadBarrier(
          invoke,
          out_loc,  // Unused, used only as a "temporary" within the read barrier.
          base,
          /* offset */ 0u,
          /* index */ offset_loc,
          ScaleFactor::TIMES_1,
          temp,
          /* needs_null_check */ false,
          /* always_update_field */ true);
    }
  }

  MipsLabel loop_head, exit_loop;
  __ Addu(TMP, base, offset_lo);

  if (kPoisonHeapReferences && type == DataType::Type::kReference) {
    __ PoisonHeapReference(expected);
    // Do not poison `value`, if it is the same register as
    // `expected`, which has just been poisoned.
    if (value != expected) {
      __ PoisonHeapReference(value);
    }
  }

  // do {
  //   tmp_value = [tmp_ptr] - expected;
  // } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value));
  // result = tmp_value != 0;

  __ Sync(0);
  __ Bind(&loop_head);
  if ((type == DataType::Type::kInt32) || (type == DataType::Type::kReference)) {
    if (isR6) {
      __ LlR6(out, TMP);
    } else {
      __ LlR2(out, TMP);
    }
  } else {
    LOG(FATAL) << "Unsupported op size " << type;
    UNREACHABLE();
  }
  __ Subu(out, out, expected);  // If we didn't get the 'expected'
  __ Sltiu(out, out, 1);        // value, set 'out' to false, and
  __ Beqz(out, &exit_loop);     // return.
  __ Move(out, value);  // Use 'out' for the 'store conditional' instruction.
                        // If we use 'value' directly, we would lose 'value'
                        // in the case that the store fails. Whether the
                        // store succeeds or fails, it will load the
                        // correct Boolean value into the 'out' register.
  // This test isn't really necessary. We only support DataType::Type::kInt32,
  // DataType::Type::kReference, and we already verified that we're working on one
  // of those two types. It's left here in case the code needs to support
  // other types in the future.
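  // Note: the MIPS store-conditional (SC/SCR6 below) writes 1 into its source
  // register when the store succeeds and 0 when the LL/SC reservation was
  // lost, which is exactly the boolean result this intrinsic must produce.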
1987 if ((type == DataType::Type::kInt32) || (type == DataType::Type::kReference)) { 1988 if (isR6) { 1989 __ ScR6(out, TMP); 1990 } else { 1991 __ ScR2(out, TMP); 1992 } 1993 } 1994 __ Beqz(out, &loop_head); // If we couldn't do the read-modify-write 1995 // cycle atomically then retry. 1996 __ Bind(&exit_loop); 1997 __ Sync(0); 1998 1999 if (kPoisonHeapReferences && type == DataType::Type::kReference) { 2000 __ UnpoisonHeapReference(expected); 2001 // Do not unpoison `value`, if it is the same register as 2002 // `expected`, which has just been unpoisoned. 2003 if (value != expected) { 2004 __ UnpoisonHeapReference(value); 2005 } 2006 } 2007 } 2008 2009 // boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x) 2010 void IntrinsicLocationsBuilderMIPS::VisitUnsafeCASInt(HInvoke* invoke) { 2011 CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke); 2012 } 2013 2014 void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASInt(HInvoke* invoke) { 2015 GenCas(invoke, DataType::Type::kInt32, codegen_); 2016 } 2017 2018 // boolean sun.misc.Unsafe.compareAndSwapObject(Object o, long offset, Object expected, Object x) 2019 void IntrinsicLocationsBuilderMIPS::VisitUnsafeCASObject(HInvoke* invoke) { 2020 // The only read barrier implementation supporting the 2021 // UnsafeCASObject intrinsic is the Baker-style read barriers. 2022 if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) { 2023 return; 2024 } 2025 2026 CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke); 2027 } 2028 2029 void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASObject(HInvoke* invoke) { 2030 // The only read barrier implementation supporting the 2031 // UnsafeCASObject intrinsic is the Baker-style read barriers. 2032 DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier); 2033 2034 GenCas(invoke, DataType::Type::kReference, codegen_); 2035 } 2036 2037 // int java.lang.String.compareTo(String anotherString) 2038 void IntrinsicLocationsBuilderMIPS::VisitStringCompareTo(HInvoke* invoke) { 2039 LocationSummary* locations = new (allocator_) LocationSummary( 2040 invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); 2041 InvokeRuntimeCallingConvention calling_convention; 2042 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); 2043 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); 2044 Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); 2045 locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>())); 2046 } 2047 2048 void IntrinsicCodeGeneratorMIPS::VisitStringCompareTo(HInvoke* invoke) { 2049 MipsAssembler* assembler = GetAssembler(); 2050 LocationSummary* locations = invoke->GetLocations(); 2051 2052 // Note that the null check must have been done earlier. 
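  // (Only the argument still needs an explicit null check here: a null
  // 'anotherString' branches to the intrinsic slow path below, which
  // re-invokes the managed String.compareTo and lets it throw the
  // NullPointerException.)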
2053 DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); 2054 2055 Register argument = locations->InAt(1).AsRegister<Register>(); 2056 SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke); 2057 codegen_->AddSlowPath(slow_path); 2058 __ Beqz(argument, slow_path->GetEntryLabel()); 2059 codegen_->InvokeRuntime(kQuickStringCompareTo, invoke, invoke->GetDexPc(), slow_path); 2060 __ Bind(slow_path->GetExitLabel()); 2061 } 2062 2063 // boolean java.lang.String.equals(Object anObject) 2064 void IntrinsicLocationsBuilderMIPS::VisitStringEquals(HInvoke* invoke) { 2065 if (kEmitCompilerReadBarrier && 2066 !StringEqualsOptimizations(invoke).GetArgumentIsString() && 2067 !StringEqualsOptimizations(invoke).GetNoReadBarrierForStringClass()) { 2068 // No support for this odd case (String class is moveable, not in the boot image). 2069 return; 2070 } 2071 2072 LocationSummary* locations = 2073 new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); 2074 locations->SetInAt(0, Location::RequiresRegister()); 2075 locations->SetInAt(1, Location::RequiresRegister()); 2076 locations->SetOut(Location::RequiresRegister()); 2077 2078 // Temporary registers to store lengths of strings and for calculations. 2079 locations->AddTemp(Location::RequiresRegister()); 2080 locations->AddTemp(Location::RequiresRegister()); 2081 locations->AddTemp(Location::RequiresRegister()); 2082 } 2083 2084 void IntrinsicCodeGeneratorMIPS::VisitStringEquals(HInvoke* invoke) { 2085 MipsAssembler* assembler = GetAssembler(); 2086 LocationSummary* locations = invoke->GetLocations(); 2087 2088 Register str = locations->InAt(0).AsRegister<Register>(); 2089 Register arg = locations->InAt(1).AsRegister<Register>(); 2090 Register out = locations->Out().AsRegister<Register>(); 2091 2092 Register temp1 = locations->GetTemp(0).AsRegister<Register>(); 2093 Register temp2 = locations->GetTemp(1).AsRegister<Register>(); 2094 Register temp3 = locations->GetTemp(2).AsRegister<Register>(); 2095 2096 MipsLabel loop; 2097 MipsLabel end; 2098 MipsLabel return_true; 2099 MipsLabel return_false; 2100 2101 // Get offsets of count, value, and class fields within a string object. 2102 const uint32_t count_offset = mirror::String::CountOffset().Uint32Value(); 2103 const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value(); 2104 const uint32_t class_offset = mirror::Object::ClassOffset().Uint32Value(); 2105 2106 // Note that the null check must have been done earlier. 2107 DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); 2108 2109 // If the register containing the pointer to "this", and the register 2110 // containing the pointer to "anObject" are the same register then 2111 // "this", and "anObject" are the same object and we can 2112 // short-circuit the logic to a true result. 2113 if (str == arg) { 2114 __ LoadConst32(out, 1); 2115 return; 2116 } 2117 StringEqualsOptimizations optimizations(invoke); 2118 if (!optimizations.GetArgumentNotNull()) { 2119 // Check if input is null, return false if it is. 2120 __ Beqz(arg, &return_false); 2121 } 2122 2123 // Reference equality check, return true if same reference. 2124 __ Beq(str, arg, &return_true); 2125 2126 if (!optimizations.GetArgumentIsString()) { 2127 // Instanceof check for the argument by comparing class fields. 2128 // All string objects must have the same type since String cannot be subclassed. 2129 // Receiver must be a string object, so its class field is equal to all strings' class fields. 
    // If the argument is a string object, its class field must be equal to receiver's class field.
    __ Lw(temp1, str, class_offset);
    __ Lw(temp2, arg, class_offset);
    __ Bne(temp1, temp2, &return_false);
  }

  // Load `count` fields of this and argument strings.
  __ Lw(temp1, str, count_offset);
  __ Lw(temp2, arg, count_offset);
  // Check if `count` fields are equal, return false if they're not.
  // Also compares the compression style; if it differs, return false.
  __ Bne(temp1, temp2, &return_false);
  // Return true if both strings are empty. Even with string compression `count == 0` means empty.
  static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
                "Expecting 0=compressed, 1=uncompressed");
  __ Beqz(temp1, &return_true);

  // Don't overwrite input registers.
  __ Move(TMP, str);
  __ Move(temp3, arg);

  // Assertions that must hold in order to compare strings 4 bytes at a time.
  DCHECK_ALIGNED(value_offset, 4);
  static_assert(IsAligned<4>(kObjectAlignment), "String of odd length is not zero padded");

  // For string compression, calculate the number of bytes to compare (not chars).
  if (mirror::kUseStringCompression) {
    // Extract compression flag.
    if (IsR2OrNewer()) {
      __ Ext(temp2, temp1, 0, 1);
    } else {
      __ Sll(temp2, temp1, 31);
      __ Srl(temp2, temp2, 31);
    }
    __ Srl(temp1, temp1, 1);       // Extract length.
    __ Sllv(temp1, temp1, temp2);  // Double the byte count if uncompressed.
  }

  // Loop to compare strings 4 bytes at a time starting at the beginning of the string.
  // Ok to do this because strings are zero-padded to kObjectAlignment.
  __ Bind(&loop);
  __ Lw(out, TMP, value_offset);
  __ Lw(temp2, temp3, value_offset);
  __ Bne(out, temp2, &return_false);
  __ Addiu(TMP, TMP, 4);
  __ Addiu(temp3, temp3, 4);
  // With string compression, we have compared 4 bytes, otherwise 2 chars.
  __ Addiu(temp1, temp1, mirror::kUseStringCompression ? -4 : -2);
  __ Bgtz(temp1, &loop);

  // Return true and exit the function.
  // If loop does not result in returning false, we return true.
  __ Bind(&return_true);
  __ LoadConst32(out, 1);
  __ B(&end);

  // Return false and exit the function.
  __ Bind(&return_false);
  __ LoadConst32(out, 0);
  __ Bind(&end);
}

static void GenerateStringIndexOf(HInvoke* invoke,
                                  bool start_at_zero,
                                  MipsAssembler* assembler,
                                  CodeGeneratorMIPS* codegen) {
  LocationSummary* locations = invoke->GetLocations();
  Register tmp_reg = start_at_zero ? locations->GetTemp(0).AsRegister<Register>() : TMP;

  // Note that the null check must have been done earlier.
  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));

  // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
  // or directly dispatch for a large constant, or omit slow-path for a small constant or a char.
  SlowPathCodeMIPS* slow_path = nullptr;
  HInstruction* code_point = invoke->InputAt(1);
  if (code_point->IsIntConstant()) {
    if (!IsUint<16>(code_point->AsIntConstant()->GetValue())) {
      // Always needs the slow-path. We could directly dispatch to it,
      // but this case should be rare, so for simplicity just put the
      // full slow-path down and branch unconditionally.
      slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
      codegen->AddSlowPath(slow_path);
      __ B(slow_path->GetEntryLabel());
      __ Bind(slow_path->GetExitLabel());
      return;
    }
  } else if (code_point->GetType() != DataType::Type::kUint16) {
    Register char_reg = locations->InAt(1).AsRegister<Register>();
    // The "bltu" conditional branch tests to see if the character value
    // fits in a valid 16-bit (MIPS halfword) value. If it doesn't, then
    // the character being searched for, if it exists in the string, is
    // encoded using UTF-16 and stored in the string as two (16-bit)
    // halfwords. Currently the assembly code used to implement this
    // intrinsic doesn't support searching for a character stored as
    // two halfwords, so we fall back to using the generic implementation
    // of indexOf().
    __ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max());
    slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
    codegen->AddSlowPath(slow_path);
    __ Bltu(tmp_reg, char_reg, slow_path->GetEntryLabel());
  }

  if (start_at_zero) {
    DCHECK_EQ(tmp_reg, A2);
    // Start-index = 0.
    __ Clear(tmp_reg);
  }

  codegen->InvokeRuntime(kQuickIndexOf, invoke, invoke->GetDexPc(), slow_path);
  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
}

// int java.lang.String.indexOf(int ch)
void IntrinsicLocationsBuilderMIPS::VisitStringIndexOf(HInvoke* invoke) {
  LocationSummary* locations = new (allocator_) LocationSummary(
      invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
  // We have a hand-crafted assembly stub that follows the runtime
  // calling convention. So it's best to align the inputs accordingly.
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>()));

  // Need a temp for slow-path codepoint compare, and need to send start-index=0.
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
}

void IntrinsicCodeGeneratorMIPS::VisitStringIndexOf(HInvoke* invoke) {
  GenerateStringIndexOf(invoke, /* start_at_zero */ true, GetAssembler(), codegen_);
}

// int java.lang.String.indexOf(int ch, int fromIndex)
void IntrinsicLocationsBuilderMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
  LocationSummary* locations = new (allocator_) LocationSummary(
      invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
  // We have a hand-crafted assembly stub that follows the runtime
  // calling convention. So it's best to align the inputs accordingly.
2271 InvokeRuntimeCallingConvention calling_convention; 2272 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); 2273 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); 2274 locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); 2275 Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); 2276 locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>())); 2277 2278 // Need a temp for slow-path codepoint compare. 2279 locations->AddTemp(Location::RequiresRegister()); 2280 } 2281 2282 void IntrinsicCodeGeneratorMIPS::VisitStringIndexOfAfter(HInvoke* invoke) { 2283 GenerateStringIndexOf(invoke, /* start_at_zero */ false, GetAssembler(), codegen_); 2284 } 2285 2286 // java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount) 2287 void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromBytes(HInvoke* invoke) { 2288 LocationSummary* locations = new (allocator_) LocationSummary( 2289 invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); 2290 InvokeRuntimeCallingConvention calling_convention; 2291 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); 2292 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); 2293 locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); 2294 locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3))); 2295 Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); 2296 locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>())); 2297 } 2298 2299 void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromBytes(HInvoke* invoke) { 2300 MipsAssembler* assembler = GetAssembler(); 2301 LocationSummary* locations = invoke->GetLocations(); 2302 2303 Register byte_array = locations->InAt(0).AsRegister<Register>(); 2304 SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke); 2305 codegen_->AddSlowPath(slow_path); 2306 __ Beqz(byte_array, slow_path->GetEntryLabel()); 2307 codegen_->InvokeRuntime(kQuickAllocStringFromBytes, invoke, invoke->GetDexPc(), slow_path); 2308 __ Bind(slow_path->GetExitLabel()); 2309 } 2310 2311 // java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data) 2312 void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromChars(HInvoke* invoke) { 2313 LocationSummary* locations = 2314 new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); 2315 InvokeRuntimeCallingConvention calling_convention; 2316 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); 2317 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); 2318 locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); 2319 Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); 2320 locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>())); 2321 } 2322 2323 void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromChars(HInvoke* invoke) { 2324 // No need to emit code checking whether `locations->InAt(2)` is a null 2325 // pointer, as callers of the native method 2326 // 2327 // java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data) 2328 // 2329 // all 
include a null check on `data` before calling that method. 2330 codegen_->InvokeRuntime(kQuickAllocStringFromChars, invoke, invoke->GetDexPc()); 2331 } 2332 2333 // java.lang.StringFactory.newStringFromString(String toCopy) 2334 void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromString(HInvoke* invoke) { 2335 LocationSummary* locations = new (allocator_) LocationSummary( 2336 invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified); 2337 InvokeRuntimeCallingConvention calling_convention; 2338 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); 2339 Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32); 2340 locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>())); 2341 } 2342 2343 void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromString(HInvoke* invoke) { 2344 MipsAssembler* assembler = GetAssembler(); 2345 LocationSummary* locations = invoke->GetLocations(); 2346 2347 Register string_to_copy = locations->InAt(0).AsRegister<Register>(); 2348 SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke); 2349 codegen_->AddSlowPath(slow_path); 2350 __ Beqz(string_to_copy, slow_path->GetEntryLabel()); 2351 codegen_->InvokeRuntime(kQuickAllocStringFromString, invoke, invoke->GetDexPc()); 2352 __ Bind(slow_path->GetExitLabel()); 2353 } 2354 2355 static void GenIsInfinite(LocationSummary* locations, 2356 const DataType::Type type, 2357 const bool isR6, 2358 MipsAssembler* assembler) { 2359 FRegister in = locations->InAt(0).AsFpuRegister<FRegister>(); 2360 Register out = locations->Out().AsRegister<Register>(); 2361 2362 DCHECK(type == DataType::Type::kFloat32 || type == DataType::Type::kFloat64); 2363 2364 if (isR6) { 2365 if (type == DataType::Type::kFloat64) { 2366 __ ClassD(FTMP, in); 2367 } else { 2368 __ ClassS(FTMP, in); 2369 } 2370 __ Mfc1(out, FTMP); 2371 __ Andi(out, out, kPositiveInfinity | kNegativeInfinity); 2372 __ Sltu(out, ZERO, out); 2373 } else { 2374 // If one, or more, of the exponent bits is zero, then the number can't be infinite. 2375 if (type == DataType::Type::kFloat64) { 2376 __ MoveFromFpuHigh(TMP, in); 2377 __ LoadConst32(AT, High32Bits(kPositiveInfinityDouble)); 2378 } else { 2379 __ Mfc1(TMP, in); 2380 __ LoadConst32(AT, kPositiveInfinityFloat); 2381 } 2382 __ Xor(TMP, TMP, AT); 2383 2384 __ Sll(TMP, TMP, 1); 2385 2386 if (type == DataType::Type::kFloat64) { 2387 __ Mfc1(AT, in); 2388 __ Or(TMP, TMP, AT); 2389 } 2390 // If any of the significand bits are one, then the number is not infinite. 
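    // At this point TMP is zero iff the sign-stripped bit pattern matched the
    // infinity encoding exactly, so the unsigned compare below ('out = TMP < 1')
    // is just a branch-free way of computing 'out = (TMP == 0)'.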
2391 __ Sltiu(out, TMP, 1); 2392 } 2393 } 2394 2395 // boolean java.lang.Float.isInfinite(float) 2396 void IntrinsicLocationsBuilderMIPS::VisitFloatIsInfinite(HInvoke* invoke) { 2397 CreateFPToIntLocations(allocator_, invoke); 2398 } 2399 2400 void IntrinsicCodeGeneratorMIPS::VisitFloatIsInfinite(HInvoke* invoke) { 2401 GenIsInfinite(invoke->GetLocations(), DataType::Type::kFloat32, IsR6(), GetAssembler()); 2402 } 2403 2404 // boolean java.lang.Double.isInfinite(double) 2405 void IntrinsicLocationsBuilderMIPS::VisitDoubleIsInfinite(HInvoke* invoke) { 2406 CreateFPToIntLocations(allocator_, invoke); 2407 } 2408 2409 void IntrinsicCodeGeneratorMIPS::VisitDoubleIsInfinite(HInvoke* invoke) { 2410 GenIsInfinite(invoke->GetLocations(), DataType::Type::kFloat64, IsR6(), GetAssembler()); 2411 } 2412 2413 static void GenHighestOneBit(LocationSummary* locations, 2414 const DataType::Type type, 2415 bool isR6, 2416 MipsAssembler* assembler) { 2417 DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); 2418 2419 if (type == DataType::Type::kInt64) { 2420 Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>(); 2421 Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>(); 2422 Register out_lo = locations->Out().AsRegisterPairLow<Register>(); 2423 Register out_hi = locations->Out().AsRegisterPairHigh<Register>(); 2424 2425 if (isR6) { 2426 __ ClzR6(TMP, in_hi); 2427 } else { 2428 __ ClzR2(TMP, in_hi); 2429 } 2430 __ LoadConst32(AT, 0x80000000); 2431 __ Srlv(out_hi, AT, TMP); 2432 __ And(out_hi, out_hi, in_hi); 2433 if (isR6) { 2434 __ ClzR6(TMP, in_lo); 2435 } else { 2436 __ ClzR2(TMP, in_lo); 2437 } 2438 __ Srlv(out_lo, AT, TMP); 2439 __ And(out_lo, out_lo, in_lo); 2440 if (isR6) { 2441 __ Seleqz(out_lo, out_lo, out_hi); 2442 } else { 2443 __ Movn(out_lo, ZERO, out_hi); 2444 } 2445 } else { 2446 Register in = locations->InAt(0).AsRegister<Register>(); 2447 Register out = locations->Out().AsRegister<Register>(); 2448 2449 if (isR6) { 2450 __ ClzR6(TMP, in); 2451 } else { 2452 __ ClzR2(TMP, in); 2453 } 2454 __ LoadConst32(AT, 0x80000000); 2455 __ Srlv(AT, AT, TMP); // Srlv shifts in the range of [0;31] bits (lower 5 bits of arg). 2456 __ And(out, AT, in); // So this is required for 0 (=shift by 32). 
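    // A worked example of the clz-based computation above, assuming in = 0x00000f00:
    //   clz(in) = 20, 0x80000000 >> 20 = 0x00000800, and 0x00000800 & in = 0x00000800,
    // which is indeed the highest set bit. For in = 0 the shift amount (32) is
    // truncated to 0, but the final 'and' with in still yields the required 0.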
2457 } 2458 } 2459 2460 // int java.lang.Integer.highestOneBit(int) 2461 void IntrinsicLocationsBuilderMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) { 2462 CreateIntToIntLocations(allocator_, invoke); 2463 } 2464 2465 void IntrinsicCodeGeneratorMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) { 2466 GenHighestOneBit(invoke->GetLocations(), DataType::Type::kInt32, IsR6(), GetAssembler()); 2467 } 2468 2469 // long java.lang.Long.highestOneBit(long) 2470 void IntrinsicLocationsBuilderMIPS::VisitLongHighestOneBit(HInvoke* invoke) { 2471 CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap); 2472 } 2473 2474 void IntrinsicCodeGeneratorMIPS::VisitLongHighestOneBit(HInvoke* invoke) { 2475 GenHighestOneBit(invoke->GetLocations(), DataType::Type::kInt64, IsR6(), GetAssembler()); 2476 } 2477 2478 static void GenLowestOneBit(LocationSummary* locations, 2479 const DataType::Type type, 2480 bool isR6, 2481 MipsAssembler* assembler) { 2482 DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64); 2483 2484 if (type == DataType::Type::kInt64) { 2485 Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>(); 2486 Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>(); 2487 Register out_lo = locations->Out().AsRegisterPairLow<Register>(); 2488 Register out_hi = locations->Out().AsRegisterPairHigh<Register>(); 2489 2490 __ Subu(TMP, ZERO, in_lo); 2491 __ And(out_lo, TMP, in_lo); 2492 __ Subu(TMP, ZERO, in_hi); 2493 __ And(out_hi, TMP, in_hi); 2494 if (isR6) { 2495 __ Seleqz(out_hi, out_hi, out_lo); 2496 } else { 2497 __ Movn(out_hi, ZERO, out_lo); 2498 } 2499 } else { 2500 Register in = locations->InAt(0).AsRegister<Register>(); 2501 Register out = locations->Out().AsRegister<Register>(); 2502 2503 __ Subu(TMP, ZERO, in); 2504 __ And(out, TMP, in); 2505 } 2506 } 2507 2508 // int java.lang.Integer.lowestOneBit(int) 2509 void IntrinsicLocationsBuilderMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) { 2510 CreateIntToIntLocations(allocator_, invoke); 2511 } 2512 2513 void IntrinsicCodeGeneratorMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) { 2514 GenLowestOneBit(invoke->GetLocations(), DataType::Type::kInt32, IsR6(), GetAssembler()); 2515 } 2516 2517 // long java.lang.Long.lowestOneBit(long) 2518 void IntrinsicLocationsBuilderMIPS::VisitLongLowestOneBit(HInvoke* invoke) { 2519 CreateIntToIntLocations(allocator_, invoke); 2520 } 2521 2522 void IntrinsicCodeGeneratorMIPS::VisitLongLowestOneBit(HInvoke* invoke) { 2523 GenLowestOneBit(invoke->GetLocations(), DataType::Type::kInt64, IsR6(), GetAssembler()); 2524 } 2525 2526 // int java.lang.Math.round(float) 2527 void IntrinsicLocationsBuilderMIPS::VisitMathRoundFloat(HInvoke* invoke) { 2528 LocationSummary* locations = 2529 new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); 2530 locations->SetInAt(0, Location::RequiresFpuRegister()); 2531 locations->AddTemp(Location::RequiresFpuRegister()); 2532 locations->SetOut(Location::RequiresRegister()); 2533 } 2534 2535 void IntrinsicCodeGeneratorMIPS::VisitMathRoundFloat(HInvoke* invoke) { 2536 LocationSummary* locations = invoke->GetLocations(); 2537 MipsAssembler* assembler = GetAssembler(); 2538 FRegister in = locations->InAt(0).AsFpuRegister<FRegister>(); 2539 FRegister half = locations->GetTemp(0).AsFpuRegister<FRegister>(); 2540 Register out = locations->Out().AsRegister<Register>(); 2541 2542 MipsLabel done; 2543 2544 if (IsR6()) { 2545 // out = floor(in); 2546 // 2547 // if (out != MAX_VALUE && out != MIN_VALUE) { 2548 // TMP = 
((in - out) >= 0.5) ? 1 : 0;
    //   return out += TMP;
    // }
    // return out;

    // out = floor(in);
    __ FloorWS(FTMP, in);
    __ Mfc1(out, FTMP);

    // if (out != MAX_VALUE && out != MIN_VALUE)
    __ Addiu(TMP, out, 1);
    __ Aui(TMP, TMP, 0x8000);  // TMP = out + 0x8000 0001
                               // or out - 0x7FFF FFFF.
                               // IOW, TMP = 1 if out = Int.MIN_VALUE
                               // or TMP = 0 if out = Int.MAX_VALUE.
    __ Srl(TMP, TMP, 1);       // TMP = 0 if out = Int.MIN_VALUE
                               // or out = Int.MAX_VALUE.
    __ Beqz(TMP, &done);

    // TMP = (0.5f <= (in - out)) ? -1 : 0;
    __ Cvtsw(FTMP, FTMP);  // Convert output of floor.w.s back to "float".
    __ LoadConst32(AT, bit_cast<int32_t, float>(0.5f));
    __ SubS(FTMP, in, FTMP);
    __ Mtc1(AT, half);

    __ CmpLeS(FTMP, half, FTMP);
    __ Mfc1(TMP, FTMP);

    // Return out -= TMP.
    __ Subu(out, out, TMP);
  } else {
    // if (in.isNaN) {
    //   return 0;
    // }
    //
    // out = floor.w.s(in);
    //
    // /*
    //  * This "if" statement is only needed for the pre-R6 version of floor.w.s
    //  * which outputs Integer.MAX_VALUE for negative numbers with magnitudes
    //  * too large to fit in a 32-bit integer.
    //  */
    // if (out == Integer.MAX_VALUE) {
    //   TMP = (in < 0.0f) ? 1 : 0;
    //   /*
    //    * If TMP is 1, then adding it to out will wrap its value from
    //    * Integer.MAX_VALUE to Integer.MIN_VALUE.
    //    */
    //   return out += TMP;
    // }
    //
    // /*
    //  * For negative values not handled by the previous "if" statement the
    //  * test here will correctly set the value of TMP.
    //  */
    // TMP = ((in - out) >= 0.5f) ? 1 : 0;
    // return out += TMP;

    MipsLabel finite;
    MipsLabel add;

    // Test for NaN.
    __ CunS(in, in);

    // Return zero for NaN.
    __ Move(out, ZERO);
    __ Bc1t(&done);

    // out = floor(in);
    __ FloorWS(FTMP, in);
    __ Mfc1(out, FTMP);

    __ LoadConst32(TMP, -1);

    // TMP = (out == java.lang.Integer.MAX_VALUE) ? -1 : 0;
    __ LoadConst32(AT, std::numeric_limits<int32_t>::max());
    __ Bne(AT, out, &finite);

    __ Mtc1(ZERO, FTMP);
    __ ColtS(in, FTMP);

    __ B(&add);

    __ Bind(&finite);

    // TMP = (0.5f <= (in - out)) ? -1 : 0;
    __ Cvtsw(FTMP, FTMP);  // Convert output of floor.w.s back to "float".
    __ LoadConst32(AT, bit_cast<int32_t, float>(0.5f));
    __ SubS(FTMP, in, FTMP);
    __ Mtc1(AT, half);
    __ ColeS(half, FTMP);

    __ Bind(&add);

    __ Movf(TMP, ZERO);

    // Return out -= TMP.
2645 __ Subu(out, out, TMP); 2646 } 2647 __ Bind(&done); 2648 } 2649 2650 // void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin) 2651 void IntrinsicLocationsBuilderMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) { 2652 LocationSummary* locations = 2653 new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified); 2654 locations->SetInAt(0, Location::RequiresRegister()); 2655 locations->SetInAt(1, Location::RequiresRegister()); 2656 locations->SetInAt(2, Location::RequiresRegister()); 2657 locations->SetInAt(3, Location::RequiresRegister()); 2658 locations->SetInAt(4, Location::RequiresRegister()); 2659 2660 locations->AddTemp(Location::RequiresRegister()); 2661 locations->AddTemp(Location::RequiresRegister()); 2662 locations->AddTemp(Location::RequiresRegister()); 2663 } 2664 2665 void IntrinsicCodeGeneratorMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) { 2666 MipsAssembler* assembler = GetAssembler(); 2667 LocationSummary* locations = invoke->GetLocations(); 2668 2669 // Check assumption that sizeof(Char) is 2 (used in scaling below). 2670 const size_t char_size = DataType::Size(DataType::Type::kUint16); 2671 DCHECK_EQ(char_size, 2u); 2672 const size_t char_shift = DataType::SizeShift(DataType::Type::kUint16); 2673 2674 Register srcObj = locations->InAt(0).AsRegister<Register>(); 2675 Register srcBegin = locations->InAt(1).AsRegister<Register>(); 2676 Register srcEnd = locations->InAt(2).AsRegister<Register>(); 2677 Register dstObj = locations->InAt(3).AsRegister<Register>(); 2678 Register dstBegin = locations->InAt(4).AsRegister<Register>(); 2679 2680 Register dstPtr = locations->GetTemp(0).AsRegister<Register>(); 2681 Register srcPtr = locations->GetTemp(1).AsRegister<Register>(); 2682 Register numChrs = locations->GetTemp(2).AsRegister<Register>(); 2683 2684 MipsLabel done; 2685 MipsLabel loop; 2686 2687 // Location of data in char array buffer. 2688 const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value(); 2689 2690 // Get offset of value field within a string object. 2691 const int32_t value_offset = mirror::String::ValueOffset().Int32Value(); 2692 2693 __ Beq(srcEnd, srcBegin, &done); // No characters to move. 2694 2695 // Calculate number of characters to be copied. 2696 __ Subu(numChrs, srcEnd, srcBegin); 2697 2698 // Calculate destination address. 2699 __ Addiu(dstPtr, dstObj, data_offset); 2700 __ ShiftAndAdd(dstPtr, dstBegin, dstPtr, char_shift); 2701 2702 if (mirror::kUseStringCompression) { 2703 MipsLabel uncompressed_copy, compressed_loop; 2704 const uint32_t count_offset = mirror::String::CountOffset().Uint32Value(); 2705 // Load count field and extract compression flag. 2706 __ LoadFromOffset(kLoadWord, TMP, srcObj, count_offset); 2707 __ Sll(TMP, TMP, 31); 2708 2709 // If string is uncompressed, use uncompressed path. 2710 __ Bnez(TMP, &uncompressed_copy); 2711 2712 // Copy loop for compressed src, copying 1 character (8-bit) to (16-bit) at a time. 2713 __ Addu(srcPtr, srcObj, srcBegin); 2714 __ Bind(&compressed_loop); 2715 __ LoadFromOffset(kLoadUnsignedByte, TMP, srcPtr, value_offset); 2716 __ StoreToOffset(kStoreHalfword, TMP, dstPtr, 0); 2717 __ Addiu(numChrs, numChrs, -1); 2718 __ Addiu(srcPtr, srcPtr, 1); 2719 __ Addiu(dstPtr, dstPtr, 2); 2720 __ Bnez(numChrs, &compressed_loop); 2721 2722 __ B(&done); 2723 __ Bind(&uncompressed_copy); 2724 } 2725 2726 // Calculate source address. 
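  // (A sketch of the address arithmetic that follows: srcPtr = srcObj +
  // value_offset + (srcBegin << char_shift), i.e. the address of the
  // srcBegin-th 16-bit character of the uncompressed value array.)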
2727 __ Addiu(srcPtr, srcObj, value_offset); 2728 __ ShiftAndAdd(srcPtr, srcBegin, srcPtr, char_shift); 2729 2730 __ Bind(&loop); 2731 __ Lh(AT, srcPtr, 0); 2732 __ Addiu(numChrs, numChrs, -1); 2733 __ Addiu(srcPtr, srcPtr, char_size); 2734 __ Sh(AT, dstPtr, 0); 2735 __ Addiu(dstPtr, dstPtr, char_size); 2736 __ Bnez(numChrs, &loop); 2737 2738 __ Bind(&done); 2739 } 2740 2741 static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) { 2742 LocationSummary* locations = 2743 new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); 2744 InvokeRuntimeCallingConvention calling_convention; 2745 2746 locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); 2747 locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64)); 2748 } 2749 2750 static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) { 2751 LocationSummary* locations = 2752 new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified); 2753 InvokeRuntimeCallingConvention calling_convention; 2754 2755 locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0))); 2756 locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1))); 2757 locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64)); 2758 } 2759 2760 static void GenFPToFPCall(HInvoke* invoke, CodeGeneratorMIPS* codegen, QuickEntrypointEnum entry) { 2761 LocationSummary* locations = invoke->GetLocations(); 2762 FRegister in = locations->InAt(0).AsFpuRegister<FRegister>(); 2763 DCHECK_EQ(in, F12); 2764 FRegister out = locations->Out().AsFpuRegister<FRegister>(); 2765 DCHECK_EQ(out, F0); 2766 2767 codegen->InvokeRuntime(entry, invoke, invoke->GetDexPc()); 2768 } 2769 2770 static void GenFPFPToFPCall(HInvoke* invoke, 2771 CodeGeneratorMIPS* codegen, 2772 QuickEntrypointEnum entry) { 2773 LocationSummary* locations = invoke->GetLocations(); 2774 FRegister in0 = locations->InAt(0).AsFpuRegister<FRegister>(); 2775 DCHECK_EQ(in0, F12); 2776 FRegister in1 = locations->InAt(1).AsFpuRegister<FRegister>(); 2777 DCHECK_EQ(in1, F14); 2778 FRegister out = locations->Out().AsFpuRegister<FRegister>(); 2779 DCHECK_EQ(out, F0); 2780 2781 codegen->InvokeRuntime(entry, invoke, invoke->GetDexPc()); 2782 } 2783 2784 // static double java.lang.Math.cos(double a) 2785 void IntrinsicLocationsBuilderMIPS::VisitMathCos(HInvoke* invoke) { 2786 CreateFPToFPCallLocations(allocator_, invoke); 2787 } 2788 2789 void IntrinsicCodeGeneratorMIPS::VisitMathCos(HInvoke* invoke) { 2790 GenFPToFPCall(invoke, codegen_, kQuickCos); 2791 } 2792 2793 // static double java.lang.Math.sin(double a) 2794 void IntrinsicLocationsBuilderMIPS::VisitMathSin(HInvoke* invoke) { 2795 CreateFPToFPCallLocations(allocator_, invoke); 2796 } 2797 2798 void IntrinsicCodeGeneratorMIPS::VisitMathSin(HInvoke* invoke) { 2799 GenFPToFPCall(invoke, codegen_, kQuickSin); 2800 } 2801 2802 // static double java.lang.Math.acos(double a) 2803 void IntrinsicLocationsBuilderMIPS::VisitMathAcos(HInvoke* invoke) { 2804 CreateFPToFPCallLocations(allocator_, invoke); 2805 } 2806 2807 void IntrinsicCodeGeneratorMIPS::VisitMathAcos(HInvoke* invoke) { 2808 GenFPToFPCall(invoke, codegen_, kQuickAcos); 2809 } 2810 2811 // static double java.lang.Math.asin(double a) 2812 void IntrinsicLocationsBuilderMIPS::VisitMathAsin(HInvoke* invoke) { 2813 CreateFPToFPCallLocations(allocator_, invoke); 2814 } 2815 2816 void 
IntrinsicCodeGeneratorMIPS::VisitMathAsin(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickAsin);
}

// static double java.lang.Math.atan(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathAtan(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathAtan(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickAtan);
}

// static double java.lang.Math.atan2(double y, double x)
void IntrinsicLocationsBuilderMIPS::VisitMathAtan2(HInvoke* invoke) {
  CreateFPFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathAtan2(HInvoke* invoke) {
  GenFPFPToFPCall(invoke, codegen_, kQuickAtan2);
}

// static double java.lang.Math.pow(double x, double y)
void IntrinsicLocationsBuilderMIPS::VisitMathPow(HInvoke* invoke) {
  CreateFPFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathPow(HInvoke* invoke) {
  GenFPFPToFPCall(invoke, codegen_, kQuickPow);
}

// static double java.lang.Math.cbrt(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathCbrt(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathCbrt(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickCbrt);
}

// static double java.lang.Math.cosh(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathCosh(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathCosh(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickCosh);
}

// static double java.lang.Math.exp(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathExp(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathExp(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickExp);
}

// static double java.lang.Math.expm1(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathExpm1(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathExpm1(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickExpm1);
}

// static double java.lang.Math.hypot(double x, double y)
void IntrinsicLocationsBuilderMIPS::VisitMathHypot(HInvoke* invoke) {
  CreateFPFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathHypot(HInvoke* invoke) {
  GenFPFPToFPCall(invoke, codegen_, kQuickHypot);
}

// static double java.lang.Math.log(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathLog(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathLog(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickLog);
}

// static double java.lang.Math.log10(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathLog10(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathLog10(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickLog10);
}

// static double java.lang.Math.nextAfter(double start, double direction)
void IntrinsicLocationsBuilderMIPS::VisitMathNextAfter(HInvoke* invoke)
{
  CreateFPFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathNextAfter(HInvoke* invoke) {
  GenFPFPToFPCall(invoke, codegen_, kQuickNextAfter);
}

// static double java.lang.Math.sinh(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathSinh(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathSinh(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickSinh);
}

// static double java.lang.Math.tan(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathTan(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathTan(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickTan);
}

// static double java.lang.Math.tanh(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathTanh(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorMIPS::VisitMathTanh(HInvoke* invoke) {
  GenFPToFPCall(invoke, codegen_, kQuickTanh);
}

// static void java.lang.System.arraycopy(Object src, int srcPos,
//                                        Object dest, int destPos,
//                                        int length)
void IntrinsicLocationsBuilderMIPS::VisitSystemArrayCopyChar(HInvoke* invoke) {
  HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
  HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
  HIntConstant* length = invoke->InputAt(4)->AsIntConstant();

  // As long as we are checking, we might as well check to see if the src and dest
  // positions are >= 0.
  if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
      (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
    // We will have to fail anyway.
    return;
  }

  // And since we are already checking, check the length too.
  if (length != nullptr) {
    int32_t len = length->GetValue();
    if (len < 0) {
      // Just call as normal.
      return;
    }
  }

  // Okay, it is safe to generate inline code.
  LocationSummary* locations =
      new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
  // arraycopy(Object src, int srcPos, Object dest, int destPos, int length).
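  // Note: srcPos, destPos and length are accepted as RegisterOrConstant below
  // so that CheckPosition()/EnoughItems() can fold constant positions and
  // lengths into immediate operands (see the IsConstant() paths in those
  // helpers further down).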
2975 locations->SetInAt(0, Location::RequiresRegister()); 2976 locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1))); 2977 locations->SetInAt(2, Location::RequiresRegister()); 2978 locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3))); 2979 locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4))); 2980 2981 locations->AddTemp(Location::RequiresRegister()); 2982 locations->AddTemp(Location::RequiresRegister()); 2983 locations->AddTemp(Location::RequiresRegister()); 2984 } 2985 2986 // Utility routine to verify that "length(input) - pos >= length" 2987 static void EnoughItems(MipsAssembler* assembler, 2988 Register length_input_minus_pos, 2989 Location length, 2990 SlowPathCodeMIPS* slow_path) { 2991 if (length.IsConstant()) { 2992 int32_t length_constant = length.GetConstant()->AsIntConstant()->GetValue(); 2993 2994 if (IsInt<16>(length_constant)) { 2995 __ Slti(TMP, length_input_minus_pos, length_constant); 2996 __ Bnez(TMP, slow_path->GetEntryLabel()); 2997 } else { 2998 __ LoadConst32(TMP, length_constant); 2999 __ Blt(length_input_minus_pos, TMP, slow_path->GetEntryLabel()); 3000 } 3001 } else { 3002 __ Blt(length_input_minus_pos, length.AsRegister<Register>(), slow_path->GetEntryLabel()); 3003 } 3004 } 3005 3006 static void CheckPosition(MipsAssembler* assembler, 3007 Location pos, 3008 Register input, 3009 Location length, 3010 SlowPathCodeMIPS* slow_path, 3011 bool length_is_input_length = false) { 3012 // Where is the length in the Array? 3013 const uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value(); 3014 3015 // Calculate length(input) - pos. 3016 if (pos.IsConstant()) { 3017 int32_t pos_const = pos.GetConstant()->AsIntConstant()->GetValue(); 3018 if (pos_const == 0) { 3019 if (!length_is_input_length) { 3020 // Check that length(input) >= length. 3021 __ LoadFromOffset(kLoadWord, AT, input, length_offset); 3022 EnoughItems(assembler, AT, length, slow_path); 3023 } 3024 } else { 3025 // Check that (length(input) - pos) >= zero. 3026 __ LoadFromOffset(kLoadWord, AT, input, length_offset); 3027 DCHECK_GT(pos_const, 0); 3028 __ Addiu32(AT, AT, -pos_const, TMP); 3029 __ Bltz(AT, slow_path->GetEntryLabel()); 3030 3031 // Verify that (length(input) - pos) >= length. 3032 EnoughItems(assembler, AT, length, slow_path); 3033 } 3034 } else if (length_is_input_length) { 3035 // The only way the copy can succeed is if pos is zero. 3036 Register pos_reg = pos.AsRegister<Register>(); 3037 __ Bnez(pos_reg, slow_path->GetEntryLabel()); 3038 } else { 3039 // Verify that pos >= 0. 3040 Register pos_reg = pos.AsRegister<Register>(); 3041 __ Bltz(pos_reg, slow_path->GetEntryLabel()); 3042 3043 // Check that (length(input) - pos) >= zero. 3044 __ LoadFromOffset(kLoadWord, AT, input, length_offset); 3045 __ Subu(AT, AT, pos_reg); 3046 __ Bltz(AT, slow_path->GetEntryLabel()); 3047 3048 // Verify that (length(input) - pos) >= length. 
3049 EnoughItems(assembler, AT, length, slow_path); 3050 } 3051 } 3052 3053 void IntrinsicCodeGeneratorMIPS::VisitSystemArrayCopyChar(HInvoke* invoke) { 3054 MipsAssembler* assembler = GetAssembler(); 3055 LocationSummary* locations = invoke->GetLocations(); 3056 3057 Register src = locations->InAt(0).AsRegister<Register>(); 3058 Location src_pos = locations->InAt(1); 3059 Register dest = locations->InAt(2).AsRegister<Register>(); 3060 Location dest_pos = locations->InAt(3); 3061 Location length = locations->InAt(4); 3062 3063 MipsLabel loop; 3064 3065 Register dest_base = locations->GetTemp(0).AsRegister<Register>(); 3066 Register src_base = locations->GetTemp(1).AsRegister<Register>(); 3067 Register count = locations->GetTemp(2).AsRegister<Register>(); 3068 3069 SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke); 3070 codegen_->AddSlowPath(slow_path); 3071 3072 // Bail out if the source and destination are the same (to handle overlap). 3073 __ Beq(src, dest, slow_path->GetEntryLabel()); 3074 3075 // Bail out if the source is null. 3076 __ Beqz(src, slow_path->GetEntryLabel()); 3077 3078 // Bail out if the destination is null. 3079 __ Beqz(dest, slow_path->GetEntryLabel()); 3080 3081 // Load length into register for count. 3082 if (length.IsConstant()) { 3083 __ LoadConst32(count, length.GetConstant()->AsIntConstant()->GetValue()); 3084 } else { 3085 // If the length is negative, bail out. 3086 // We have already checked in the LocationsBuilder for the constant case. 3087 __ Bltz(length.AsRegister<Register>(), slow_path->GetEntryLabel()); 3088 3089 __ Move(count, length.AsRegister<Register>()); 3090 } 3091 3092 // Validity checks: source. 3093 CheckPosition(assembler, src_pos, src, Location::RegisterLocation(count), slow_path); 3094 3095 // Validity checks: dest. 3096 CheckPosition(assembler, dest_pos, dest, Location::RegisterLocation(count), slow_path); 3097 3098 // If count is zero, we're done. 3099 __ Beqz(count, slow_path->GetExitLabel()); 3100 3101 // Okay, everything checks out. Finally time to do the copy. 3102 // Check assumption that sizeof(Char) is 2 (used in scaling below). 3103 const size_t char_size = DataType::Size(DataType::Type::kUint16); 3104 DCHECK_EQ(char_size, 2u); 3105 3106 const size_t char_shift = DataType::SizeShift(DataType::Type::kUint16); 3107 3108 const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value(); 3109 3110 // Calculate source and destination addresses. 
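  // (A sketch of what follows: for a constant position the whole element
  // offset folds into one immediate, base + data_offset + 2 * pos; for a
  // register position the data offset is added first and ShiftAndAdd then
  // adds pos << char_shift.)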
  if (src_pos.IsConstant()) {
    int32_t src_pos_const = src_pos.GetConstant()->AsIntConstant()->GetValue();

    __ Addiu32(src_base, src, data_offset + char_size * src_pos_const, TMP);
  } else {
    __ Addiu32(src_base, src, data_offset, TMP);
    __ ShiftAndAdd(src_base, src_pos.AsRegister<Register>(), src_base, char_shift);
  }
  if (dest_pos.IsConstant()) {
    int32_t dest_pos_const = dest_pos.GetConstant()->AsIntConstant()->GetValue();

    __ Addiu32(dest_base, dest, data_offset + char_size * dest_pos_const, TMP);
  } else {
    __ Addiu32(dest_base, dest, data_offset, TMP);
    __ ShiftAndAdd(dest_base, dest_pos.AsRegister<Register>(), dest_base, char_shift);
  }

  __ Bind(&loop);
  __ Lh(TMP, src_base, 0);
  __ Addiu(src_base, src_base, char_size);
  __ Addiu(count, count, -1);
  __ Sh(TMP, dest_base, 0);
  __ Addiu(dest_base, dest_base, char_size);
  __ Bnez(count, &loop);

  __ Bind(slow_path->GetExitLabel());
}

// Integer java.lang.Integer.valueOf(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerValueOf(HInvoke* invoke) {
  InvokeRuntimeCallingConvention calling_convention;
  IntrinsicVisitor::ComputeIntegerValueOfLocations(
      invoke,
      codegen_,
      calling_convention.GetReturnLocation(DataType::Type::kReference),
      Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

void IntrinsicCodeGeneratorMIPS::VisitIntegerValueOf(HInvoke* invoke) {
  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
  LocationSummary* locations = invoke->GetLocations();
  MipsAssembler* assembler = GetAssembler();
  InstructionCodeGeneratorMIPS* icodegen =
      down_cast<InstructionCodeGeneratorMIPS*>(codegen_->GetInstructionVisitor());

  Register out = locations->Out().AsRegister<Register>();
  InvokeRuntimeCallingConvention calling_convention;
  if (invoke->InputAt(0)->IsConstant()) {
    int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
    if (value >= info.low && value <= info.high) {
      // Just embed the j.l.Integer in the code.
      ScopedObjectAccess soa(Thread::Current());
      mirror::Object* boxed = info.cache->Get(value + (-info.low));
      DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
      __ LoadConst32(out, address);
    } else {
      // Allocate and initialize a new j.l.Integer.
      // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
      // JIT object table.
      uint32_t address =
          dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
      __ LoadConst32(calling_convention.GetRegisterAt(0), address);
      codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
      CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
      __ StoreConstToOffset(kStoreWord, value, out, info.value_offset, TMP);
      // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
      // one.
      icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
    }
  } else {
    Register in = locations->InAt(0).AsRegister<Register>();
    MipsLabel allocate, done;
    int32_t count = static_cast<uint32_t>(info.high) - info.low + 1;

    // Is (info.low <= in) && (in <= info.high)?
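    // This is the classic unsigned range-check trick: after biasing by -info.low,
    // the single unsigned comparison 'in - low < high - low + 1' subsumes both
    // signed bounds checks. E.g. assuming low = -128 and high = 127, in = 200
    // gives 200 - (-128) = 328, which is not below 256, so we take the
    // allocation path.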
    __ Addiu32(out, in, -info.low);
    // As unsigned quantities, is out < (info.high - info.low + 1)?
    if (IsInt<16>(count)) {
      __ Sltiu(AT, out, count);
    } else {
      __ LoadConst32(AT, count);
      __ Sltu(AT, out, AT);
    }
    // Branch if out >= (info.high - info.low + 1).
    // This means that "in" is outside of the range [info.low, info.high].
    __ Beqz(AT, &allocate);

    // If the value is within the bounds, load the j.l.Integer directly from the array.
    uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
    uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
    __ LoadConst32(TMP, data_offset + address);
    __ ShiftAndAdd(out, out, TMP, TIMES_4);
    __ Lw(out, out, 0);
    __ MaybeUnpoisonHeapReference(out);
    __ B(&done);

    __ Bind(&allocate);
    // Otherwise allocate and initialize a new j.l.Integer.
    address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
    __ LoadConst32(calling_convention.GetRegisterAt(0), address);
    codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
    CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
    __ StoreToOffset(kStoreWord, in, out, info.value_offset);
    // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
    // one.
    icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
    __ Bind(&done);
  }
}

// static boolean java.lang.Thread.interrupted()
void IntrinsicLocationsBuilderMIPS::VisitThreadInterrupted(HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetOut(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorMIPS::VisitThreadInterrupted(HInvoke* invoke) {
  MipsAssembler* assembler = GetAssembler();
  Register out = invoke->GetLocations()->Out().AsRegister<Register>();
  int32_t offset = Thread::InterruptedOffset<kMipsPointerSize>().Int32Value();
  // Load the interrupted flag of the current thread (TR).
  __ LoadFromOffset(kLoadWord, out, TR, offset);
  MipsLabel done;
  // If the flag is clear, there is nothing to do. Otherwise clear it,
  // bracketing the store with full SYNC barriers.
  __ Beqz(out, &done);
  __ Sync(0);
  __ StoreToOffset(kStoreWord, ZERO, TR, offset);
  __ Sync(0);
  __ Bind(&done);
}

void IntrinsicLocationsBuilderMIPS::VisitReachabilityFence(HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::Any());
}

void IntrinsicCodeGeneratorMIPS::VisitReachabilityFence(HInvoke* invoke ATTRIBUTE_UNUSED) {
  // No code is emitted: keeping the input live in the LocationSummary
  // (Location::Any() above) is all the fence requires.
}

// Unimplemented intrinsics.
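// A note on the macro used below: UNIMPLEMENTED_INTRINSIC(MIPS, Name) (per
// its definition in intrinsics.h) stamps out empty Visit##Name bodies for
// both IntrinsicLocationsBuilderMIPS and IntrinsicCodeGeneratorMIPS, so no
// intrinsified LocationSummary is ever created, TryDispatch() returns false,
// and these invokes compile as ordinary calls.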

UNIMPLEMENTED_INTRINSIC(MIPS, MathCeil)
UNIMPLEMENTED_INTRINSIC(MIPS, MathFloor)
UNIMPLEMENTED_INTRINSIC(MIPS, MathRint)
UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetLongVolatile)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLongVolatile)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASLong)

UNIMPLEMENTED_INTRINSIC(MIPS, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(MIPS, SystemArrayCopy)

UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOf)
UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOfAfter)
UNIMPLEMENTED_INTRINSIC(MIPS, StringBufferAppend)
UNIMPLEMENTED_INTRINSIC(MIPS, StringBufferLength)
UNIMPLEMENTED_INTRINSIC(MIPS, StringBufferToString)
UNIMPLEMENTED_INTRINSIC(MIPS, StringBuilderAppend)
UNIMPLEMENTED_INTRINSIC(MIPS, StringBuilderLength)
UNIMPLEMENTED_INTRINSIC(MIPS, StringBuilderToString)

// 1.8.
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndAddInt)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndAddLong)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetInt)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetObject)

UNREACHABLE_INTRINSICS(MIPS)

#undef __

}  // namespace mips
}  // namespace art