1 /* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 #include "dex/compiler_ir.h" 17 #include "dex/compiler_internals.h" 18 #include "dex/quick/arm/arm_lir.h" 19 #include "dex/quick/mir_to_lir-inl.h" 20 #include "entrypoints/quick/quick_entrypoints.h" 21 #include "mirror/array.h" 22 #include "mirror/object_array-inl.h" 23 #include "mirror/object-inl.h" 24 #include "mirror/object_reference.h" 25 #include "verifier/method_verifier.h" 26 #include <functional> 27 28 namespace art { 29 30 // Shortcuts to repeatedly used long types. 31 typedef mirror::ObjectArray<mirror::Object> ObjArray; 32 typedef mirror::ObjectArray<mirror::Class> ClassArray; 33 34 /* 35 * This source files contains "gen" codegen routines that should 36 * be applicable to most targets. Only mid-level support utilities 37 * and "op" calls may be used here. 38 */ 39 40 /* 41 * Generate a kPseudoBarrier marker to indicate the boundary of special 42 * blocks. 43 */ 44 void Mir2Lir::GenBarrier() { 45 LIR* barrier = NewLIR0(kPseudoBarrier); 46 /* Mark all resources as being clobbered */ 47 DCHECK(!barrier->flags.use_def_invalid); 48 barrier->u.m.def_mask = &kEncodeAll; 49 } 50 51 void Mir2Lir::GenDivZeroException() { 52 LIR* branch = OpUnconditionalBranch(nullptr); 53 AddDivZeroCheckSlowPath(branch); 54 } 55 56 void Mir2Lir::GenDivZeroCheck(ConditionCode c_code) { 57 LIR* branch = OpCondBranch(c_code, nullptr); 58 AddDivZeroCheckSlowPath(branch); 59 } 60 61 void Mir2Lir::GenDivZeroCheck(RegStorage reg) { 62 LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr); 63 AddDivZeroCheckSlowPath(branch); 64 } 65 66 void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) { 67 class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath { 68 public: 69 DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch) 70 : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) { 71 } 72 73 void Compile() OVERRIDE { 74 m2l_->ResetRegPool(); 75 m2l_->ResetDefTracking(); 76 GenerateTargetLabel(kPseudoThrowTarget); 77 m2l_->CallRuntimeHelper(kQuickThrowDivZero, true); 78 } 79 }; 80 81 AddSlowPath(new (arena_) DivZeroCheckSlowPath(this, branch)); 82 } 83 84 void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) { 85 class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath { 86 public: 87 ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, RegStorage index, RegStorage length) 88 : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch), 89 index_(index), length_(length) { 90 } 91 92 void Compile() OVERRIDE { 93 m2l_->ResetRegPool(); 94 m2l_->ResetDefTracking(); 95 GenerateTargetLabel(kPseudoThrowTarget); 96 m2l_->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, index_, length_, true); 97 } 98 99 private: 100 const RegStorage index_; 101 const RegStorage length_; 102 }; 103 104 LIR* branch = OpCmpBranch(kCondUge, index, length, nullptr); 105 AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length)); 106 } 107 108 void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) { 109 class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath { 110 public: 111 ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, int index, RegStorage length) 112 : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch), 113 index_(index), length_(length) { 114 } 115 116 void Compile() OVERRIDE { 117 m2l_->ResetRegPool(); 118 m2l_->ResetDefTracking(); 119 GenerateTargetLabel(kPseudoThrowTarget); 120 121 RegStorage arg1_32 = m2l_->TargetReg(kArg1, kNotWide); 122 RegStorage arg0_32 = m2l_->TargetReg(kArg0, kNotWide); 123 124 m2l_->OpRegCopy(arg1_32, length_); 125 m2l_->LoadConstant(arg0_32, index_); 126 m2l_->CallRuntimeHelperRegReg(kQuickThrowArrayBounds, arg0_32, arg1_32, true); 127 } 128 129 private: 130 const int32_t index_; 131 const RegStorage length_; 132 }; 133 134 LIR* branch = OpCmpImmBranch(kCondLs, length, index, nullptr); 135 AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length)); 136 } 137 138 LIR* Mir2Lir::GenNullCheck(RegStorage reg) { 139 class NullCheckSlowPath : public Mir2Lir::LIRSlowPath { 140 public: 141 NullCheckSlowPath(Mir2Lir* m2l, LIR* branch) 142 : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) { 143 } 144 145 void Compile() OVERRIDE { 146 m2l_->ResetRegPool(); 147 m2l_->ResetDefTracking(); 148 GenerateTargetLabel(kPseudoThrowTarget); 149 m2l_->CallRuntimeHelper(kQuickThrowNullPointer, true); 150 } 151 }; 152 153 LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr); 154 AddSlowPath(new (arena_) NullCheckSlowPath(this, branch)); 155 return branch; 156 } 157 158 /* Perform null-check on a register. */ 159 LIR* Mir2Lir::GenNullCheck(RegStorage m_reg, int opt_flags) { 160 if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { 161 return GenExplicitNullCheck(m_reg, opt_flags); 162 } 163 return nullptr; 164 } 165 166 /* Perform an explicit null-check on a register. */ 167 LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) { 168 if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) { 169 return NULL; 170 } 171 return GenNullCheck(m_reg); 172 } 173 174 void Mir2Lir::MarkPossibleNullPointerException(int opt_flags) { 175 if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { 176 if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) { 177 return; 178 } 179 // Insert after last instruction. 180 MarkSafepointPC(last_lir_insn_); 181 } 182 } 183 184 void Mir2Lir::MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after) { 185 if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { 186 if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) { 187 return; 188 } 189 MarkSafepointPCAfter(after); 190 } 191 } 192 193 void Mir2Lir::MarkPossibleStackOverflowException() { 194 if (cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) { 195 MarkSafepointPC(last_lir_insn_); 196 } 197 } 198 199 void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) { 200 if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) { 201 if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) { 202 return; 203 } 204 // Force an implicit null check by performing a memory operation (load) from the given 205 // register with offset 0. This will cause a signal if the register contains 0 (null). 206 RegStorage tmp = AllocTemp(); 207 // TODO: for Mips, would be best to use rZERO as the bogus register target. 208 LIR* load = Load32Disp(reg, 0, tmp); 209 FreeTemp(tmp); 210 MarkSafepointPC(load); 211 } 212 } 213 214 void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, 215 RegLocation rl_src2, LIR* taken, 216 LIR* fall_through) { 217 DCHECK(!rl_src1.fp); 218 DCHECK(!rl_src2.fp); 219 ConditionCode cond; 220 switch (opcode) { 221 case Instruction::IF_EQ: 222 cond = kCondEq; 223 break; 224 case Instruction::IF_NE: 225 cond = kCondNe; 226 break; 227 case Instruction::IF_LT: 228 cond = kCondLt; 229 break; 230 case Instruction::IF_GE: 231 cond = kCondGe; 232 break; 233 case Instruction::IF_GT: 234 cond = kCondGt; 235 break; 236 case Instruction::IF_LE: 237 cond = kCondLe; 238 break; 239 default: 240 cond = static_cast<ConditionCode>(0); 241 LOG(FATAL) << "Unexpected opcode " << opcode; 242 } 243 244 // Normalize such that if either operand is constant, src2 will be constant 245 if (rl_src1.is_const) { 246 RegLocation rl_temp = rl_src1; 247 rl_src1 = rl_src2; 248 rl_src2 = rl_temp; 249 cond = FlipComparisonOrder(cond); 250 } 251 252 rl_src1 = LoadValue(rl_src1); 253 // Is this really an immediate comparison? 254 if (rl_src2.is_const) { 255 // If it's already live in a register or not easily materialized, just keep going 256 RegLocation rl_temp = UpdateLoc(rl_src2); 257 int32_t constant_value = mir_graph_->ConstantValue(rl_src2); 258 if ((rl_temp.location == kLocDalvikFrame) && 259 InexpensiveConstantInt(constant_value, opcode)) { 260 // OK - convert this to a compare immediate and branch 261 OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken); 262 return; 263 } 264 265 // It's also commonly more efficient to have a test against zero with Eq/Ne. This is not worse 266 // for x86, and allows a cbz/cbnz for Arm and Mips. At the same time, it works around a register 267 // mismatch for 64b systems, where a reference is compared against null, as dex bytecode uses 268 // the 32b literal 0 for null. 269 if (constant_value == 0 && (cond == kCondEq || cond == kCondNe)) { 270 // Use the OpCmpImmBranch and ignore the value in the register. 271 OpCmpImmBranch(cond, rl_src1.reg, 0, taken); 272 return; 273 } 274 } 275 276 rl_src2 = LoadValue(rl_src2); 277 OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken); 278 } 279 280 void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken, 281 LIR* fall_through) { 282 ConditionCode cond; 283 DCHECK(!rl_src.fp); 284 rl_src = LoadValue(rl_src); 285 switch (opcode) { 286 case Instruction::IF_EQZ: 287 cond = kCondEq; 288 break; 289 case Instruction::IF_NEZ: 290 cond = kCondNe; 291 break; 292 case Instruction::IF_LTZ: 293 cond = kCondLt; 294 break; 295 case Instruction::IF_GEZ: 296 cond = kCondGe; 297 break; 298 case Instruction::IF_GTZ: 299 cond = kCondGt; 300 break; 301 case Instruction::IF_LEZ: 302 cond = kCondLe; 303 break; 304 default: 305 cond = static_cast<ConditionCode>(0); 306 LOG(FATAL) << "Unexpected opcode " << opcode; 307 } 308 OpCmpImmBranch(cond, rl_src.reg, 0, taken); 309 } 310 311 void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) { 312 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 313 if (rl_src.location == kLocPhysReg) { 314 OpRegCopy(rl_result.reg, rl_src.reg); 315 } else { 316 LoadValueDirect(rl_src, rl_result.reg.GetLow()); 317 } 318 OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_result.reg.GetLow(), 31); 319 StoreValueWide(rl_dest, rl_result); 320 } 321 322 void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest, 323 RegLocation rl_src) { 324 rl_src = LoadValue(rl_src, kCoreReg); 325 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 326 OpKind op = kOpInvalid; 327 switch (opcode) { 328 case Instruction::INT_TO_BYTE: 329 op = kOp2Byte; 330 break; 331 case Instruction::INT_TO_SHORT: 332 op = kOp2Short; 333 break; 334 case Instruction::INT_TO_CHAR: 335 op = kOp2Char; 336 break; 337 default: 338 LOG(ERROR) << "Bad int conversion type"; 339 } 340 OpRegReg(op, rl_result.reg, rl_src.reg); 341 StoreValue(rl_dest, rl_result); 342 } 343 344 /* 345 * Let helper function take care of everything. Will call 346 * Array::AllocFromCode(type_idx, method, count); 347 * Note: AllocFromCode will handle checks for errNegativeArraySize. 348 */ 349 void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest, 350 RegLocation rl_src) { 351 FlushAllRegs(); /* Everything to home location */ 352 const DexFile* dex_file = cu_->dex_file; 353 CompilerDriver* driver = cu_->compiler_driver; 354 if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *dex_file, type_idx)) { 355 bool is_type_initialized; // Ignored as an array does not have an initializer. 356 bool use_direct_type_ptr; 357 uintptr_t direct_type_ptr; 358 bool is_finalizable; 359 if (kEmbedClassInCode && 360 driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr, 361 &direct_type_ptr, &is_finalizable)) { 362 // The fast path. 363 if (!use_direct_type_ptr) { 364 LoadClassType(type_idx, kArg0); 365 CallRuntimeHelperRegMethodRegLocation(kQuickAllocArrayResolved, TargetReg(kArg0, kNotWide), 366 rl_src, true); 367 } else { 368 // Use the direct pointer. 369 CallRuntimeHelperImmMethodRegLocation(kQuickAllocArrayResolved, direct_type_ptr, rl_src, 370 true); 371 } 372 } else { 373 // The slow path. 374 CallRuntimeHelperImmMethodRegLocation(kQuickAllocArray, type_idx, rl_src, true); 375 } 376 } else { 377 CallRuntimeHelperImmMethodRegLocation(kQuickAllocArrayWithAccessCheck, type_idx, rl_src, true); 378 } 379 StoreValue(rl_dest, GetReturn(kRefReg)); 380 } 381 382 /* 383 * Similar to GenNewArray, but with post-allocation initialization. 384 * Verifier guarantees we're dealing with an array class. Current 385 * code throws runtime exception "bad Filled array req" for 'D' and 'J'. 386 * Current code also throws internal unimp if not 'L', '[' or 'I'. 387 */ 388 void Mir2Lir::GenFilledNewArray(CallInfo* info) { 389 int elems = info->num_arg_words; 390 int type_idx = info->index; 391 FlushAllRegs(); /* Everything to home location */ 392 QuickEntrypointEnum target; 393 if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, 394 type_idx)) { 395 target = kQuickCheckAndAllocArray; 396 } else { 397 target = kQuickCheckAndAllocArrayWithAccessCheck; 398 } 399 CallRuntimeHelperImmMethodImm(target, type_idx, elems, true); 400 FreeTemp(TargetReg(kArg2, kNotWide)); 401 FreeTemp(TargetReg(kArg1, kNotWide)); 402 /* 403 * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the 404 * return region. Because AllocFromCode placed the new array 405 * in kRet0, we'll just lock it into place. When debugger support is 406 * added, it may be necessary to additionally copy all return 407 * values to a home location in thread-local storage 408 */ 409 RegStorage ref_reg = TargetReg(kRet0, kRef); 410 LockTemp(ref_reg); 411 412 // TODO: use the correct component size, currently all supported types 413 // share array alignment with ints (see comment at head of function) 414 size_t component_size = sizeof(int32_t); 415 416 // Having a range of 0 is legal 417 if (info->is_range && (elems > 0)) { 418 /* 419 * Bit of ugliness here. We're going generate a mem copy loop 420 * on the register range, but it is possible that some regs 421 * in the range have been promoted. This is unlikely, but 422 * before generating the copy, we'll just force a flush 423 * of any regs in the source range that have been promoted to 424 * home location. 425 */ 426 for (int i = 0; i < elems; i++) { 427 RegLocation loc = UpdateLoc(info->args[i]); 428 if (loc.location == kLocPhysReg) { 429 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 430 Store32Disp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg); 431 } 432 } 433 /* 434 * TUNING note: generated code here could be much improved, but 435 * this is an uncommon operation and isn't especially performance 436 * critical. 437 */ 438 // This is addressing the stack, which may be out of the 4G area. 439 RegStorage r_src = AllocTempRef(); 440 RegStorage r_dst = AllocTempRef(); 441 RegStorage r_idx = AllocTempRef(); // Not really a reference, but match src/dst. 442 RegStorage r_val; 443 switch (cu_->instruction_set) { 444 case kThumb2: 445 case kArm64: 446 r_val = TargetReg(kLr, kNotWide); 447 break; 448 case kX86: 449 case kX86_64: 450 FreeTemp(ref_reg); 451 r_val = AllocTemp(); 452 break; 453 case kMips: 454 r_val = AllocTemp(); 455 break; 456 default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set; 457 } 458 // Set up source pointer 459 RegLocation rl_first = info->args[0]; 460 OpRegRegImm(kOpAdd, r_src, TargetPtrReg(kSp), SRegOffset(rl_first.s_reg_low)); 461 // Set up the target pointer 462 OpRegRegImm(kOpAdd, r_dst, ref_reg, 463 mirror::Array::DataOffset(component_size).Int32Value()); 464 // Set up the loop counter (known to be > 0) 465 LoadConstant(r_idx, elems - 1); 466 // Generate the copy loop. Going backwards for convenience 467 LIR* target = NewLIR0(kPseudoTargetLabel); 468 // Copy next element 469 { 470 ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); 471 LoadBaseIndexed(r_src, r_idx, r_val, 2, k32); 472 // NOTE: No dalvik register annotation, local optimizations will be stopped 473 // by the loop boundaries. 474 } 475 StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32); 476 FreeTemp(r_val); 477 OpDecAndBranch(kCondGe, r_idx, target); 478 if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) { 479 // Restore the target pointer 480 OpRegRegImm(kOpAdd, ref_reg, r_dst, 481 -mirror::Array::DataOffset(component_size).Int32Value()); 482 } 483 } else if (!info->is_range) { 484 // TUNING: interleave 485 for (int i = 0; i < elems; i++) { 486 RegLocation rl_arg = LoadValue(info->args[i], kCoreReg); 487 Store32Disp(ref_reg, 488 mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg); 489 // If the LoadValue caused a temp to be allocated, free it 490 if (IsTemp(rl_arg.reg)) { 491 FreeTemp(rl_arg.reg); 492 } 493 } 494 } 495 if (info->result.location != kLocInvalid) { 496 StoreValue(info->result, GetReturn(kRefReg)); 497 } 498 } 499 500 // 501 // Slow path to ensure a class is initialized for sget/sput. 502 // 503 class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath { 504 public: 505 StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index, 506 RegStorage r_base) : 507 LIRSlowPath(m2l, m2l->GetCurrentDexPc(), unresolved, cont), uninit_(uninit), 508 storage_index_(storage_index), r_base_(r_base) { 509 } 510 511 void Compile() { 512 LIR* unresolved_target = GenerateTargetLabel(); 513 uninit_->target = unresolved_target; 514 m2l_->CallRuntimeHelperImm(kQuickInitializeStaticStorage, storage_index_, true); 515 // Copy helper's result into r_base, a no-op on all but MIPS. 516 m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0, kRef)); 517 518 m2l_->OpUnconditionalBranch(cont_); 519 } 520 521 private: 522 LIR* const uninit_; 523 const int storage_index_; 524 const RegStorage r_base_; 525 }; 526 527 void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double, 528 bool is_object) { 529 const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir); 530 cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass()); 531 OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object); 532 if (!SLOW_FIELD_PATH && field_info.FastPut()) { 533 DCHECK_GE(field_info.FieldOffset().Int32Value(), 0); 534 RegStorage r_base; 535 if (field_info.IsReferrersClass()) { 536 // Fast path, static storage base is this method's class 537 RegLocation rl_method = LoadCurrMethod(); 538 r_base = AllocTempRef(); 539 LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base, 540 kNotVolatile); 541 if (IsTemp(rl_method.reg)) { 542 FreeTemp(rl_method.reg); 543 } 544 } else { 545 // Medium path, static storage base in a different class which requires checks that the other 546 // class is initialized. 547 // TODO: remove initialized check now that we are initializing classes in the compiler driver. 548 DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex); 549 // May do runtime call so everything to home locations. 550 FlushAllRegs(); 551 // Using fixed register to sync with possible call to runtime support. 552 RegStorage r_method = TargetReg(kArg1, kRef); 553 LockTemp(r_method); 554 LoadCurrMethodDirect(r_method); 555 r_base = TargetReg(kArg0, kRef); 556 LockTemp(r_base); 557 LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base, 558 kNotVolatile); 559 int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value(); 560 LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile); 561 // r_base now points at static storage (Class*) or NULL if the type is not yet resolved. 562 if (!field_info.IsInitialized() && 563 (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) { 564 // Check if r_base is NULL or a not yet initialized class. 565 566 // The slow path is invoked if the r_base is NULL or the class pointed 567 // to by it is not initialized. 568 LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL); 569 RegStorage r_tmp = TargetReg(kArg2, kNotWide); 570 LockTemp(r_tmp); 571 LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base, 572 mirror::Class::StatusOffset().Int32Value(), 573 mirror::Class::kStatusInitialized, nullptr, nullptr); 574 LIR* cont = NewLIR0(kPseudoTargetLabel); 575 576 AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont, 577 field_info.StorageIndex(), r_base)); 578 579 FreeTemp(r_tmp); 580 // Ensure load of status and store of value don't re-order. 581 // TODO: Presumably the actual value store is control-dependent on the status load, 582 // and will thus not be reordered in any case, since stores are never speculated. 583 // Does later code "know" that the class is now initialized? If so, we still 584 // need the barrier to guard later static loads. 585 GenMemBarrier(kLoadAny); 586 } 587 FreeTemp(r_method); 588 } 589 // rBase now holds static storage base 590 RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile()); 591 if (is_long_or_double) { 592 rl_src = LoadValueWide(rl_src, reg_class); 593 } else { 594 rl_src = LoadValue(rl_src, reg_class); 595 } 596 if (is_object) { 597 StoreRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, 598 field_info.IsVolatile() ? kVolatile : kNotVolatile); 599 } else { 600 StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size, 601 field_info.IsVolatile() ? kVolatile : kNotVolatile); 602 } 603 if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) { 604 MarkGCCard(rl_src.reg, r_base); 605 } 606 FreeTemp(r_base); 607 } else { 608 FlushAllRegs(); // Everything to home locations 609 QuickEntrypointEnum target = 610 is_long_or_double ? kQuickSet64Static 611 : (is_object ? kQuickSetObjStatic : kQuickSet32Static); 612 CallRuntimeHelperImmRegLocation(target, field_info.FieldIndex(), rl_src, true); 613 } 614 } 615 616 void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, 617 bool is_long_or_double, bool is_object) { 618 const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir); 619 cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass()); 620 OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object); 621 if (!SLOW_FIELD_PATH && field_info.FastGet()) { 622 DCHECK_GE(field_info.FieldOffset().Int32Value(), 0); 623 RegStorage r_base; 624 if (field_info.IsReferrersClass()) { 625 // Fast path, static storage base is this method's class 626 RegLocation rl_method = LoadCurrMethod(); 627 r_base = AllocTempRef(); 628 LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base, 629 kNotVolatile); 630 } else { 631 // Medium path, static storage base in a different class which requires checks that the other 632 // class is initialized 633 DCHECK_NE(field_info.StorageIndex(), DexFile::kDexNoIndex); 634 // May do runtime call so everything to home locations. 635 FlushAllRegs(); 636 // Using fixed register to sync with possible call to runtime support. 637 RegStorage r_method = TargetReg(kArg1, kRef); 638 LockTemp(r_method); 639 LoadCurrMethodDirect(r_method); 640 r_base = TargetReg(kArg0, kRef); 641 LockTemp(r_base); 642 LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base, 643 kNotVolatile); 644 int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value(); 645 LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile); 646 // r_base now points at static storage (Class*) or NULL if the type is not yet resolved. 647 if (!field_info.IsInitialized() && 648 (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) { 649 // Check if r_base is NULL or a not yet initialized class. 650 651 // The slow path is invoked if the r_base is NULL or the class pointed 652 // to by it is not initialized. 653 LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL); 654 RegStorage r_tmp = TargetReg(kArg2, kNotWide); 655 LockTemp(r_tmp); 656 LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base, 657 mirror::Class::StatusOffset().Int32Value(), 658 mirror::Class::kStatusInitialized, nullptr, nullptr); 659 LIR* cont = NewLIR0(kPseudoTargetLabel); 660 661 AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont, 662 field_info.StorageIndex(), r_base)); 663 664 FreeTemp(r_tmp); 665 // Ensure load of status and load of value don't re-order. 666 GenMemBarrier(kLoadAny); 667 } 668 FreeTemp(r_method); 669 } 670 // r_base now holds static storage base 671 RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile()); 672 RegLocation rl_result = EvalLoc(rl_dest, reg_class, true); 673 674 int field_offset = field_info.FieldOffset().Int32Value(); 675 if (is_object) { 676 LoadRefDisp(r_base, field_offset, rl_result.reg, field_info.IsVolatile() ? kVolatile : 677 kNotVolatile); 678 } else { 679 LoadBaseDisp(r_base, field_offset, rl_result.reg, load_size, field_info.IsVolatile() ? 680 kVolatile : kNotVolatile); 681 } 682 FreeTemp(r_base); 683 684 if (is_long_or_double) { 685 StoreValueWide(rl_dest, rl_result); 686 } else { 687 StoreValue(rl_dest, rl_result); 688 } 689 } else { 690 FlushAllRegs(); // Everything to home locations 691 QuickEntrypointEnum target = 692 is_long_or_double ? kQuickGet64Static 693 : (is_object ? kQuickGetObjStatic : kQuickGet32Static); 694 CallRuntimeHelperImm(target, field_info.FieldIndex(), true); 695 696 // FIXME: pGetXXStatic always return an int or int64 regardless of rl_dest.fp. 697 if (is_long_or_double) { 698 RegLocation rl_result = GetReturnWide(kCoreReg); 699 StoreValueWide(rl_dest, rl_result); 700 } else { 701 RegLocation rl_result = GetReturn(rl_dest.ref ? kRefReg : kCoreReg); 702 StoreValue(rl_dest, rl_result); 703 } 704 } 705 } 706 707 // Generate code for all slow paths. 708 void Mir2Lir::HandleSlowPaths() { 709 // We should check slow_paths_.Size() every time, because a new slow path 710 // may be created during slowpath->Compile(). 711 for (size_t i = 0; i < slow_paths_.Size(); ++i) { 712 LIRSlowPath* slowpath = slow_paths_.Get(i); 713 slowpath->Compile(); 714 } 715 slow_paths_.Reset(); 716 } 717 718 void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, 719 RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, 720 bool is_object) { 721 const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir); 722 cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet()); 723 OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object); 724 if (!SLOW_FIELD_PATH && field_info.FastGet()) { 725 RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile()); 726 // A load of the class will lead to an iget with offset 0. 727 DCHECK_GE(field_info.FieldOffset().Int32Value(), 0); 728 rl_obj = LoadValue(rl_obj, kRefReg); 729 GenNullCheck(rl_obj.reg, opt_flags); 730 RegLocation rl_result = EvalLoc(rl_dest, reg_class, true); 731 int field_offset = field_info.FieldOffset().Int32Value(); 732 LIR* load_lir; 733 if (is_object) { 734 load_lir = LoadRefDisp(rl_obj.reg, field_offset, rl_result.reg, field_info.IsVolatile() ? 735 kVolatile : kNotVolatile); 736 } else { 737 load_lir = LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, load_size, 738 field_info.IsVolatile() ? kVolatile : kNotVolatile); 739 } 740 MarkPossibleNullPointerExceptionAfter(opt_flags, load_lir); 741 if (is_long_or_double) { 742 StoreValueWide(rl_dest, rl_result); 743 } else { 744 StoreValue(rl_dest, rl_result); 745 } 746 } else { 747 QuickEntrypointEnum target = 748 is_long_or_double ? kQuickGet64Instance 749 : (is_object ? kQuickGetObjInstance : kQuickGet32Instance); 750 // Second argument of pGetXXInstance is always a reference. 751 DCHECK_EQ(static_cast<unsigned int>(rl_obj.wide), 0U); 752 CallRuntimeHelperImmRegLocation(target, field_info.FieldIndex(), rl_obj, true); 753 754 // FIXME: pGetXXInstance always return an int or int64 regardless of rl_dest.fp. 755 if (is_long_or_double) { 756 RegLocation rl_result = GetReturnWide(kCoreReg); 757 StoreValueWide(rl_dest, rl_result); 758 } else { 759 RegLocation rl_result = GetReturn(rl_dest.ref ? kRefReg : kCoreReg); 760 StoreValue(rl_dest, rl_result); 761 } 762 } 763 } 764 765 void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size, 766 RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, 767 bool is_object) { 768 const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir); 769 cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut()); 770 OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object); 771 if (!SLOW_FIELD_PATH && field_info.FastPut()) { 772 RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile()); 773 // Dex code never writes to the class field. 774 DCHECK_GE(static_cast<uint32_t>(field_info.FieldOffset().Int32Value()), 775 sizeof(mirror::HeapReference<mirror::Class>)); 776 rl_obj = LoadValue(rl_obj, kRefReg); 777 if (is_long_or_double) { 778 rl_src = LoadValueWide(rl_src, reg_class); 779 } else { 780 rl_src = LoadValue(rl_src, reg_class); 781 } 782 GenNullCheck(rl_obj.reg, opt_flags); 783 int field_offset = field_info.FieldOffset().Int32Value(); 784 LIR* store; 785 if (is_object) { 786 store = StoreRefDisp(rl_obj.reg, field_offset, rl_src.reg, field_info.IsVolatile() ? 787 kVolatile : kNotVolatile); 788 } else { 789 store = StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, store_size, 790 field_info.IsVolatile() ? kVolatile : kNotVolatile); 791 } 792 MarkPossibleNullPointerExceptionAfter(opt_flags, store); 793 if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) { 794 MarkGCCard(rl_src.reg, rl_obj.reg); 795 } 796 } else { 797 QuickEntrypointEnum target = 798 is_long_or_double ? kQuickSet64Instance 799 : (is_object ? kQuickSetObjInstance : kQuickSet32Instance); 800 CallRuntimeHelperImmRegLocationRegLocation(target, field_info.FieldIndex(), rl_obj, rl_src, 801 true); 802 } 803 } 804 805 void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index, 806 RegLocation rl_src) { 807 bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK); 808 bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) && 809 (opt_flags & MIR_IGNORE_NULL_CHECK)); 810 QuickEntrypointEnum target = needs_range_check 811 ? (needs_null_check ? kQuickAputObjectWithNullAndBoundCheck 812 : kQuickAputObjectWithBoundCheck) 813 : kQuickAputObject; 814 CallRuntimeHelperRegLocationRegLocationRegLocation(target, rl_array, rl_index, rl_src, true); 815 } 816 817 void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { 818 RegLocation rl_method = LoadCurrMethod(); 819 CheckRegLocation(rl_method); 820 RegStorage res_reg = AllocTempRef(); 821 RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true); 822 if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, 823 *cu_->dex_file, 824 type_idx)) { 825 // Call out to helper which resolves type and verifies access. 826 // Resolved type returned in kRet0. 827 CallRuntimeHelperImmReg(kQuickInitializeTypeAndVerifyAccess, type_idx, rl_method.reg, true); 828 RegLocation rl_result = GetReturn(kRefReg); 829 StoreValue(rl_dest, rl_result); 830 } else { 831 // We're don't need access checks, load type from dex cache 832 int32_t dex_cache_offset = 833 mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(); 834 LoadRefDisp(rl_method.reg, dex_cache_offset, res_reg, kNotVolatile); 835 int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); 836 LoadRefDisp(res_reg, offset_of_type, rl_result.reg, kNotVolatile); 837 if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, 838 type_idx) || SLOW_TYPE_PATH) { 839 // Slow path, at runtime test if type is null and if so initialize 840 FlushAllRegs(); 841 LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg, 0, NULL); 842 LIR* cont = NewLIR0(kPseudoTargetLabel); 843 844 // Object to generate the slow path for class resolution. 845 class SlowPath : public LIRSlowPath { 846 public: 847 SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx, 848 const RegLocation& rl_method, const RegLocation& rl_result) : 849 LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx), 850 rl_method_(rl_method), rl_result_(rl_result) { 851 } 852 853 void Compile() { 854 GenerateTargetLabel(); 855 856 m2l_->CallRuntimeHelperImmReg(kQuickInitializeType, type_idx_, rl_method_.reg, true); 857 m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetReg(kRet0, kRef)); 858 m2l_->OpUnconditionalBranch(cont_); 859 } 860 861 private: 862 const int type_idx_; 863 const RegLocation rl_method_; 864 const RegLocation rl_result_; 865 }; 866 867 // Add to list for future. 868 AddSlowPath(new (arena_) SlowPath(this, branch, cont, type_idx, rl_method, rl_result)); 869 870 StoreValue(rl_dest, rl_result); 871 } else { 872 // Fast path, we're done - just store result 873 StoreValue(rl_dest, rl_result); 874 } 875 } 876 } 877 878 void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { 879 /* NOTE: Most strings should be available at compile time */ 880 int32_t offset_of_string = mirror::ObjectArray<mirror::String>::OffsetOfElement(string_idx). 881 Int32Value(); 882 if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache( 883 *cu_->dex_file, string_idx) || SLOW_STRING_PATH) { 884 // slow path, resolve string if not in dex cache 885 FlushAllRegs(); 886 LockCallTemps(); // Using explicit registers 887 888 // If the Method* is already in a register, we can save a copy. 889 RegLocation rl_method = mir_graph_->GetMethodLoc(); 890 RegStorage r_method; 891 if (rl_method.location == kLocPhysReg) { 892 // A temp would conflict with register use below. 893 DCHECK(!IsTemp(rl_method.reg)); 894 r_method = rl_method.reg; 895 } else { 896 r_method = TargetReg(kArg2, kRef); 897 LoadCurrMethodDirect(r_method); 898 } 899 LoadRefDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), 900 TargetReg(kArg0, kRef), kNotVolatile); 901 902 // Might call out to helper, which will return resolved string in kRet0 903 LoadRefDisp(TargetReg(kArg0, kRef), offset_of_string, TargetReg(kRet0, kRef), kNotVolatile); 904 LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0, kRef), 0, NULL); 905 LIR* cont = NewLIR0(kPseudoTargetLabel); 906 907 { 908 // Object to generate the slow path for string resolution. 909 class SlowPath : public LIRSlowPath { 910 public: 911 SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method, int32_t string_idx) : 912 LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), 913 r_method_(r_method), string_idx_(string_idx) { 914 } 915 916 void Compile() { 917 GenerateTargetLabel(); 918 m2l_->CallRuntimeHelperRegImm(kQuickResolveString, r_method_, string_idx_, true); 919 m2l_->OpUnconditionalBranch(cont_); 920 } 921 922 private: 923 const RegStorage r_method_; 924 const int32_t string_idx_; 925 }; 926 927 AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method, string_idx)); 928 } 929 930 GenBarrier(); 931 StoreValue(rl_dest, GetReturn(kRefReg)); 932 } else { 933 RegLocation rl_method = LoadCurrMethod(); 934 RegStorage res_reg = AllocTempRef(); 935 RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true); 936 LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg, 937 kNotVolatile); 938 LoadRefDisp(res_reg, offset_of_string, rl_result.reg, kNotVolatile); 939 StoreValue(rl_dest, rl_result); 940 } 941 } 942 943 /* 944 * Let helper function take care of everything. Will 945 * call Class::NewInstanceFromCode(type_idx, method); 946 */ 947 void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) { 948 FlushAllRegs(); /* Everything to home location */ 949 // alloc will always check for resolution, do we also need to verify 950 // access because the verifier was unable to? 951 const DexFile* dex_file = cu_->dex_file; 952 CompilerDriver* driver = cu_->compiler_driver; 953 if (driver->CanAccessInstantiableTypeWithoutChecks(cu_->method_idx, *dex_file, type_idx)) { 954 bool is_type_initialized; 955 bool use_direct_type_ptr; 956 uintptr_t direct_type_ptr; 957 bool is_finalizable; 958 if (kEmbedClassInCode && 959 driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr, 960 &direct_type_ptr, &is_finalizable) && 961 !is_finalizable) { 962 // The fast path. 963 if (!use_direct_type_ptr) { 964 LoadClassType(type_idx, kArg0); 965 if (!is_type_initialized) { 966 CallRuntimeHelperRegMethod(kQuickAllocObjectResolved, TargetReg(kArg0, kRef), true); 967 } else { 968 CallRuntimeHelperRegMethod(kQuickAllocObjectInitialized, TargetReg(kArg0, kRef), true); 969 } 970 } else { 971 // Use the direct pointer. 972 if (!is_type_initialized) { 973 CallRuntimeHelperImmMethod(kQuickAllocObjectResolved, direct_type_ptr, true); 974 } else { 975 CallRuntimeHelperImmMethod(kQuickAllocObjectInitialized, direct_type_ptr, true); 976 } 977 } 978 } else { 979 // The slow path. 980 CallRuntimeHelperImmMethod(kQuickAllocObject, type_idx, true); 981 } 982 } else { 983 CallRuntimeHelperImmMethod(kQuickAllocObjectWithAccessCheck, type_idx, true); 984 } 985 StoreValue(rl_dest, GetReturn(kRefReg)); 986 } 987 988 void Mir2Lir::GenThrow(RegLocation rl_src) { 989 FlushAllRegs(); 990 CallRuntimeHelperRegLocation(kQuickDeliverException, rl_src, true); 991 } 992 993 // For final classes there are no sub-classes to check and so we can answer the instance-of 994 // question with simple comparisons. 995 void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest, 996 RegLocation rl_src) { 997 // X86 has its own implementation. 998 DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64); 999 1000 RegLocation object = LoadValue(rl_src, kRefReg); 1001 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 1002 RegStorage result_reg = rl_result.reg; 1003 if (IsSameReg(result_reg, object.reg)) { 1004 result_reg = AllocTypedTemp(false, kCoreReg); 1005 DCHECK(!IsSameReg(result_reg, object.reg)); 1006 } 1007 LoadConstant(result_reg, 0); // assume false 1008 LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL); 1009 1010 RegStorage check_class = AllocTypedTemp(false, kRefReg); 1011 RegStorage object_class = AllocTypedTemp(false, kRefReg); 1012 1013 LoadCurrMethodDirect(check_class); 1014 if (use_declaring_class) { 1015 LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class, 1016 kNotVolatile); 1017 LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class, 1018 kNotVolatile); 1019 } else { 1020 LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), 1021 check_class, kNotVolatile); 1022 LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class, 1023 kNotVolatile); 1024 int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); 1025 LoadRefDisp(check_class, offset_of_type, check_class, kNotVolatile); 1026 } 1027 1028 // FIXME: what should we be comparing here? compressed or decompressed references? 1029 if (cu_->instruction_set == kThumb2) { 1030 OpRegReg(kOpCmp, check_class, object_class); // Same? 1031 LIR* it = OpIT(kCondEq, ""); // if-convert the test 1032 LoadConstant(result_reg, 1); // .eq case - load true 1033 OpEndIT(it); 1034 } else { 1035 GenSelectConst32(check_class, object_class, kCondEq, 1, 0, result_reg, kCoreReg); 1036 } 1037 LIR* target = NewLIR0(kPseudoTargetLabel); 1038 null_branchover->target = target; 1039 FreeTemp(object_class); 1040 FreeTemp(check_class); 1041 if (IsTemp(result_reg)) { 1042 OpRegCopy(rl_result.reg, result_reg); 1043 FreeTemp(result_reg); 1044 } 1045 StoreValue(rl_dest, rl_result); 1046 } 1047 1048 void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final, 1049 bool type_known_abstract, bool use_declaring_class, 1050 bool can_assume_type_is_in_dex_cache, 1051 uint32_t type_idx, RegLocation rl_dest, 1052 RegLocation rl_src) { 1053 FlushAllRegs(); 1054 // May generate a call - use explicit registers 1055 LockCallTemps(); 1056 RegStorage method_reg = TargetReg(kArg1, kRef); 1057 LoadCurrMethodDirect(method_reg); // kArg1 <= current Method* 1058 RegStorage class_reg = TargetReg(kArg2, kRef); // kArg2 will hold the Class* 1059 RegStorage ref_reg = TargetReg(kArg0, kRef); // kArg0 will hold the ref. 1060 RegStorage ret_reg = GetReturn(kRefReg).reg; 1061 if (needs_access_check) { 1062 // Check we have access to type_idx and if not throw IllegalAccessError, 1063 // returns Class* in kArg0 1064 CallRuntimeHelperImm(kQuickInitializeTypeAndVerifyAccess, type_idx, true); 1065 OpRegCopy(class_reg, ret_reg); // Align usage with fast path 1066 LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref 1067 } else if (use_declaring_class) { 1068 LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref 1069 LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), 1070 class_reg, kNotVolatile); 1071 } else { 1072 if (can_assume_type_is_in_dex_cache) { 1073 // Conditionally, as in the other case we will also load it. 1074 LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref 1075 } 1076 1077 // Load dex cache entry into class_reg (kArg2) 1078 LoadRefDisp(method_reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), 1079 class_reg, kNotVolatile); 1080 int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); 1081 LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile); 1082 if (!can_assume_type_is_in_dex_cache) { 1083 LIR* slow_path_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL); 1084 LIR* slow_path_target = NewLIR0(kPseudoTargetLabel); 1085 1086 // Should load value here. 1087 LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref 1088 1089 class InitTypeSlowPath : public Mir2Lir::LIRSlowPath { 1090 public: 1091 InitTypeSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont, uint32_t type_idx, 1092 RegLocation rl_src) 1093 : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont), type_idx_(type_idx), 1094 rl_src_(rl_src) { 1095 } 1096 1097 void Compile() OVERRIDE { 1098 GenerateTargetLabel(); 1099 1100 m2l_->CallRuntimeHelperImm(kQuickInitializeType, type_idx_, true); 1101 m2l_->OpRegCopy(m2l_->TargetReg(kArg2, kRef), 1102 m2l_->TargetReg(kRet0, kRef)); // Align usage with fast path 1103 m2l_->OpUnconditionalBranch(cont_); 1104 } 1105 1106 private: 1107 uint32_t type_idx_; 1108 RegLocation rl_src_; 1109 }; 1110 1111 AddSlowPath(new (arena_) InitTypeSlowPath(this, slow_path_branch, slow_path_target, 1112 type_idx, rl_src)); 1113 } 1114 } 1115 /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */ 1116 RegLocation rl_result = GetReturn(kCoreReg); 1117 if (!IsSameReg(rl_result.reg, ref_reg)) { 1118 // On MIPS and x86_64 rArg0 != rl_result, place false in result if branch is taken. 1119 LoadConstant(rl_result.reg, 0); 1120 } 1121 LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL); 1122 1123 /* load object->klass_ */ 1124 RegStorage ref_class_reg = TargetReg(kArg1, kRef); // kArg1 will hold the Class* of ref. 1125 DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0); 1126 LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(), 1127 ref_class_reg, kNotVolatile); 1128 /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */ 1129 LIR* branchover = NULL; 1130 if (type_known_final) { 1131 // rl_result == ref == class. 1132 GenSelectConst32(ref_class_reg, class_reg, kCondEq, 1, 0, rl_result.reg, 1133 kCoreReg); 1134 } else { 1135 if (cu_->instruction_set == kThumb2) { 1136 RegStorage r_tgt = LoadHelper(kQuickInstanceofNonTrivial); 1137 LIR* it = nullptr; 1138 if (!type_known_abstract) { 1139 /* Uses conditional nullification */ 1140 OpRegReg(kOpCmp, ref_class_reg, class_reg); // Same? 1141 it = OpIT(kCondEq, "EE"); // if-convert the test 1142 LoadConstant(rl_result.reg, 1); // .eq case - load true 1143 } 1144 OpRegCopy(ref_reg, class_reg); // .ne case - arg0 <= class 1145 OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class) 1146 if (it != nullptr) { 1147 OpEndIT(it); 1148 } 1149 FreeTemp(r_tgt); 1150 } else { 1151 if (!type_known_abstract) { 1152 /* Uses branchovers */ 1153 LoadConstant(rl_result.reg, 1); // assume true 1154 branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), NULL); 1155 } 1156 1157 OpRegCopy(TargetReg(kArg0, kRef), class_reg); // .ne case - arg0 <= class 1158 CallRuntimeHelper(kQuickInstanceofNonTrivial, false); 1159 } 1160 } 1161 // TODO: only clobber when type isn't final? 1162 ClobberCallerSave(); 1163 /* branch targets here */ 1164 LIR* target = NewLIR0(kPseudoTargetLabel); 1165 StoreValue(rl_dest, rl_result); 1166 branch1->target = target; 1167 if (branchover != nullptr) { 1168 branchover->target = target; 1169 } 1170 } 1171 1172 void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) { 1173 bool type_known_final, type_known_abstract, use_declaring_class; 1174 bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, 1175 *cu_->dex_file, 1176 type_idx, 1177 &type_known_final, 1178 &type_known_abstract, 1179 &use_declaring_class); 1180 bool can_assume_type_is_in_dex_cache = !needs_access_check && 1181 cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx); 1182 1183 if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) { 1184 GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src); 1185 } else { 1186 GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract, 1187 use_declaring_class, can_assume_type_is_in_dex_cache, 1188 type_idx, rl_dest, rl_src); 1189 } 1190 } 1191 1192 void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src) { 1193 bool type_known_final, type_known_abstract, use_declaring_class; 1194 bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, 1195 *cu_->dex_file, 1196 type_idx, 1197 &type_known_final, 1198 &type_known_abstract, 1199 &use_declaring_class); 1200 // Note: currently type_known_final is unused, as optimizing will only improve the performance 1201 // of the exception throw path. 1202 DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit(); 1203 if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) { 1204 // Verifier type analysis proved this check cast would never cause an exception. 1205 return; 1206 } 1207 FlushAllRegs(); 1208 // May generate a call - use explicit registers 1209 LockCallTemps(); 1210 RegStorage method_reg = TargetReg(kArg1, kRef); 1211 LoadCurrMethodDirect(method_reg); // kArg1 <= current Method* 1212 RegStorage class_reg = TargetReg(kArg2, kRef); // kArg2 will hold the Class* 1213 if (needs_access_check) { 1214 // Check we have access to type_idx and if not throw IllegalAccessError, 1215 // returns Class* in kRet0 1216 // InitializeTypeAndVerifyAccess(idx, method) 1217 CallRuntimeHelperImm(kQuickInitializeTypeAndVerifyAccess, type_idx, true); 1218 OpRegCopy(class_reg, TargetReg(kRet0, kRef)); // Align usage with fast path 1219 } else if (use_declaring_class) { 1220 LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), 1221 class_reg, kNotVolatile); 1222 } else { 1223 // Load dex cache entry into class_reg (kArg2) 1224 LoadRefDisp(method_reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), 1225 class_reg, kNotVolatile); 1226 int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); 1227 LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile); 1228 if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) { 1229 // Need to test presence of type in dex cache at runtime 1230 LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL); 1231 LIR* cont = NewLIR0(kPseudoTargetLabel); 1232 1233 // Slow path to initialize the type. Executed if the type is NULL. 1234 class SlowPath : public LIRSlowPath { 1235 public: 1236 SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx, 1237 const RegStorage class_reg) : 1238 LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx), 1239 class_reg_(class_reg) { 1240 } 1241 1242 void Compile() { 1243 GenerateTargetLabel(); 1244 1245 // Call out to helper, which will return resolved type in kArg0 1246 // InitializeTypeFromCode(idx, method) 1247 m2l_->CallRuntimeHelperImmReg(kQuickInitializeType, type_idx_, 1248 m2l_->TargetReg(kArg1, kRef), true); 1249 m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0, kRef)); // Align usage with fast path 1250 m2l_->OpUnconditionalBranch(cont_); 1251 } 1252 1253 public: 1254 const int type_idx_; 1255 const RegStorage class_reg_; 1256 }; 1257 1258 AddSlowPath(new (arena_) SlowPath(this, hop_branch, cont, type_idx, class_reg)); 1259 } 1260 } 1261 // At this point, class_reg (kArg2) has class 1262 LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef)); // kArg0 <= ref 1263 1264 // Slow path for the case where the classes are not equal. In this case we need 1265 // to call a helper function to do the check. 1266 class SlowPath : public LIRSlowPath { 1267 public: 1268 SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, bool load): 1269 LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), load_(load) { 1270 } 1271 1272 void Compile() { 1273 GenerateTargetLabel(); 1274 1275 if (load_) { 1276 m2l_->LoadRefDisp(m2l_->TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(), 1277 m2l_->TargetReg(kArg1, kRef), kNotVolatile); 1278 } 1279 m2l_->CallRuntimeHelperRegReg(kQuickCheckCast, m2l_->TargetReg(kArg2, kRef), 1280 m2l_->TargetReg(kArg1, kRef), true); 1281 m2l_->OpUnconditionalBranch(cont_); 1282 } 1283 1284 private: 1285 const bool load_; 1286 }; 1287 1288 if (type_known_abstract) { 1289 // Easier case, run slow path if target is non-null (slow path will load from target) 1290 LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0, kRef), 0, nullptr); 1291 LIR* cont = NewLIR0(kPseudoTargetLabel); 1292 AddSlowPath(new (arena_) SlowPath(this, branch, cont, true)); 1293 } else { 1294 // Harder, more common case. We need to generate a forward branch over the load 1295 // if the target is null. If it's non-null we perform the load and branch to the 1296 // slow path if the classes are not equal. 1297 1298 /* Null is OK - continue */ 1299 LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0, kRef), 0, nullptr); 1300 /* load object->klass_ */ 1301 DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0); 1302 LoadRefDisp(TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(), 1303 TargetReg(kArg1, kRef), kNotVolatile); 1304 1305 LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1, kRef), class_reg, nullptr); 1306 LIR* cont = NewLIR0(kPseudoTargetLabel); 1307 1308 // Add the slow path that will not perform load since this is already done. 1309 AddSlowPath(new (arena_) SlowPath(this, branch2, cont, false)); 1310 1311 // Set the null check to branch to the continuation. 1312 branch1->target = cont; 1313 } 1314 } 1315 1316 void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest, 1317 RegLocation rl_src1, RegLocation rl_src2) { 1318 RegLocation rl_result; 1319 if (cu_->instruction_set == kThumb2) { 1320 /* 1321 * NOTE: This is the one place in the code in which we might have 1322 * as many as six live temporary registers. There are 5 in the normal 1323 * set for Arm. Until we have spill capabilities, temporarily add 1324 * lr to the temp set. It is safe to do this locally, but note that 1325 * lr is used explicitly elsewhere in the code generator and cannot 1326 * normally be used as a general temp register. 1327 */ 1328 MarkTemp(TargetReg(kLr, kNotWide)); // Add lr to the temp pool 1329 FreeTemp(TargetReg(kLr, kNotWide)); // and make it available 1330 } 1331 rl_src1 = LoadValueWide(rl_src1, kCoreReg); 1332 rl_src2 = LoadValueWide(rl_src2, kCoreReg); 1333 rl_result = EvalLoc(rl_dest, kCoreReg, true); 1334 // The longs may overlap - use intermediate temp if so 1335 if ((rl_result.reg.GetLowReg() == rl_src1.reg.GetHighReg()) || (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg())) { 1336 RegStorage t_reg = AllocTemp(); 1337 OpRegRegReg(first_op, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow()); 1338 OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh()); 1339 OpRegCopy(rl_result.reg.GetLow(), t_reg); 1340 FreeTemp(t_reg); 1341 } else { 1342 OpRegRegReg(first_op, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), rl_src2.reg.GetLow()); 1343 OpRegRegReg(second_op, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh()); 1344 } 1345 /* 1346 * NOTE: If rl_dest refers to a frame variable in a large frame, the 1347 * following StoreValueWide might need to allocate a temp register. 1348 * To further work around the lack of a spill capability, explicitly 1349 * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result. 1350 * Remove when spill is functional. 1351 */ 1352 FreeRegLocTemps(rl_result, rl_src1); 1353 FreeRegLocTemps(rl_result, rl_src2); 1354 StoreValueWide(rl_dest, rl_result); 1355 if (cu_->instruction_set == kThumb2) { 1356 Clobber(TargetReg(kLr, kNotWide)); 1357 UnmarkTemp(TargetReg(kLr, kNotWide)); // Remove lr from the temp pool 1358 } 1359 } 1360 1361 void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, 1362 RegLocation rl_src1, RegLocation rl_shift) { 1363 QuickEntrypointEnum target; 1364 switch (opcode) { 1365 case Instruction::SHL_LONG: 1366 case Instruction::SHL_LONG_2ADDR: 1367 target = kQuickShlLong; 1368 break; 1369 case Instruction::SHR_LONG: 1370 case Instruction::SHR_LONG_2ADDR: 1371 target = kQuickShrLong; 1372 break; 1373 case Instruction::USHR_LONG: 1374 case Instruction::USHR_LONG_2ADDR: 1375 target = kQuickUshrLong; 1376 break; 1377 default: 1378 LOG(FATAL) << "Unexpected case"; 1379 target = kQuickShlLong; 1380 } 1381 FlushAllRegs(); /* Send everything to home location */ 1382 CallRuntimeHelperRegLocationRegLocation(target, rl_src1, rl_shift, false); 1383 RegLocation rl_result = GetReturnWide(kCoreReg); 1384 StoreValueWide(rl_dest, rl_result); 1385 } 1386 1387 1388 void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, 1389 RegLocation rl_src1, RegLocation rl_src2) { 1390 DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64); 1391 OpKind op = kOpBkpt; 1392 bool is_div_rem = false; 1393 bool check_zero = false; 1394 bool unary = false; 1395 RegLocation rl_result; 1396 bool shift_op = false; 1397 switch (opcode) { 1398 case Instruction::NEG_INT: 1399 op = kOpNeg; 1400 unary = true; 1401 break; 1402 case Instruction::NOT_INT: 1403 op = kOpMvn; 1404 unary = true; 1405 break; 1406 case Instruction::ADD_INT: 1407 case Instruction::ADD_INT_2ADDR: 1408 op = kOpAdd; 1409 break; 1410 case Instruction::SUB_INT: 1411 case Instruction::SUB_INT_2ADDR: 1412 op = kOpSub; 1413 break; 1414 case Instruction::MUL_INT: 1415 case Instruction::MUL_INT_2ADDR: 1416 op = kOpMul; 1417 break; 1418 case Instruction::DIV_INT: 1419 case Instruction::DIV_INT_2ADDR: 1420 check_zero = true; 1421 op = kOpDiv; 1422 is_div_rem = true; 1423 break; 1424 /* NOTE: returns in kArg1 */ 1425 case Instruction::REM_INT: 1426 case Instruction::REM_INT_2ADDR: 1427 check_zero = true; 1428 op = kOpRem; 1429 is_div_rem = true; 1430 break; 1431 case Instruction::AND_INT: 1432 case Instruction::AND_INT_2ADDR: 1433 op = kOpAnd; 1434 break; 1435 case Instruction::OR_INT: 1436 case Instruction::OR_INT_2ADDR: 1437 op = kOpOr; 1438 break; 1439 case Instruction::XOR_INT: 1440 case Instruction::XOR_INT_2ADDR: 1441 op = kOpXor; 1442 break; 1443 case Instruction::SHL_INT: 1444 case Instruction::SHL_INT_2ADDR: 1445 shift_op = true; 1446 op = kOpLsl; 1447 break; 1448 case Instruction::SHR_INT: 1449 case Instruction::SHR_INT_2ADDR: 1450 shift_op = true; 1451 op = kOpAsr; 1452 break; 1453 case Instruction::USHR_INT: 1454 case Instruction::USHR_INT_2ADDR: 1455 shift_op = true; 1456 op = kOpLsr; 1457 break; 1458 default: 1459 LOG(FATAL) << "Invalid word arith op: " << opcode; 1460 } 1461 if (!is_div_rem) { 1462 if (unary) { 1463 rl_src1 = LoadValue(rl_src1, kCoreReg); 1464 rl_result = EvalLoc(rl_dest, kCoreReg, true); 1465 OpRegReg(op, rl_result.reg, rl_src1.reg); 1466 } else { 1467 if ((shift_op) && (cu_->instruction_set != kArm64)) { 1468 rl_src2 = LoadValue(rl_src2, kCoreReg); 1469 RegStorage t_reg = AllocTemp(); 1470 OpRegRegImm(kOpAnd, t_reg, rl_src2.reg, 31); 1471 rl_src1 = LoadValue(rl_src1, kCoreReg); 1472 rl_result = EvalLoc(rl_dest, kCoreReg, true); 1473 OpRegRegReg(op, rl_result.reg, rl_src1.reg, t_reg); 1474 FreeTemp(t_reg); 1475 } else { 1476 rl_src1 = LoadValue(rl_src1, kCoreReg); 1477 rl_src2 = LoadValue(rl_src2, kCoreReg); 1478 rl_result = EvalLoc(rl_dest, kCoreReg, true); 1479 OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg); 1480 } 1481 } 1482 StoreValue(rl_dest, rl_result); 1483 } else { 1484 bool done = false; // Set to true if we happen to find a way to use a real instruction. 1485 if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) { 1486 rl_src1 = LoadValue(rl_src1, kCoreReg); 1487 rl_src2 = LoadValue(rl_src2, kCoreReg); 1488 if (check_zero) { 1489 GenDivZeroCheck(rl_src2.reg); 1490 } 1491 rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv); 1492 done = true; 1493 } else if (cu_->instruction_set == kThumb2) { 1494 if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) { 1495 // Use ARM SDIV instruction for division. For remainder we also need to 1496 // calculate using a MUL and subtract. 1497 rl_src1 = LoadValue(rl_src1, kCoreReg); 1498 rl_src2 = LoadValue(rl_src2, kCoreReg); 1499 if (check_zero) { 1500 GenDivZeroCheck(rl_src2.reg); 1501 } 1502 rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv); 1503 done = true; 1504 } 1505 } 1506 1507 // If we haven't already generated the code use the callout function. 1508 if (!done) { 1509 FlushAllRegs(); /* Send everything to home location */ 1510 LoadValueDirectFixed(rl_src2, TargetReg(kArg1, kNotWide)); 1511 RegStorage r_tgt = CallHelperSetup(kQuickIdivmod); 1512 LoadValueDirectFixed(rl_src1, TargetReg(kArg0, kNotWide)); 1513 if (check_zero) { 1514 GenDivZeroCheck(TargetReg(kArg1, kNotWide)); 1515 } 1516 // NOTE: callout here is not a safepoint. 1517 CallHelper(r_tgt, kQuickIdivmod, false /* not a safepoint */); 1518 if (op == kOpDiv) 1519 rl_result = GetReturn(kCoreReg); 1520 else 1521 rl_result = GetReturnAlt(); 1522 } 1523 StoreValue(rl_dest, rl_result); 1524 } 1525 } 1526 1527 /* 1528 * The following are the first-level codegen routines that analyze the format 1529 * of each bytecode then either dispatch special purpose codegen routines 1530 * or produce corresponding Thumb instructions directly. 1531 */ 1532 1533 // Returns true if no more than two bits are set in 'x'. 1534 static bool IsPopCountLE2(unsigned int x) { 1535 x &= x - 1; 1536 return (x & (x - 1)) == 0; 1537 } 1538 1539 // Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit' 1540 // and store the result in 'rl_dest'. 1541 bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div, 1542 RegLocation rl_src, RegLocation rl_dest, int lit) { 1543 if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) { 1544 return false; 1545 } 1546 // No divide instruction for Arm, so check for more special cases 1547 if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) { 1548 return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit); 1549 } 1550 int k = LowestSetBit(lit); 1551 if (k >= 30) { 1552 // Avoid special cases. 1553 return false; 1554 } 1555 rl_src = LoadValue(rl_src, kCoreReg); 1556 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 1557 if (is_div) { 1558 RegStorage t_reg = AllocTemp(); 1559 if (lit == 2) { 1560 // Division by 2 is by far the most common division by constant. 1561 OpRegRegImm(kOpLsr, t_reg, rl_src.reg, 32 - k); 1562 OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg); 1563 OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k); 1564 } else { 1565 OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31); 1566 OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k); 1567 OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg); 1568 OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k); 1569 } 1570 } else { 1571 RegStorage t_reg1 = AllocTemp(); 1572 RegStorage t_reg2 = AllocTemp(); 1573 if (lit == 2) { 1574 OpRegRegImm(kOpLsr, t_reg1, rl_src.reg, 32 - k); 1575 OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg); 1576 OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit -1); 1577 OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1); 1578 } else { 1579 OpRegRegImm(kOpAsr, t_reg1, rl_src.reg, 31); 1580 OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k); 1581 OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg); 1582 OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1); 1583 OpRegRegReg(kOpSub, rl_result.reg, t_reg2, t_reg1); 1584 } 1585 } 1586 StoreValue(rl_dest, rl_result); 1587 return true; 1588 } 1589 1590 // Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit' 1591 // and store the result in 'rl_dest'. 1592 bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) { 1593 if (lit < 0) { 1594 return false; 1595 } 1596 if (lit == 0) { 1597 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 1598 LoadConstant(rl_result.reg, 0); 1599 StoreValue(rl_dest, rl_result); 1600 return true; 1601 } 1602 if (lit == 1) { 1603 rl_src = LoadValue(rl_src, kCoreReg); 1604 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 1605 OpRegCopy(rl_result.reg, rl_src.reg); 1606 StoreValue(rl_dest, rl_result); 1607 return true; 1608 } 1609 // There is RegRegRegShift on Arm, so check for more special cases 1610 if (cu_->instruction_set == kThumb2) { 1611 return EasyMultiply(rl_src, rl_dest, lit); 1612 } 1613 // Can we simplify this multiplication? 1614 bool power_of_two = false; 1615 bool pop_count_le2 = false; 1616 bool power_of_two_minus_one = false; 1617 if (IsPowerOfTwo(lit)) { 1618 power_of_two = true; 1619 } else if (IsPopCountLE2(lit)) { 1620 pop_count_le2 = true; 1621 } else if (IsPowerOfTwo(lit + 1)) { 1622 power_of_two_minus_one = true; 1623 } else { 1624 return false; 1625 } 1626 rl_src = LoadValue(rl_src, kCoreReg); 1627 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 1628 if (power_of_two) { 1629 // Shift. 1630 OpRegRegImm(kOpLsl, rl_result.reg, rl_src.reg, LowestSetBit(lit)); 1631 } else if (pop_count_le2) { 1632 // Shift and add and shift. 1633 int first_bit = LowestSetBit(lit); 1634 int second_bit = LowestSetBit(lit ^ (1 << first_bit)); 1635 GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit); 1636 } else { 1637 // Reverse subtract: (src << (shift + 1)) - src. 1638 DCHECK(power_of_two_minus_one); 1639 // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1) 1640 RegStorage t_reg = AllocTemp(); 1641 OpRegRegImm(kOpLsl, t_reg, rl_src.reg, LowestSetBit(lit + 1)); 1642 OpRegRegReg(kOpSub, rl_result.reg, t_reg, rl_src.reg); 1643 } 1644 StoreValue(rl_dest, rl_result); 1645 return true; 1646 } 1647 1648 void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src, 1649 int lit) { 1650 RegLocation rl_result; 1651 OpKind op = static_cast<OpKind>(0); /* Make gcc happy */ 1652 int shift_op = false; 1653 bool is_div = false; 1654 1655 switch (opcode) { 1656 case Instruction::RSUB_INT_LIT8: 1657 case Instruction::RSUB_INT: { 1658 rl_src = LoadValue(rl_src, kCoreReg); 1659 rl_result = EvalLoc(rl_dest, kCoreReg, true); 1660 if (cu_->instruction_set == kThumb2) { 1661 OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, lit); 1662 } else { 1663 OpRegReg(kOpNeg, rl_result.reg, rl_src.reg); 1664 OpRegImm(kOpAdd, rl_result.reg, lit); 1665 } 1666 StoreValue(rl_dest, rl_result); 1667 return; 1668 } 1669 1670 case Instruction::SUB_INT: 1671 case Instruction::SUB_INT_2ADDR: 1672 lit = -lit; 1673 // Intended fallthrough 1674 case Instruction::ADD_INT: 1675 case Instruction::ADD_INT_2ADDR: 1676 case Instruction::ADD_INT_LIT8: 1677 case Instruction::ADD_INT_LIT16: 1678 op = kOpAdd; 1679 break; 1680 case Instruction::MUL_INT: 1681 case Instruction::MUL_INT_2ADDR: 1682 case Instruction::MUL_INT_LIT8: 1683 case Instruction::MUL_INT_LIT16: { 1684 if (HandleEasyMultiply(rl_src, rl_dest, lit)) { 1685 return; 1686 } 1687 op = kOpMul; 1688 break; 1689 } 1690 case Instruction::AND_INT: 1691 case Instruction::AND_INT_2ADDR: 1692 case Instruction::AND_INT_LIT8: 1693 case Instruction::AND_INT_LIT16: 1694 op = kOpAnd; 1695 break; 1696 case Instruction::OR_INT: 1697 case Instruction::OR_INT_2ADDR: 1698 case Instruction::OR_INT_LIT8: 1699 case Instruction::OR_INT_LIT16: 1700 op = kOpOr; 1701 break; 1702 case Instruction::XOR_INT: 1703 case Instruction::XOR_INT_2ADDR: 1704 case Instruction::XOR_INT_LIT8: 1705 case Instruction::XOR_INT_LIT16: 1706 op = kOpXor; 1707 break; 1708 case Instruction::SHL_INT_LIT8: 1709 case Instruction::SHL_INT: 1710 case Instruction::SHL_INT_2ADDR: 1711 lit &= 31; 1712 shift_op = true; 1713 op = kOpLsl; 1714 break; 1715 case Instruction::SHR_INT_LIT8: 1716 case Instruction::SHR_INT: 1717 case Instruction::SHR_INT_2ADDR: 1718 lit &= 31; 1719 shift_op = true; 1720 op = kOpAsr; 1721 break; 1722 case Instruction::USHR_INT_LIT8: 1723 case Instruction::USHR_INT: 1724 case Instruction::USHR_INT_2ADDR: 1725 lit &= 31; 1726 shift_op = true; 1727 op = kOpLsr; 1728 break; 1729 1730 case Instruction::DIV_INT: 1731 case Instruction::DIV_INT_2ADDR: 1732 case Instruction::DIV_INT_LIT8: 1733 case Instruction::DIV_INT_LIT16: 1734 case Instruction::REM_INT: 1735 case Instruction::REM_INT_2ADDR: 1736 case Instruction::REM_INT_LIT8: 1737 case Instruction::REM_INT_LIT16: { 1738 if (lit == 0) { 1739 GenDivZeroException(); 1740 return; 1741 } 1742 if ((opcode == Instruction::DIV_INT) || 1743 (opcode == Instruction::DIV_INT_2ADDR) || 1744 (opcode == Instruction::DIV_INT_LIT8) || 1745 (opcode == Instruction::DIV_INT_LIT16)) { 1746 is_div = true; 1747 } else { 1748 is_div = false; 1749 } 1750 if (HandleEasyDivRem(opcode, is_div, rl_src, rl_dest, lit)) { 1751 return; 1752 } 1753 1754 bool done = false; 1755 if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) { 1756 rl_src = LoadValue(rl_src, kCoreReg); 1757 rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div); 1758 done = true; 1759 } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) { 1760 rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div); 1761 done = true; 1762 } else if (cu_->instruction_set == kThumb2) { 1763 if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) { 1764 // Use ARM SDIV instruction for division. For remainder we also need to 1765 // calculate using a MUL and subtract. 1766 rl_src = LoadValue(rl_src, kCoreReg); 1767 rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div); 1768 done = true; 1769 } 1770 } 1771 1772 if (!done) { 1773 FlushAllRegs(); /* Everything to home location. */ 1774 LoadValueDirectFixed(rl_src, TargetReg(kArg0, kNotWide)); 1775 Clobber(TargetReg(kArg0, kNotWide)); 1776 CallRuntimeHelperRegImm(kQuickIdivmod, TargetReg(kArg0, kNotWide), lit, false); 1777 if (is_div) 1778 rl_result = GetReturn(kCoreReg); 1779 else 1780 rl_result = GetReturnAlt(); 1781 } 1782 StoreValue(rl_dest, rl_result); 1783 return; 1784 } 1785 default: 1786 LOG(FATAL) << "Unexpected opcode " << opcode; 1787 } 1788 rl_src = LoadValue(rl_src, kCoreReg); 1789 rl_result = EvalLoc(rl_dest, kCoreReg, true); 1790 // Avoid shifts by literal 0 - no support in Thumb. Change to copy. 1791 if (shift_op && (lit == 0)) { 1792 OpRegCopy(rl_result.reg, rl_src.reg); 1793 } else { 1794 OpRegRegImm(op, rl_result.reg, rl_src.reg, lit); 1795 } 1796 StoreValue(rl_dest, rl_result); 1797 } 1798 1799 void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, 1800 RegLocation rl_src1, RegLocation rl_src2) { 1801 RegLocation rl_result; 1802 OpKind first_op = kOpBkpt; 1803 OpKind second_op = kOpBkpt; 1804 bool call_out = false; 1805 bool check_zero = false; 1806 int ret_reg = TargetReg(kRet0, kNotWide).GetReg(); 1807 QuickEntrypointEnum target; 1808 1809 switch (opcode) { 1810 case Instruction::NOT_LONG: 1811 rl_src2 = LoadValueWide(rl_src2, kCoreReg); 1812 rl_result = EvalLoc(rl_dest, kCoreReg, true); 1813 // Check for destructive overlap 1814 if (rl_result.reg.GetLowReg() == rl_src2.reg.GetHighReg()) { 1815 RegStorage t_reg = AllocTemp(); 1816 OpRegCopy(t_reg, rl_src2.reg.GetHigh()); 1817 OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow()); 1818 OpRegReg(kOpMvn, rl_result.reg.GetHigh(), t_reg); 1819 FreeTemp(t_reg); 1820 } else { 1821 OpRegReg(kOpMvn, rl_result.reg.GetLow(), rl_src2.reg.GetLow()); 1822 OpRegReg(kOpMvn, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh()); 1823 } 1824 StoreValueWide(rl_dest, rl_result); 1825 return; 1826 case Instruction::ADD_LONG: 1827 case Instruction::ADD_LONG_2ADDR: 1828 first_op = kOpAdd; 1829 second_op = kOpAdc; 1830 break; 1831 case Instruction::SUB_LONG: 1832 case Instruction::SUB_LONG_2ADDR: 1833 first_op = kOpSub; 1834 second_op = kOpSbc; 1835 break; 1836 case Instruction::MUL_LONG: 1837 case Instruction::MUL_LONG_2ADDR: 1838 call_out = true; 1839 ret_reg = TargetReg(kRet0, kNotWide).GetReg(); 1840 target = kQuickLmul; 1841 break; 1842 case Instruction::DIV_LONG: 1843 case Instruction::DIV_LONG_2ADDR: 1844 call_out = true; 1845 check_zero = true; 1846 ret_reg = TargetReg(kRet0, kNotWide).GetReg(); 1847 target = kQuickLdiv; 1848 break; 1849 case Instruction::REM_LONG: 1850 case Instruction::REM_LONG_2ADDR: 1851 call_out = true; 1852 check_zero = true; 1853 target = kQuickLmod; 1854 /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */ 1855 ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2, kNotWide).GetReg() : 1856 TargetReg(kRet0, kNotWide).GetReg(); 1857 break; 1858 case Instruction::AND_LONG_2ADDR: 1859 case Instruction::AND_LONG: 1860 first_op = kOpAnd; 1861 second_op = kOpAnd; 1862 break; 1863 case Instruction::OR_LONG: 1864 case Instruction::OR_LONG_2ADDR: 1865 first_op = kOpOr; 1866 second_op = kOpOr; 1867 break; 1868 case Instruction::XOR_LONG: 1869 case Instruction::XOR_LONG_2ADDR: 1870 first_op = kOpXor; 1871 second_op = kOpXor; 1872 break; 1873 default: 1874 LOG(FATAL) << "Invalid long arith op"; 1875 } 1876 if (!call_out) { 1877 GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2); 1878 } else { 1879 FlushAllRegs(); /* Send everything to home location */ 1880 if (check_zero) { 1881 RegStorage r_tmp1 = TargetReg(kArg0, kWide); 1882 RegStorage r_tmp2 = TargetReg(kArg2, kWide); 1883 LoadValueDirectWideFixed(rl_src2, r_tmp2); 1884 RegStorage r_tgt = CallHelperSetup(target); 1885 GenDivZeroCheckWide(r_tmp2); 1886 LoadValueDirectWideFixed(rl_src1, r_tmp1); 1887 // NOTE: callout here is not a safepoint 1888 CallHelper(r_tgt, target, false /* not safepoint */); 1889 } else { 1890 CallRuntimeHelperRegLocationRegLocation(target, rl_src1, rl_src2, false); 1891 } 1892 // Adjust return regs in to handle case of rem returning kArg2/kArg3 1893 if (ret_reg == TargetReg(kRet0, kNotWide).GetReg()) 1894 rl_result = GetReturnWide(kCoreReg); 1895 else 1896 rl_result = GetReturnWideAlt(); 1897 StoreValueWide(rl_dest, rl_result); 1898 } 1899 } 1900 1901 void Mir2Lir::GenConst(RegLocation rl_dest, int value) { 1902 RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true); 1903 LoadConstantNoClobber(rl_result.reg, value); 1904 StoreValue(rl_dest, rl_result); 1905 if (value == 0) { 1906 Workaround7250540(rl_dest, rl_result.reg); 1907 } 1908 } 1909 1910 void Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, 1911 RegLocation rl_src) { 1912 /* 1913 * Don't optimize the register usage since it calls out to support 1914 * functions 1915 */ 1916 1917 FlushAllRegs(); /* Send everything to home location */ 1918 CallRuntimeHelperRegLocation(trampoline, rl_src, false); 1919 if (rl_dest.wide) { 1920 RegLocation rl_result; 1921 rl_result = GetReturnWide(LocToRegClass(rl_dest)); 1922 StoreValueWide(rl_dest, rl_result); 1923 } else { 1924 RegLocation rl_result; 1925 rl_result = GetReturn(LocToRegClass(rl_dest)); 1926 StoreValue(rl_dest, rl_result); 1927 } 1928 } 1929 1930 class SuspendCheckSlowPath : public Mir2Lir::LIRSlowPath { 1931 public: 1932 SuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont) 1933 : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont) { 1934 } 1935 1936 void Compile() OVERRIDE { 1937 m2l_->ResetRegPool(); 1938 m2l_->ResetDefTracking(); 1939 GenerateTargetLabel(kPseudoSuspendTarget); 1940 m2l_->CallRuntimeHelper(kQuickTestSuspend, true); 1941 if (cont_ != nullptr) { 1942 m2l_->OpUnconditionalBranch(cont_); 1943 } 1944 } 1945 }; 1946 1947 /* Check if we need to check for pending suspend request */ 1948 void Mir2Lir::GenSuspendTest(int opt_flags) { 1949 if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) { 1950 if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) { 1951 return; 1952 } 1953 FlushAllRegs(); 1954 LIR* branch = OpTestSuspend(NULL); 1955 LIR* cont = NewLIR0(kPseudoTargetLabel); 1956 AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont)); 1957 } else { 1958 if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) { 1959 return; 1960 } 1961 FlushAllRegs(); // TODO: needed? 1962 LIR* inst = CheckSuspendUsingLoad(); 1963 MarkSafepointPC(inst); 1964 } 1965 } 1966 1967 /* Check if we need to check for pending suspend request */ 1968 void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) { 1969 if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) { 1970 if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) { 1971 OpUnconditionalBranch(target); 1972 return; 1973 } 1974 OpTestSuspend(target); 1975 FlushAllRegs(); 1976 LIR* branch = OpUnconditionalBranch(nullptr); 1977 AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, target)); 1978 } else { 1979 // For the implicit suspend check, just perform the trigger 1980 // load and branch to the target. 1981 if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) { 1982 OpUnconditionalBranch(target); 1983 return; 1984 } 1985 FlushAllRegs(); 1986 LIR* inst = CheckSuspendUsingLoad(); 1987 MarkSafepointPC(inst); 1988 OpUnconditionalBranch(target); 1989 } 1990 } 1991 1992 /* Call out to helper assembly routine that will null check obj and then lock it. */ 1993 void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { 1994 FlushAllRegs(); 1995 CallRuntimeHelperRegLocation(kQuickLockObject, rl_src, true); 1996 } 1997 1998 /* Call out to helper assembly routine that will null check obj and then unlock it. */ 1999 void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { 2000 FlushAllRegs(); 2001 CallRuntimeHelperRegLocation(kQuickUnlockObject, rl_src, true); 2002 } 2003 2004 /* Generic code for generating a wide constant into a VR. */ 2005 void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) { 2006 RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true); 2007 LoadConstantWide(rl_result.reg, value); 2008 StoreValueWide(rl_dest, rl_result); 2009 } 2010 2011 void Mir2Lir::GenSmallPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) { 2012 const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; 2013 const uint16_t entries = table[1]; 2014 // Chained cmp-and-branch. 2015 const int32_t* as_int32 = reinterpret_cast<const int32_t*>(&table[2]); 2016 int32_t current_key = as_int32[0]; 2017 const int32_t* targets = &as_int32[1]; 2018 rl_src = LoadValue(rl_src, kCoreReg); 2019 int i = 0; 2020 for (; i < entries; i++, current_key++) { 2021 if (!InexpensiveConstantInt(current_key, Instruction::Code::IF_EQ)) { 2022 // Switch to using a temp and add. 2023 break; 2024 } 2025 BasicBlock* case_block = 2026 mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]); 2027 OpCmpImmBranch(kCondEq, rl_src.reg, current_key, &block_label_list_[case_block->id]); 2028 } 2029 if (i < entries) { 2030 // The rest do not seem to be inexpensive. Try to allocate a temp and use add. 2031 RegStorage key_temp = AllocTypedTemp(false, kCoreReg, false); 2032 if (key_temp.Valid()) { 2033 LoadConstantNoClobber(key_temp, current_key); 2034 for (; i < entries - 1; i++, current_key++) { 2035 BasicBlock* case_block = 2036 mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]); 2037 OpCmpBranch(kCondEq, rl_src.reg, key_temp, &block_label_list_[case_block->id]); 2038 OpRegImm(kOpAdd, key_temp, 1); // Increment key. 2039 } 2040 BasicBlock* case_block = 2041 mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]); 2042 OpCmpBranch(kCondEq, rl_src.reg, key_temp, &block_label_list_[case_block->id]); 2043 } else { 2044 // No free temp, just finish the old loop. 2045 for (; i < entries; i++, current_key++) { 2046 BasicBlock* case_block = 2047 mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]); 2048 OpCmpImmBranch(kCondEq, rl_src.reg, current_key, &block_label_list_[case_block->id]); 2049 } 2050 } 2051 } 2052 } 2053 2054 void Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) { 2055 const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; 2056 if (cu_->verbose) { 2057 DumpSparseSwitchTable(table); 2058 } 2059 2060 const uint16_t entries = table[1]; 2061 if (entries <= kSmallSwitchThreshold) { 2062 GenSmallPackedSwitch(mir, table_offset, rl_src); 2063 } else { 2064 // Use the backend-specific implementation. 2065 GenLargePackedSwitch(mir, table_offset, rl_src); 2066 } 2067 } 2068 2069 void Mir2Lir::GenSmallSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) { 2070 const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; 2071 const uint16_t entries = table[1]; 2072 // Chained cmp-and-branch. 2073 const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]); 2074 const int32_t* targets = &keys[entries]; 2075 rl_src = LoadValue(rl_src, kCoreReg); 2076 for (int i = 0; i < entries; i++) { 2077 int key = keys[i]; 2078 BasicBlock* case_block = 2079 mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]); 2080 OpCmpImmBranch(kCondEq, rl_src.reg, key, &block_label_list_[case_block->id]); 2081 } 2082 } 2083 2084 void Mir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) { 2085 const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset; 2086 if (cu_->verbose) { 2087 DumpSparseSwitchTable(table); 2088 } 2089 2090 const uint16_t entries = table[1]; 2091 if (entries <= kSmallSwitchThreshold) { 2092 GenSmallSparseSwitch(mir, table_offset, rl_src); 2093 } else { 2094 // Use the backend-specific implementation. 2095 GenLargeSparseSwitch(mir, table_offset, rl_src); 2096 } 2097 } 2098 2099 } // namespace art 2100