1 /* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #include "dex/compiler_ir.h" 18 #include "dex_file-inl.h" 19 #include "entrypoints/quick/quick_entrypoints.h" 20 #include "invoke_type.h" 21 #include "mirror/array.h" 22 #include "mirror/string.h" 23 #include "mir_to_lir-inl.h" 24 #include "x86/codegen_x86.h" 25 26 namespace art { 27 28 /* 29 * This source files contains "gen" codegen routines that should 30 * be applicable to most targets. Only mid-level support utilities 31 * and "op" calls may be used here. 32 */ 33 34 /* 35 * To save scheduling time, helper calls are broken into two parts: generation of 36 * the helper target address, and the actual call to the helper. Because x86 37 * has a memory call operation, part 1 is a NOP for x86. For other targets, 38 * load arguments between the two parts. 39 */ 40 int Mir2Lir::CallHelperSetup(ThreadOffset helper_offset) { 41 return (cu_->instruction_set == kX86) ? 0 : LoadHelper(helper_offset); 42 } 43 44 /* NOTE: if r_tgt is a temp, it will be freed following use */ 45 LIR* Mir2Lir::CallHelper(int r_tgt, ThreadOffset helper_offset, bool safepoint_pc, bool use_link) { 46 LIR* call_inst; 47 OpKind op = use_link ? kOpBlx : kOpBx; 48 if (cu_->instruction_set == kX86) { 49 call_inst = OpThreadMem(op, helper_offset); 50 } else { 51 call_inst = OpReg(op, r_tgt); 52 FreeTemp(r_tgt); 53 } 54 if (safepoint_pc) { 55 MarkSafepointPC(call_inst); 56 } 57 return call_inst; 58 } 59 60 void Mir2Lir::CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc) { 61 int r_tgt = CallHelperSetup(helper_offset); 62 LoadConstant(TargetReg(kArg0), arg0); 63 ClobberCalleeSave(); 64 CallHelper(r_tgt, helper_offset, safepoint_pc); 65 } 66 67 void Mir2Lir::CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc) { 68 int r_tgt = CallHelperSetup(helper_offset); 69 OpRegCopy(TargetReg(kArg0), arg0); 70 ClobberCalleeSave(); 71 CallHelper(r_tgt, helper_offset, safepoint_pc); 72 } 73 74 void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0, 75 bool safepoint_pc) { 76 int r_tgt = CallHelperSetup(helper_offset); 77 if (arg0.wide == 0) { 78 LoadValueDirectFixed(arg0, TargetReg(kArg0)); 79 } else { 80 LoadValueDirectWideFixed(arg0, TargetReg(kArg0), TargetReg(kArg1)); 81 } 82 ClobberCalleeSave(); 83 CallHelper(r_tgt, helper_offset, safepoint_pc); 84 } 85 86 void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset helper_offset, int arg0, int arg1, 87 bool safepoint_pc) { 88 int r_tgt = CallHelperSetup(helper_offset); 89 LoadConstant(TargetReg(kArg0), arg0); 90 LoadConstant(TargetReg(kArg1), arg1); 91 ClobberCalleeSave(); 92 CallHelper(r_tgt, helper_offset, safepoint_pc); 93 } 94 95 void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset helper_offset, int arg0, 96 RegLocation arg1, bool safepoint_pc) { 97 int r_tgt = CallHelperSetup(helper_offset); 98 if (arg1.wide == 0) { 99 LoadValueDirectFixed(arg1, TargetReg(kArg1)); 100 } else { 101 LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2)); 102 } 103 LoadConstant(TargetReg(kArg0), arg0); 104 ClobberCalleeSave(); 105 CallHelper(r_tgt, helper_offset, safepoint_pc); 106 } 107 108 void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset helper_offset, RegLocation arg0, int arg1, 109 bool safepoint_pc) { 110 int r_tgt = CallHelperSetup(helper_offset); 111 LoadValueDirectFixed(arg0, TargetReg(kArg0)); 112 LoadConstant(TargetReg(kArg1), arg1); 113 ClobberCalleeSave(); 114 CallHelper(r_tgt, helper_offset, safepoint_pc); 115 } 116 117 void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, int arg1, 118 bool safepoint_pc) { 119 int r_tgt = CallHelperSetup(helper_offset); 120 OpRegCopy(TargetReg(kArg1), arg1); 121 LoadConstant(TargetReg(kArg0), arg0); 122 ClobberCalleeSave(); 123 CallHelper(r_tgt, helper_offset, safepoint_pc); 124 } 125 126 void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset helper_offset, int arg0, int arg1, 127 bool safepoint_pc) { 128 int r_tgt = CallHelperSetup(helper_offset); 129 OpRegCopy(TargetReg(kArg0), arg0); 130 LoadConstant(TargetReg(kArg1), arg1); 131 ClobberCalleeSave(); 132 CallHelper(r_tgt, helper_offset, safepoint_pc); 133 } 134 135 void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) { 136 int r_tgt = CallHelperSetup(helper_offset); 137 LoadCurrMethodDirect(TargetReg(kArg1)); 138 LoadConstant(TargetReg(kArg0), arg0); 139 ClobberCalleeSave(); 140 CallHelper(r_tgt, helper_offset, safepoint_pc); 141 } 142 143 void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset, RegLocation arg0, 144 RegLocation arg1, bool safepoint_pc) { 145 int r_tgt = CallHelperSetup(helper_offset); 146 if (arg0.wide == 0) { 147 LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0)); 148 if (arg1.wide == 0) { 149 if (cu_->instruction_set == kMips) { 150 LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1)); 151 } else { 152 LoadValueDirectFixed(arg1, TargetReg(kArg1)); 153 } 154 } else { 155 if (cu_->instruction_set == kMips) { 156 LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2)); 157 } else { 158 LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2)); 159 } 160 } 161 } else { 162 LoadValueDirectWideFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0), arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1)); 163 if (arg1.wide == 0) { 164 LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2)); 165 } else { 166 LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3)); 167 } 168 } 169 ClobberCalleeSave(); 170 CallHelper(r_tgt, helper_offset, safepoint_pc); 171 } 172 173 void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset helper_offset, int arg0, int arg1, 174 bool safepoint_pc) { 175 int r_tgt = CallHelperSetup(helper_offset); 176 DCHECK_NE(TargetReg(kArg0), arg1); // check copy into arg0 won't clobber arg1 177 OpRegCopy(TargetReg(kArg0), arg0); 178 OpRegCopy(TargetReg(kArg1), arg1); 179 ClobberCalleeSave(); 180 CallHelper(r_tgt, helper_offset, safepoint_pc); 181 } 182 183 void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, int arg0, int arg1, 184 int arg2, bool safepoint_pc) { 185 int r_tgt = CallHelperSetup(helper_offset); 186 DCHECK_NE(TargetReg(kArg0), arg1); // check copy into arg0 won't clobber arg1 187 OpRegCopy(TargetReg(kArg0), arg0); 188 OpRegCopy(TargetReg(kArg1), arg1); 189 LoadConstant(TargetReg(kArg2), arg2); 190 ClobberCalleeSave(); 191 CallHelper(r_tgt, helper_offset, safepoint_pc); 192 } 193 194 void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset helper_offset, 195 int arg0, RegLocation arg2, bool safepoint_pc) { 196 int r_tgt = CallHelperSetup(helper_offset); 197 LoadValueDirectFixed(arg2, TargetReg(kArg2)); 198 LoadCurrMethodDirect(TargetReg(kArg1)); 199 LoadConstant(TargetReg(kArg0), arg0); 200 ClobberCalleeSave(); 201 CallHelper(r_tgt, helper_offset, safepoint_pc); 202 } 203 204 void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset helper_offset, int arg0, 205 int arg2, bool safepoint_pc) { 206 int r_tgt = CallHelperSetup(helper_offset); 207 LoadCurrMethodDirect(TargetReg(kArg1)); 208 LoadConstant(TargetReg(kArg2), arg2); 209 LoadConstant(TargetReg(kArg0), arg0); 210 ClobberCalleeSave(); 211 CallHelper(r_tgt, helper_offset, safepoint_pc); 212 } 213 214 void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset helper_offset, 215 int arg0, RegLocation arg1, 216 RegLocation arg2, bool safepoint_pc) { 217 int r_tgt = CallHelperSetup(helper_offset); 218 LoadValueDirectFixed(arg1, TargetReg(kArg1)); 219 if (arg2.wide == 0) { 220 LoadValueDirectFixed(arg2, TargetReg(kArg2)); 221 } else { 222 LoadValueDirectWideFixed(arg2, TargetReg(kArg2), TargetReg(kArg3)); 223 } 224 LoadConstant(TargetReg(kArg0), arg0); 225 ClobberCalleeSave(); 226 CallHelper(r_tgt, helper_offset, safepoint_pc); 227 } 228 229 /* 230 * If there are any ins passed in registers that have not been promoted 231 * to a callee-save register, flush them to the frame. Perform intial 232 * assignment of promoted arguments. 233 * 234 * ArgLocs is an array of location records describing the incoming arguments 235 * with one location record per word of argument. 236 */ 237 void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { 238 /* 239 * Dummy up a RegLocation for the incoming Method* 240 * It will attempt to keep kArg0 live (or copy it to home location 241 * if promoted). 242 */ 243 RegLocation rl_src = rl_method; 244 rl_src.location = kLocPhysReg; 245 rl_src.low_reg = TargetReg(kArg0); 246 rl_src.home = false; 247 MarkLive(rl_src.low_reg, rl_src.s_reg_low); 248 StoreValue(rl_method, rl_src); 249 // If Method* has been promoted, explicitly flush 250 if (rl_method.location == kLocPhysReg) { 251 StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0)); 252 } 253 254 if (cu_->num_ins == 0) 255 return; 256 const int num_arg_regs = 3; 257 static SpecialTargetRegister arg_regs[] = {kArg1, kArg2, kArg3}; 258 int start_vreg = cu_->num_dalvik_registers - cu_->num_ins; 259 /* 260 * Copy incoming arguments to their proper home locations. 261 * NOTE: an older version of dx had an issue in which 262 * it would reuse static method argument registers. 263 * This could result in the same Dalvik virtual register 264 * being promoted to both core and fp regs. To account for this, 265 * we only copy to the corresponding promoted physical register 266 * if it matches the type of the SSA name for the incoming 267 * argument. It is also possible that long and double arguments 268 * end up half-promoted. In those cases, we must flush the promoted 269 * half to memory as well. 270 */ 271 for (int i = 0; i < cu_->num_ins; i++) { 272 PromotionMap* v_map = &promotion_map_[start_vreg + i]; 273 if (i < num_arg_regs) { 274 // If arriving in register 275 bool need_flush = true; 276 RegLocation* t_loc = &ArgLocs[i]; 277 if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) { 278 OpRegCopy(v_map->core_reg, TargetReg(arg_regs[i])); 279 need_flush = false; 280 } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) { 281 OpRegCopy(v_map->FpReg, TargetReg(arg_regs[i])); 282 need_flush = false; 283 } else { 284 need_flush = true; 285 } 286 287 // For wide args, force flush if not fully promoted 288 if (t_loc->wide) { 289 PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1); 290 // Is only half promoted? 291 need_flush |= (p_map->core_location != v_map->core_location) || 292 (p_map->fp_location != v_map->fp_location); 293 if ((cu_->instruction_set == kThumb2) && t_loc->fp && !need_flush) { 294 /* 295 * In Arm, a double is represented as a pair of consecutive single float 296 * registers starting at an even number. It's possible that both Dalvik vRegs 297 * representing the incoming double were independently promoted as singles - but 298 * not in a form usable as a double. If so, we need to flush - even though the 299 * incoming arg appears fully in register. At this point in the code, both 300 * halves of the double are promoted. Make sure they are in a usable form. 301 */ 302 int lowreg_index = start_vreg + i + (t_loc->high_word ? -1 : 0); 303 int low_reg = promotion_map_[lowreg_index].FpReg; 304 int high_reg = promotion_map_[lowreg_index + 1].FpReg; 305 if (((low_reg & 0x1) != 0) || (high_reg != (low_reg + 1))) { 306 need_flush = true; 307 } 308 } 309 } 310 if (need_flush) { 311 StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), 312 TargetReg(arg_regs[i]), kWord); 313 } 314 } else { 315 // If arriving in frame & promoted 316 if (v_map->core_location == kLocPhysReg) { 317 LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i), 318 v_map->core_reg); 319 } 320 if (v_map->fp_location == kLocPhysReg) { 321 LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i), 322 v_map->FpReg); 323 } 324 } 325 } 326 } 327 328 /* 329 * Bit of a hack here - in the absence of a real scheduling pass, 330 * emit the next instruction in static & direct invoke sequences. 331 */ 332 static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, 333 int state, const MethodReference& target_method, 334 uint32_t unused, 335 uintptr_t direct_code, uintptr_t direct_method, 336 InvokeType type) { 337 Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get()); 338 if (cu->instruction_set != kThumb2) { 339 // Disable sharpening 340 direct_code = 0; 341 direct_method = 0; 342 } 343 if (direct_code != 0 && direct_method != 0) { 344 switch (state) { 345 case 0: // Get the current Method* [sets kArg0] 346 if (direct_code != static_cast<unsigned int>(-1)) { 347 cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code); 348 } else { 349 CHECK_EQ(cu->dex_file, target_method.dex_file); 350 LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_, 351 target_method.dex_method_index, 0); 352 if (data_target == NULL) { 353 data_target = cg->AddWordData(&cg->code_literal_list_, target_method.dex_method_index); 354 data_target->operands[1] = type; 355 } 356 LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target); 357 cg->AppendLIR(load_pc_rel); 358 DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target); 359 } 360 if (direct_method != static_cast<unsigned int>(-1)) { 361 cg->LoadConstant(cg->TargetReg(kArg0), direct_method); 362 } else { 363 CHECK_EQ(cu->dex_file, target_method.dex_file); 364 LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_, 365 target_method.dex_method_index, 0); 366 if (data_target == NULL) { 367 data_target = cg->AddWordData(&cg->method_literal_list_, target_method.dex_method_index); 368 data_target->operands[1] = type; 369 } 370 LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target); 371 cg->AppendLIR(load_pc_rel); 372 DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target); 373 } 374 break; 375 default: 376 return -1; 377 } 378 } else { 379 switch (state) { 380 case 0: // Get the current Method* [sets kArg0] 381 // TUNING: we can save a reg copy if Method* has been promoted. 382 cg->LoadCurrMethodDirect(cg->TargetReg(kArg0)); 383 break; 384 case 1: // Get method->dex_cache_resolved_methods_ 385 cg->LoadWordDisp(cg->TargetReg(kArg0), 386 mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0)); 387 // Set up direct code if known. 388 if (direct_code != 0) { 389 if (direct_code != static_cast<unsigned int>(-1)) { 390 cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code); 391 } else { 392 CHECK_EQ(cu->dex_file, target_method.dex_file); 393 LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_, 394 target_method.dex_method_index, 0); 395 if (data_target == NULL) { 396 data_target = cg->AddWordData(&cg->code_literal_list_, target_method.dex_method_index); 397 data_target->operands[1] = type; 398 } 399 LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target); 400 cg->AppendLIR(load_pc_rel); 401 DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target); 402 } 403 } 404 break; 405 case 2: // Grab target method* 406 CHECK_EQ(cu->dex_file, target_method.dex_file); 407 cg->LoadWordDisp(cg->TargetReg(kArg0), 408 mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + 409 (target_method.dex_method_index * 4), 410 cg-> TargetReg(kArg0)); 411 break; 412 case 3: // Grab the code from the method* 413 if (cu->instruction_set != kX86) { 414 if (direct_code == 0) { 415 cg->LoadWordDisp(cg->TargetReg(kArg0), 416 mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(), 417 cg->TargetReg(kInvokeTgt)); 418 } 419 break; 420 } 421 // Intentional fallthrough for x86 422 default: 423 return -1; 424 } 425 } 426 return state + 1; 427 } 428 429 /* 430 * Bit of a hack here - in the absence of a real scheduling pass, 431 * emit the next instruction in a virtual invoke sequence. 432 * We can use kLr as a temp prior to target address loading 433 * Note also that we'll load the first argument ("this") into 434 * kArg1 here rather than the standard LoadArgRegs. 435 */ 436 static int NextVCallInsn(CompilationUnit* cu, CallInfo* info, 437 int state, const MethodReference& target_method, 438 uint32_t method_idx, uintptr_t unused, uintptr_t unused2, 439 InvokeType unused3) { 440 Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get()); 441 /* 442 * This is the fast path in which the target virtual method is 443 * fully resolved at compile time. 444 */ 445 switch (state) { 446 case 0: { // Get "this" [set kArg1] 447 RegLocation rl_arg = info->args[0]; 448 cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1)); 449 break; 450 } 451 case 1: // Is "this" null? [use kArg1] 452 cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags); 453 // get this->klass_ [use kArg1, set kInvokeTgt] 454 cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(), 455 cg->TargetReg(kInvokeTgt)); 456 break; 457 case 2: // Get this->klass_->vtable [usr kInvokeTgt, set kInvokeTgt] 458 cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(), 459 cg->TargetReg(kInvokeTgt)); 460 break; 461 case 3: // Get target method [use kInvokeTgt, set kArg0] 462 cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) + 463 mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(), 464 cg->TargetReg(kArg0)); 465 break; 466 case 4: // Get the compiled code address [uses kArg0, sets kInvokeTgt] 467 if (cu->instruction_set != kX86) { 468 cg->LoadWordDisp(cg->TargetReg(kArg0), 469 mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(), 470 cg->TargetReg(kInvokeTgt)); 471 break; 472 } 473 // Intentional fallthrough for X86 474 default: 475 return -1; 476 } 477 return state + 1; 478 } 479 480 /* 481 * All invoke-interface calls bounce off of art_quick_invoke_interface_trampoline, 482 * which will locate the target and continue on via a tail call. 483 */ 484 static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, 485 const MethodReference& target_method, 486 uint32_t unused, uintptr_t unused2, 487 uintptr_t direct_method, InvokeType unused4) { 488 Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get()); 489 if (cu->instruction_set != kThumb2) { 490 // Disable sharpening 491 direct_method = 0; 492 } 493 ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline); 494 495 if (direct_method != 0) { 496 switch (state) { 497 case 0: // Load the trampoline target [sets kInvokeTgt]. 498 if (cu->instruction_set != kX86) { 499 cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), 500 cg->TargetReg(kInvokeTgt)); 501 } 502 // Get the interface Method* [sets kArg0] 503 if (direct_method != static_cast<unsigned int>(-1)) { 504 cg->LoadConstant(cg->TargetReg(kArg0), direct_method); 505 } else { 506 CHECK_EQ(cu->dex_file, target_method.dex_file); 507 LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_, 508 target_method.dex_method_index, 0); 509 if (data_target == NULL) { 510 data_target = cg->AddWordData(&cg->method_literal_list_, 511 target_method.dex_method_index); 512 data_target->operands[1] = kInterface; 513 } 514 LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target); 515 cg->AppendLIR(load_pc_rel); 516 DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target); 517 } 518 break; 519 default: 520 return -1; 521 } 522 } else { 523 switch (state) { 524 case 0: 525 // Get the current Method* [sets kArg0] - TUNING: remove copy of method if it is promoted. 526 cg->LoadCurrMethodDirect(cg->TargetReg(kArg0)); 527 // Load the trampoline target [sets kInvokeTgt]. 528 if (cu->instruction_set != kX86) { 529 cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), 530 cg->TargetReg(kInvokeTgt)); 531 } 532 break; 533 case 1: // Get method->dex_cache_resolved_methods_ [set/use kArg0] 534 cg->LoadWordDisp(cg->TargetReg(kArg0), 535 mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), 536 cg->TargetReg(kArg0)); 537 break; 538 case 2: // Grab target method* [set/use kArg0] 539 CHECK_EQ(cu->dex_file, target_method.dex_file); 540 cg->LoadWordDisp(cg->TargetReg(kArg0), 541 mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + 542 (target_method.dex_method_index * 4), 543 cg->TargetReg(kArg0)); 544 break; 545 default: 546 return -1; 547 } 548 } 549 return state + 1; 550 } 551 552 static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset trampoline, 553 int state, const MethodReference& target_method, 554 uint32_t method_idx) { 555 Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get()); 556 /* 557 * This handles the case in which the base method is not fully 558 * resolved at compile time, we bail to a runtime helper. 559 */ 560 if (state == 0) { 561 if (cu->instruction_set != kX86) { 562 // Load trampoline target 563 cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), cg->TargetReg(kInvokeTgt)); 564 } 565 // Load kArg0 with method index 566 CHECK_EQ(cu->dex_file, target_method.dex_file); 567 cg->LoadConstant(cg->TargetReg(kArg0), target_method.dex_method_index); 568 return 1; 569 } 570 return -1; 571 } 572 573 static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info, 574 int state, 575 const MethodReference& target_method, 576 uint32_t method_idx, 577 uintptr_t unused, uintptr_t unused2, 578 InvokeType unused3) { 579 ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); 580 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); 581 } 582 583 static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, 584 const MethodReference& target_method, 585 uint32_t method_idx, uintptr_t unused, 586 uintptr_t unused2, InvokeType unused3) { 587 ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); 588 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); 589 } 590 591 static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, 592 const MethodReference& target_method, 593 uint32_t method_idx, uintptr_t unused, 594 uintptr_t unused2, InvokeType unused3) { 595 ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); 596 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); 597 } 598 599 static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, 600 const MethodReference& target_method, 601 uint32_t method_idx, uintptr_t unused, 602 uintptr_t unused2, InvokeType unused3) { 603 ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); 604 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); 605 } 606 607 static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu, 608 CallInfo* info, int state, 609 const MethodReference& target_method, 610 uint32_t unused, 611 uintptr_t unused2, uintptr_t unused3, 612 InvokeType unused4) { 613 ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); 614 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); 615 } 616 617 int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state, 618 NextCallInsn next_call_insn, 619 const MethodReference& target_method, 620 uint32_t vtable_idx, uintptr_t direct_code, 621 uintptr_t direct_method, InvokeType type, bool skip_this) { 622 int last_arg_reg = TargetReg(kArg3); 623 int next_reg = TargetReg(kArg1); 624 int next_arg = 0; 625 if (skip_this) { 626 next_reg++; 627 next_arg++; 628 } 629 for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) { 630 RegLocation rl_arg = info->args[next_arg++]; 631 rl_arg = UpdateRawLoc(rl_arg); 632 if (rl_arg.wide && (next_reg <= TargetReg(kArg2))) { 633 LoadValueDirectWideFixed(rl_arg, next_reg, next_reg + 1); 634 next_reg++; 635 next_arg++; 636 } else { 637 if (rl_arg.wide) { 638 rl_arg.wide = false; 639 rl_arg.is_const = false; 640 } 641 LoadValueDirectFixed(rl_arg, next_reg); 642 } 643 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, 644 direct_code, direct_method, type); 645 } 646 return call_state; 647 } 648 649 /* 650 * Load up to 5 arguments, the first three of which will be in 651 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer, 652 * and as part of the load sequence, it must be replaced with 653 * the target method pointer. Note, this may also be called 654 * for "range" variants if the number of arguments is 5 or fewer. 655 */ 656 int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, 657 int call_state, LIR** pcrLabel, NextCallInsn next_call_insn, 658 const MethodReference& target_method, 659 uint32_t vtable_idx, uintptr_t direct_code, 660 uintptr_t direct_method, InvokeType type, bool skip_this) { 661 RegLocation rl_arg; 662 663 /* If no arguments, just return */ 664 if (info->num_arg_words == 0) 665 return call_state; 666 667 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, 668 direct_code, direct_method, type); 669 670 DCHECK_LE(info->num_arg_words, 5); 671 if (info->num_arg_words > 3) { 672 int32_t next_use = 3; 673 // Detect special case of wide arg spanning arg3/arg4 674 RegLocation rl_use0 = info->args[0]; 675 RegLocation rl_use1 = info->args[1]; 676 RegLocation rl_use2 = info->args[2]; 677 if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) && 678 rl_use2.wide) { 679 int reg = -1; 680 // Wide spans, we need the 2nd half of uses[2]. 681 rl_arg = UpdateLocWide(rl_use2); 682 if (rl_arg.location == kLocPhysReg) { 683 reg = rl_arg.high_reg; 684 } else { 685 // kArg2 & rArg3 can safely be used here 686 reg = TargetReg(kArg3); 687 LoadWordDisp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg); 688 call_state = next_call_insn(cu_, info, call_state, target_method, 689 vtable_idx, direct_code, direct_method, type); 690 } 691 StoreBaseDisp(TargetReg(kSp), (next_use + 1) * 4, reg, kWord); 692 StoreBaseDisp(TargetReg(kSp), 16 /* (3+1)*4 */, reg, kWord); 693 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, 694 direct_code, direct_method, type); 695 next_use++; 696 } 697 // Loop through the rest 698 while (next_use < info->num_arg_words) { 699 int low_reg; 700 int high_reg = -1; 701 rl_arg = info->args[next_use]; 702 rl_arg = UpdateRawLoc(rl_arg); 703 if (rl_arg.location == kLocPhysReg) { 704 low_reg = rl_arg.low_reg; 705 high_reg = rl_arg.high_reg; 706 } else { 707 low_reg = TargetReg(kArg2); 708 if (rl_arg.wide) { 709 high_reg = TargetReg(kArg3); 710 LoadValueDirectWideFixed(rl_arg, low_reg, high_reg); 711 } else { 712 LoadValueDirectFixed(rl_arg, low_reg); 713 } 714 call_state = next_call_insn(cu_, info, call_state, target_method, 715 vtable_idx, direct_code, direct_method, type); 716 } 717 int outs_offset = (next_use + 1) * 4; 718 if (rl_arg.wide) { 719 StoreBaseDispWide(TargetReg(kSp), outs_offset, low_reg, high_reg); 720 next_use += 2; 721 } else { 722 StoreWordDisp(TargetReg(kSp), outs_offset, low_reg); 723 next_use++; 724 } 725 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, 726 direct_code, direct_method, type); 727 } 728 } 729 730 call_state = LoadArgRegs(info, call_state, next_call_insn, 731 target_method, vtable_idx, direct_code, direct_method, 732 type, skip_this); 733 734 if (pcrLabel) { 735 *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags); 736 } 737 return call_state; 738 } 739 740 /* 741 * May have 0+ arguments (also used for jumbo). Note that 742 * source virtual registers may be in physical registers, so may 743 * need to be flushed to home location before copying. This 744 * applies to arg3 and above (see below). 745 * 746 * Two general strategies: 747 * If < 20 arguments 748 * Pass args 3-18 using vldm/vstm block copy 749 * Pass arg0, arg1 & arg2 in kArg1-kArg3 750 * If 20+ arguments 751 * Pass args arg19+ using memcpy block copy 752 * Pass arg0, arg1 & arg2 in kArg1-kArg3 753 * 754 */ 755 int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, 756 LIR** pcrLabel, NextCallInsn next_call_insn, 757 const MethodReference& target_method, 758 uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method, 759 InvokeType type, bool skip_this) { 760 // If we can treat it as non-range (Jumbo ops will use range form) 761 if (info->num_arg_words <= 5) 762 return GenDalvikArgsNoRange(info, call_state, pcrLabel, 763 next_call_insn, target_method, vtable_idx, 764 direct_code, direct_method, type, skip_this); 765 /* 766 * First load the non-register arguments. Both forms expect all 767 * of the source arguments to be in their home frame location, so 768 * scan the s_reg names and flush any that have been promoted to 769 * frame backing storage. 770 */ 771 // Scan the rest of the args - if in phys_reg flush to memory 772 for (int next_arg = 0; next_arg < info->num_arg_words;) { 773 RegLocation loc = info->args[next_arg]; 774 if (loc.wide) { 775 loc = UpdateLocWide(loc); 776 if ((next_arg >= 2) && (loc.location == kLocPhysReg)) { 777 StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low), 778 loc.low_reg, loc.high_reg); 779 } 780 next_arg += 2; 781 } else { 782 loc = UpdateLoc(loc); 783 if ((next_arg >= 3) && (loc.location == kLocPhysReg)) { 784 StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), 785 loc.low_reg, kWord); 786 } 787 next_arg++; 788 } 789 } 790 791 int start_offset = SRegOffset(info->args[3].s_reg_low); 792 int outs_offset = 4 /* Method* */ + (3 * 4); 793 if (cu_->instruction_set != kThumb2) { 794 // Generate memcpy 795 OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset); 796 OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset); 797 CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0), 798 TargetReg(kArg1), (info->num_arg_words - 3) * 4, false); 799 } else { 800 if (info->num_arg_words >= 20) { 801 // Generate memcpy 802 OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset); 803 OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset); 804 CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0), 805 TargetReg(kArg1), (info->num_arg_words - 3) * 4, false); 806 } else { 807 // Use vldm/vstm pair using kArg3 as a temp 808 int regs_left = std::min(info->num_arg_words - 3, 16); 809 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, 810 direct_code, direct_method, type); 811 OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset); 812 LIR* ld = OpVldm(TargetReg(kArg3), regs_left); 813 // TUNING: loosen barrier 814 ld->def_mask = ENCODE_ALL; 815 SetMemRefType(ld, true /* is_load */, kDalvikReg); 816 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, 817 direct_code, direct_method, type); 818 OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4)); 819 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, 820 direct_code, direct_method, type); 821 LIR* st = OpVstm(TargetReg(kArg3), regs_left); 822 SetMemRefType(st, false /* is_load */, kDalvikReg); 823 st->def_mask = ENCODE_ALL; 824 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, 825 direct_code, direct_method, type); 826 } 827 } 828 829 call_state = LoadArgRegs(info, call_state, next_call_insn, 830 target_method, vtable_idx, direct_code, direct_method, 831 type, skip_this); 832 833 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, 834 direct_code, direct_method, type); 835 if (pcrLabel) { 836 *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags); 837 } 838 return call_state; 839 } 840 841 RegLocation Mir2Lir::InlineTarget(CallInfo* info) { 842 RegLocation res; 843 if (info->result.location == kLocInvalid) { 844 res = GetReturn(false); 845 } else { 846 res = info->result; 847 } 848 return res; 849 } 850 851 RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) { 852 RegLocation res; 853 if (info->result.location == kLocInvalid) { 854 res = GetReturnWide(false); 855 } else { 856 res = info->result; 857 } 858 return res; 859 } 860 861 bool Mir2Lir::GenInlinedCharAt(CallInfo* info) { 862 if (cu_->instruction_set == kMips) { 863 // TODO - add Mips implementation 864 return false; 865 } 866 // Location of reference to data array 867 int value_offset = mirror::String::ValueOffset().Int32Value(); 868 // Location of count 869 int count_offset = mirror::String::CountOffset().Int32Value(); 870 // Starting offset within data array 871 int offset_offset = mirror::String::OffsetOffset().Int32Value(); 872 // Start of char data with array_ 873 int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value(); 874 875 RegLocation rl_obj = info->args[0]; 876 RegLocation rl_idx = info->args[1]; 877 rl_obj = LoadValue(rl_obj, kCoreReg); 878 rl_idx = LoadValue(rl_idx, kCoreReg); 879 int reg_max; 880 GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags); 881 bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK)); 882 LIR* launch_pad = NULL; 883 int reg_off = INVALID_REG; 884 int reg_ptr = INVALID_REG; 885 if (cu_->instruction_set != kX86) { 886 reg_off = AllocTemp(); 887 reg_ptr = AllocTemp(); 888 if (range_check) { 889 reg_max = AllocTemp(); 890 LoadWordDisp(rl_obj.low_reg, count_offset, reg_max); 891 } 892 LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off); 893 LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr); 894 if (range_check) { 895 // Set up a launch pad to allow retry in case of bounds violation */ 896 launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info)); 897 intrinsic_launchpads_.Insert(launch_pad); 898 OpRegReg(kOpCmp, rl_idx.low_reg, reg_max); 899 FreeTemp(reg_max); 900 OpCondBranch(kCondCs, launch_pad); 901 } 902 } else { 903 if (range_check) { 904 reg_max = AllocTemp(); 905 LoadWordDisp(rl_obj.low_reg, count_offset, reg_max); 906 // Set up a launch pad to allow retry in case of bounds violation */ 907 launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info)); 908 intrinsic_launchpads_.Insert(launch_pad); 909 OpRegReg(kOpCmp, rl_idx.low_reg, reg_max); 910 FreeTemp(reg_max); 911 OpCondBranch(kCondCc, launch_pad); 912 } 913 reg_off = AllocTemp(); 914 reg_ptr = AllocTemp(); 915 LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off); 916 LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr); 917 } 918 OpRegImm(kOpAdd, reg_ptr, data_offset); 919 OpRegReg(kOpAdd, reg_off, rl_idx.low_reg); 920 FreeTemp(rl_obj.low_reg); 921 FreeTemp(rl_idx.low_reg); 922 RegLocation rl_dest = InlineTarget(info); 923 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 924 LoadBaseIndexed(reg_ptr, reg_off, rl_result.low_reg, 1, kUnsignedHalf); 925 FreeTemp(reg_off); 926 FreeTemp(reg_ptr); 927 StoreValue(rl_dest, rl_result); 928 if (range_check) { 929 launch_pad->operands[2] = 0; // no resumption 930 } 931 // Record that we've already inlined & null checked 932 info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK); 933 return true; 934 } 935 936 // Generates an inlined String.is_empty or String.length. 937 bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) { 938 if (cu_->instruction_set == kMips) { 939 // TODO - add Mips implementation 940 return false; 941 } 942 // dst = src.length(); 943 RegLocation rl_obj = info->args[0]; 944 rl_obj = LoadValue(rl_obj, kCoreReg); 945 RegLocation rl_dest = InlineTarget(info); 946 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 947 GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags); 948 LoadWordDisp(rl_obj.low_reg, mirror::String::CountOffset().Int32Value(), rl_result.low_reg); 949 if (is_empty) { 950 // dst = (dst == 0); 951 if (cu_->instruction_set == kThumb2) { 952 int t_reg = AllocTemp(); 953 OpRegReg(kOpNeg, t_reg, rl_result.low_reg); 954 OpRegRegReg(kOpAdc, rl_result.low_reg, rl_result.low_reg, t_reg); 955 } else { 956 DCHECK_EQ(cu_->instruction_set, kX86); 957 OpRegImm(kOpSub, rl_result.low_reg, 1); 958 OpRegImm(kOpLsr, rl_result.low_reg, 31); 959 } 960 } 961 StoreValue(rl_dest, rl_result); 962 return true; 963 } 964 965 bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) { 966 if (cu_->instruction_set == kMips) { 967 // TODO - add Mips implementation 968 return false; 969 } 970 RegLocation rl_src = info->args[0]; 971 rl_src = LoadValue(rl_src, kCoreReg); 972 RegLocation rl_dest = InlineTarget(info); 973 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 974 int sign_reg = AllocTemp(); 975 // abs(x) = y<=x>>31, (x+y)^y. 976 OpRegRegImm(kOpAsr, sign_reg, rl_src.low_reg, 31); 977 OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg); 978 OpRegReg(kOpXor, rl_result.low_reg, sign_reg); 979 StoreValue(rl_dest, rl_result); 980 return true; 981 } 982 983 bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) { 984 if (cu_->instruction_set == kMips) { 985 // TODO - add Mips implementation 986 return false; 987 } 988 if (cu_->instruction_set == kThumb2) { 989 RegLocation rl_src = info->args[0]; 990 rl_src = LoadValueWide(rl_src, kCoreReg); 991 RegLocation rl_dest = InlineTargetWide(info); 992 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 993 int sign_reg = AllocTemp(); 994 // abs(x) = y<=x>>31, (x+y)^y. 995 OpRegRegImm(kOpAsr, sign_reg, rl_src.high_reg, 31); 996 OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg); 997 OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, sign_reg); 998 OpRegReg(kOpXor, rl_result.low_reg, sign_reg); 999 OpRegReg(kOpXor, rl_result.high_reg, sign_reg); 1000 StoreValueWide(rl_dest, rl_result); 1001 return true; 1002 } else { 1003 DCHECK_EQ(cu_->instruction_set, kX86); 1004 // Reuse source registers to avoid running out of temps 1005 RegLocation rl_src = info->args[0]; 1006 rl_src = LoadValueWide(rl_src, kCoreReg); 1007 RegLocation rl_dest = InlineTargetWide(info); 1008 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 1009 OpRegCopyWide(rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg); 1010 FreeTemp(rl_src.low_reg); 1011 FreeTemp(rl_src.high_reg); 1012 int sign_reg = AllocTemp(); 1013 // abs(x) = y<=x>>31, (x+y)^y. 1014 OpRegRegImm(kOpAsr, sign_reg, rl_result.high_reg, 31); 1015 OpRegReg(kOpAdd, rl_result.low_reg, sign_reg); 1016 OpRegReg(kOpAdc, rl_result.high_reg, sign_reg); 1017 OpRegReg(kOpXor, rl_result.low_reg, sign_reg); 1018 OpRegReg(kOpXor, rl_result.high_reg, sign_reg); 1019 StoreValueWide(rl_dest, rl_result); 1020 return true; 1021 } 1022 } 1023 1024 bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) { 1025 if (cu_->instruction_set == kMips) { 1026 // TODO - add Mips implementation 1027 return false; 1028 } 1029 RegLocation rl_src = info->args[0]; 1030 RegLocation rl_dest = InlineTarget(info); 1031 StoreValue(rl_dest, rl_src); 1032 return true; 1033 } 1034 1035 bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) { 1036 if (cu_->instruction_set == kMips) { 1037 // TODO - add Mips implementation 1038 return false; 1039 } 1040 RegLocation rl_src = info->args[0]; 1041 RegLocation rl_dest = InlineTargetWide(info); 1042 StoreValueWide(rl_dest, rl_src); 1043 return true; 1044 } 1045 1046 /* 1047 * Fast string.index_of(I) & (II). Tests for simple case of char <= 0xffff, 1048 * otherwise bails to standard library code. 1049 */ 1050 bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { 1051 if (cu_->instruction_set == kMips) { 1052 // TODO - add Mips implementation 1053 return false; 1054 } 1055 ClobberCalleeSave(); 1056 LockCallTemps(); // Using fixed registers 1057 int reg_ptr = TargetReg(kArg0); 1058 int reg_char = TargetReg(kArg1); 1059 int reg_start = TargetReg(kArg2); 1060 1061 RegLocation rl_obj = info->args[0]; 1062 RegLocation rl_char = info->args[1]; 1063 RegLocation rl_start = info->args[2]; 1064 LoadValueDirectFixed(rl_obj, reg_ptr); 1065 LoadValueDirectFixed(rl_char, reg_char); 1066 if (zero_based) { 1067 LoadConstant(reg_start, 0); 1068 } else { 1069 LoadValueDirectFixed(rl_start, reg_start); 1070 } 1071 int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf)) : 0; 1072 GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags); 1073 LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info)); 1074 intrinsic_launchpads_.Insert(launch_pad); 1075 OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad); 1076 // NOTE: not a safepoint 1077 if (cu_->instruction_set != kX86) { 1078 OpReg(kOpBlx, r_tgt); 1079 } else { 1080 OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pIndexOf)); 1081 } 1082 LIR* resume_tgt = NewLIR0(kPseudoTargetLabel); 1083 launch_pad->operands[2] = reinterpret_cast<uintptr_t>(resume_tgt); 1084 // Record that we've already inlined & null checked 1085 info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK); 1086 RegLocation rl_return = GetReturn(false); 1087 RegLocation rl_dest = InlineTarget(info); 1088 StoreValue(rl_dest, rl_return); 1089 return true; 1090 } 1091 1092 /* Fast string.compareTo(Ljava/lang/string;)I. */ 1093 bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) { 1094 if (cu_->instruction_set == kMips) { 1095 // TODO - add Mips implementation 1096 return false; 1097 } 1098 ClobberCalleeSave(); 1099 LockCallTemps(); // Using fixed registers 1100 int reg_this = TargetReg(kArg0); 1101 int reg_cmp = TargetReg(kArg1); 1102 1103 RegLocation rl_this = info->args[0]; 1104 RegLocation rl_cmp = info->args[1]; 1105 LoadValueDirectFixed(rl_this, reg_this); 1106 LoadValueDirectFixed(rl_cmp, reg_cmp); 1107 int r_tgt = (cu_->instruction_set != kX86) ? 1108 LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0; 1109 GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags); 1110 // TUNING: check if rl_cmp.s_reg_low is already null checked 1111 LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info)); 1112 intrinsic_launchpads_.Insert(launch_pad); 1113 OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad); 1114 // NOTE: not a safepoint 1115 if (cu_->instruction_set != kX86) { 1116 OpReg(kOpBlx, r_tgt); 1117 } else { 1118 OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)); 1119 } 1120 launch_pad->operands[2] = 0; // No return possible 1121 // Record that we've already inlined & null checked 1122 info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK); 1123 RegLocation rl_return = GetReturn(false); 1124 RegLocation rl_dest = InlineTarget(info); 1125 StoreValue(rl_dest, rl_return); 1126 return true; 1127 } 1128 1129 bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) { 1130 RegLocation rl_dest = InlineTarget(info); 1131 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 1132 ThreadOffset offset = Thread::PeerOffset(); 1133 if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) { 1134 LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.low_reg); 1135 } else { 1136 CHECK(cu_->instruction_set == kX86); 1137 reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset); 1138 } 1139 StoreValue(rl_dest, rl_result); 1140 return true; 1141 } 1142 1143 bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info, 1144 bool is_long, bool is_volatile) { 1145 if (cu_->instruction_set == kMips) { 1146 // TODO - add Mips implementation 1147 return false; 1148 } 1149 // Unused - RegLocation rl_src_unsafe = info->args[0]; 1150 RegLocation rl_src_obj = info->args[1]; // Object 1151 RegLocation rl_src_offset = info->args[2]; // long low 1152 rl_src_offset.wide = 0; // ignore high half in info->args[3] 1153 RegLocation rl_dest = InlineTarget(info); // result reg 1154 if (is_volatile) { 1155 GenMemBarrier(kLoadLoad); 1156 } 1157 RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg); 1158 RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg); 1159 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); 1160 if (is_long) { 1161 OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg); 1162 LoadBaseDispWide(rl_object.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG); 1163 StoreValueWide(rl_dest, rl_result); 1164 } else { 1165 LoadBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_result.low_reg, 0, kWord); 1166 StoreValue(rl_dest, rl_result); 1167 } 1168 return true; 1169 } 1170 1171 bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long, 1172 bool is_object, bool is_volatile, bool is_ordered) { 1173 if (cu_->instruction_set == kMips) { 1174 // TODO - add Mips implementation 1175 return false; 1176 } 1177 if (cu_->instruction_set == kX86 && is_object) { 1178 // TODO: fix X86, it exhausts registers for card marking. 1179 return false; 1180 } 1181 // Unused - RegLocation rl_src_unsafe = info->args[0]; 1182 RegLocation rl_src_obj = info->args[1]; // Object 1183 RegLocation rl_src_offset = info->args[2]; // long low 1184 rl_src_offset.wide = 0; // ignore high half in info->args[3] 1185 RegLocation rl_src_value = info->args[4]; // value to store 1186 if (is_volatile || is_ordered) { 1187 GenMemBarrier(kStoreStore); 1188 } 1189 RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg); 1190 RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg); 1191 RegLocation rl_value; 1192 if (is_long) { 1193 rl_value = LoadValueWide(rl_src_value, kCoreReg); 1194 OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg); 1195 StoreBaseDispWide(rl_object.low_reg, 0, rl_value.low_reg, rl_value.high_reg); 1196 } else { 1197 rl_value = LoadValue(rl_src_value, kCoreReg); 1198 StoreBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_value.low_reg, 0, kWord); 1199 } 1200 if (is_volatile) { 1201 GenMemBarrier(kStoreLoad); 1202 } 1203 if (is_object) { 1204 MarkGCCard(rl_value.low_reg, rl_object.low_reg); 1205 } 1206 return true; 1207 } 1208 1209 bool Mir2Lir::GenIntrinsic(CallInfo* info) { 1210 if (info->opt_flags & MIR_INLINED) { 1211 return false; 1212 } 1213 /* 1214 * TODO: move these to a target-specific structured constant array 1215 * and use a generic match function. The list of intrinsics may be 1216 * slightly different depending on target. 1217 * TODO: Fold this into a matching function that runs during 1218 * basic block building. This should be part of the action for 1219 * small method inlining and recognition of the special object init 1220 * method. By doing this during basic block construction, we can also 1221 * take advantage of/generate new useful dataflow info. 1222 */ 1223 StringPiece tgt_methods_declaring_class( 1224 cu_->dex_file->GetMethodDeclaringClassDescriptor(cu_->dex_file->GetMethodId(info->index))); 1225 if (tgt_methods_declaring_class.starts_with("Ljava/lang/Double;")) { 1226 std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file)); 1227 if (tgt_method == "long java.lang.Double.doubleToRawLongBits(double)") { 1228 return GenInlinedDoubleCvt(info); 1229 } 1230 if (tgt_method == "double java.lang.Double.longBitsToDouble(long)") { 1231 return GenInlinedDoubleCvt(info); 1232 } 1233 } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Float;")) { 1234 std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file)); 1235 if (tgt_method == "int java.lang.Float.float_to_raw_int_bits(float)") { 1236 return GenInlinedFloatCvt(info); 1237 } 1238 if (tgt_method == "float java.lang.Float.intBitsToFloat(int)") { 1239 return GenInlinedFloatCvt(info); 1240 } 1241 } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Math;") || 1242 tgt_methods_declaring_class.starts_with("Ljava/lang/StrictMath;")) { 1243 std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file)); 1244 if (tgt_method == "int java.lang.Math.abs(int)" || 1245 tgt_method == "int java.lang.StrictMath.abs(int)") { 1246 return GenInlinedAbsInt(info); 1247 } 1248 if (tgt_method == "long java.lang.Math.abs(long)" || 1249 tgt_method == "long java.lang.StrictMath.abs(long)") { 1250 return GenInlinedAbsLong(info); 1251 } 1252 if (tgt_method == "int java.lang.Math.max(int, int)" || 1253 tgt_method == "int java.lang.StrictMath.max(int, int)") { 1254 return GenInlinedMinMaxInt(info, false /* is_min */); 1255 } 1256 if (tgt_method == "int java.lang.Math.min(int, int)" || 1257 tgt_method == "int java.lang.StrictMath.min(int, int)") { 1258 return GenInlinedMinMaxInt(info, true /* is_min */); 1259 } 1260 if (tgt_method == "double java.lang.Math.sqrt(double)" || 1261 tgt_method == "double java.lang.StrictMath.sqrt(double)") { 1262 return GenInlinedSqrt(info); 1263 } 1264 } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/String;")) { 1265 std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file)); 1266 if (tgt_method == "char java.lang.String.charAt(int)") { 1267 return GenInlinedCharAt(info); 1268 } 1269 if (tgt_method == "int java.lang.String.compareTo(java.lang.String)") { 1270 return GenInlinedStringCompareTo(info); 1271 } 1272 if (tgt_method == "boolean java.lang.String.is_empty()") { 1273 return GenInlinedStringIsEmptyOrLength(info, true /* is_empty */); 1274 } 1275 if (tgt_method == "int java.lang.String.index_of(int, int)") { 1276 return GenInlinedIndexOf(info, false /* base 0 */); 1277 } 1278 if (tgt_method == "int java.lang.String.index_of(int)") { 1279 return GenInlinedIndexOf(info, true /* base 0 */); 1280 } 1281 if (tgt_method == "int java.lang.String.length()") { 1282 return GenInlinedStringIsEmptyOrLength(info, false /* is_empty */); 1283 } 1284 } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Thread;")) { 1285 std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file)); 1286 if (tgt_method == "java.lang.Thread java.lang.Thread.currentThread()") { 1287 return GenInlinedCurrentThread(info); 1288 } 1289 } else if (tgt_methods_declaring_class.starts_with("Lsun/misc/Unsafe;")) { 1290 std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file)); 1291 if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)") { 1292 return GenInlinedCas32(info, false); 1293 } 1294 if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapObject(java.lang.Object, long, java.lang.Object, java.lang.Object)") { 1295 return GenInlinedCas32(info, true); 1296 } 1297 if (tgt_method == "int sun.misc.Unsafe.getInt(java.lang.Object, long)") { 1298 return GenInlinedUnsafeGet(info, false /* is_long */, false /* is_volatile */); 1299 } 1300 if (tgt_method == "int sun.misc.Unsafe.getIntVolatile(java.lang.Object, long)") { 1301 return GenInlinedUnsafeGet(info, false /* is_long */, true /* is_volatile */); 1302 } 1303 if (tgt_method == "void sun.misc.Unsafe.putInt(java.lang.Object, long, int)") { 1304 return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */, 1305 false /* is_volatile */, false /* is_ordered */); 1306 } 1307 if (tgt_method == "void sun.misc.Unsafe.putIntVolatile(java.lang.Object, long, int)") { 1308 return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */, 1309 true /* is_volatile */, false /* is_ordered */); 1310 } 1311 if (tgt_method == "void sun.misc.Unsafe.putOrderedInt(java.lang.Object, long, int)") { 1312 return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */, 1313 false /* is_volatile */, true /* is_ordered */); 1314 } 1315 if (tgt_method == "long sun.misc.Unsafe.getLong(java.lang.Object, long)") { 1316 return GenInlinedUnsafeGet(info, true /* is_long */, false /* is_volatile */); 1317 } 1318 if (tgt_method == "long sun.misc.Unsafe.getLongVolatile(java.lang.Object, long)") { 1319 return GenInlinedUnsafeGet(info, true /* is_long */, true /* is_volatile */); 1320 } 1321 if (tgt_method == "void sun.misc.Unsafe.putLong(java.lang.Object, long, long)") { 1322 return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */, 1323 false /* is_volatile */, false /* is_ordered */); 1324 } 1325 if (tgt_method == "void sun.misc.Unsafe.putLongVolatile(java.lang.Object, long, long)") { 1326 return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */, 1327 true /* is_volatile */, false /* is_ordered */); 1328 } 1329 if (tgt_method == "void sun.misc.Unsafe.putOrderedLong(java.lang.Object, long, long)") { 1330 return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */, 1331 false /* is_volatile */, true /* is_ordered */); 1332 } 1333 if (tgt_method == "java.lang.Object sun.misc.Unsafe.getObject(java.lang.Object, long)") { 1334 return GenInlinedUnsafeGet(info, false /* is_long */, false /* is_volatile */); 1335 } 1336 if (tgt_method == "java.lang.Object sun.misc.Unsafe.getObjectVolatile(java.lang.Object, long)") { 1337 return GenInlinedUnsafeGet(info, false /* is_long */, true /* is_volatile */); 1338 } 1339 if (tgt_method == "void sun.misc.Unsafe.putObject(java.lang.Object, long, java.lang.Object)") { 1340 return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */, 1341 false /* is_volatile */, false /* is_ordered */); 1342 } 1343 if (tgt_method == "void sun.misc.Unsafe.putObjectVolatile(java.lang.Object, long, java.lang.Object)") { 1344 return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */, 1345 true /* is_volatile */, false /* is_ordered */); 1346 } 1347 if (tgt_method == "void sun.misc.Unsafe.putOrderedObject(java.lang.Object, long, java.lang.Object)") { 1348 return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */, 1349 false /* is_volatile */, true /* is_ordered */); 1350 } 1351 } 1352 return false; 1353 } 1354 1355 void Mir2Lir::GenInvoke(CallInfo* info) { 1356 if (GenIntrinsic(info)) { 1357 return; 1358 } 1359 InvokeType original_type = info->type; // avoiding mutation by ComputeInvokeInfo 1360 int call_state = 0; 1361 LIR* null_ck; 1362 LIR** p_null_ck = NULL; 1363 NextCallInsn next_call_insn; 1364 FlushAllRegs(); /* Everything to home location */ 1365 // Explicit register usage 1366 LockCallTemps(); 1367 1368 DexCompilationUnit* cUnit = mir_graph_->GetCurrentDexCompilationUnit(); 1369 MethodReference target_method(cUnit->GetDexFile(), info->index); 1370 int vtable_idx; 1371 uintptr_t direct_code; 1372 uintptr_t direct_method; 1373 bool skip_this; 1374 bool fast_path = 1375 cu_->compiler_driver->ComputeInvokeInfo(mir_graph_->GetCurrentDexCompilationUnit(), 1376 current_dalvik_offset_, 1377 info->type, target_method, 1378 vtable_idx, 1379 direct_code, direct_method, 1380 true) && !SLOW_INVOKE_PATH; 1381 if (info->type == kInterface) { 1382 if (fast_path) { 1383 p_null_ck = &null_ck; 1384 } 1385 next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck; 1386 skip_this = false; 1387 } else if (info->type == kDirect) { 1388 if (fast_path) { 1389 p_null_ck = &null_ck; 1390 } 1391 next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP; 1392 skip_this = false; 1393 } else if (info->type == kStatic) { 1394 next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP; 1395 skip_this = false; 1396 } else if (info->type == kSuper) { 1397 DCHECK(!fast_path); // Fast path is a direct call. 1398 next_call_insn = NextSuperCallInsnSP; 1399 skip_this = false; 1400 } else { 1401 DCHECK_EQ(info->type, kVirtual); 1402 next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP; 1403 skip_this = fast_path; 1404 } 1405 if (!info->is_range) { 1406 call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck, 1407 next_call_insn, target_method, 1408 vtable_idx, direct_code, direct_method, 1409 original_type, skip_this); 1410 } else { 1411 call_state = GenDalvikArgsRange(info, call_state, p_null_ck, 1412 next_call_insn, target_method, vtable_idx, 1413 direct_code, direct_method, original_type, 1414 skip_this); 1415 } 1416 // Finish up any of the call sequence not interleaved in arg loading 1417 while (call_state >= 0) { 1418 call_state = next_call_insn(cu_, info, call_state, target_method, 1419 vtable_idx, direct_code, direct_method, 1420 original_type); 1421 } 1422 LIR* call_inst; 1423 if (cu_->instruction_set != kX86) { 1424 call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt)); 1425 } else { 1426 if (fast_path && info->type != kInterface) { 1427 call_inst = OpMem(kOpBlx, TargetReg(kArg0), 1428 mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value()); 1429 } else { 1430 ThreadOffset trampoline(-1); 1431 switch (info->type) { 1432 case kInterface: 1433 trampoline = fast_path ? QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline) 1434 : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); 1435 break; 1436 case kDirect: 1437 trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); 1438 break; 1439 case kStatic: 1440 trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); 1441 break; 1442 case kSuper: 1443 trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); 1444 break; 1445 case kVirtual: 1446 trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); 1447 break; 1448 default: 1449 LOG(FATAL) << "Unexpected invoke type"; 1450 } 1451 call_inst = OpThreadMem(kOpBlx, trampoline); 1452 } 1453 } 1454 MarkSafepointPC(call_inst); 1455 1456 ClobberCalleeSave(); 1457 if (info->result.location != kLocInvalid) { 1458 // We have a following MOVE_RESULT - do it now. 1459 if (info->result.wide) { 1460 RegLocation ret_loc = GetReturnWide(info->result.fp); 1461 StoreValueWide(info->result, ret_loc); 1462 } else { 1463 RegLocation ret_loc = GetReturn(info->result.fp); 1464 StoreValue(info->result, ret_loc); 1465 } 1466 } 1467 } 1468 1469 } // namespace art 1470