/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Dalvik.h"
#include "libdex/OpCode.h"
#include "interp/Jit.h"
#include "CompilerInternals.h"
#include "Dataflow.h"

/*
 * Parse an instruction, return the length of the instruction
 */
static inline int parseInsn(const u2 *codePtr, DecodedInstruction *decInsn,
                            bool printMe)
{
    u2 instr = *codePtr;
    OpCode opcode = instr & 0xff;
    int insnWidth;

    // Don't parse instruction data
    if (opcode == OP_NOP && instr != 0) {
        return 0;
    } else {
        insnWidth = gDvm.instrWidth[opcode];
        if (insnWidth < 0) {
            insnWidth = -insnWidth;
        }
    }

    dexDecodeInstruction(gDvm.instrFormat, codePtr, decInsn);
    if (printMe) {
        char *decodedString = dvmCompilerGetDalvikDisassembly(decInsn, NULL);
        LOGD("%p: %#06x %s\n", codePtr, opcode, decodedString);
    }
    return insnWidth;
}
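/*
 * Illustrative sketch (not part of the build): how a caller can walk a code
 * stream with parseInsn(). Inline data for packed-switch, sparse-switch, and
 * fill-array-data payloads starts with a pseudo-opcode whose low byte is
 * OP_NOP (0x00) and whose high byte is nonzero (0x0100, 0x0200, and 0x0300
 * respectively), which is why "opcode == OP_NOP && instr != 0" above marks
 * the start of a data section.
 */
#if 0
static void walkCodeStream(const u2 *codePtr, const u2 *codeEnd)
{
    while (codePtr < codeEnd) {
        DecodedInstruction decInsn;
        int width = parseInsn(codePtr, &decInsn, false);
        /* Zero width means we have reached inline data - stop scanning */
        if (width == 0)
            break;
        codePtr += width;
    }
}
#endif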
#define UNKNOWN_TARGET 0xffffffff

/*
 * Identify block-ending instructions and collect supplemental information
 * about the instructions that follow them.
 */
static inline bool findBlockBoundary(const Method *caller, MIR *insn,
                                     unsigned int curOffset,
                                     unsigned int *target, bool *isInvoke,
                                     const Method **callee)
{
    switch (insn->dalvikInsn.opCode) {
        /* Target is not compile-time constant */
        case OP_RETURN_VOID:
        case OP_RETURN:
        case OP_RETURN_WIDE:
        case OP_RETURN_OBJECT:
        case OP_THROW:
            *target = UNKNOWN_TARGET;
            break;
        case OP_INVOKE_VIRTUAL:
        case OP_INVOKE_VIRTUAL_RANGE:
        case OP_INVOKE_INTERFACE:
        case OP_INVOKE_INTERFACE_RANGE:
        case OP_INVOKE_VIRTUAL_QUICK:
        case OP_INVOKE_VIRTUAL_QUICK_RANGE:
            *isInvoke = true;
            break;
        case OP_INVOKE_SUPER:
        case OP_INVOKE_SUPER_RANGE: {
            int mIndex = caller->clazz->pDvmDex->
                pResMethods[insn->dalvikInsn.vB]->methodIndex;
            const Method *calleeMethod =
                caller->clazz->super->vtable[mIndex];

            if (calleeMethod && !dvmIsNativeMethod(calleeMethod)) {
                *target = (unsigned int) calleeMethod->insns;
            }
            *isInvoke = true;
            *callee = calleeMethod;
            break;
        }
        case OP_INVOKE_STATIC:
        case OP_INVOKE_STATIC_RANGE: {
            const Method *calleeMethod =
                caller->clazz->pDvmDex->pResMethods[insn->dalvikInsn.vB];

            if (calleeMethod && !dvmIsNativeMethod(calleeMethod)) {
                *target = (unsigned int) calleeMethod->insns;
            }
            *isInvoke = true;
            *callee = calleeMethod;
            break;
        }
        case OP_INVOKE_SUPER_QUICK:
        case OP_INVOKE_SUPER_QUICK_RANGE: {
            const Method *calleeMethod =
                caller->clazz->super->vtable[insn->dalvikInsn.vB];

            if (calleeMethod && !dvmIsNativeMethod(calleeMethod)) {
                *target = (unsigned int) calleeMethod->insns;
            }
            *isInvoke = true;
            *callee = calleeMethod;
            break;
        }
        case OP_INVOKE_DIRECT:
        case OP_INVOKE_DIRECT_RANGE: {
            const Method *calleeMethod =
                caller->clazz->pDvmDex->pResMethods[insn->dalvikInsn.vB];
            if (calleeMethod && !dvmIsNativeMethod(calleeMethod)) {
                *target = (unsigned int) calleeMethod->insns;
            }
            *isInvoke = true;
            *callee = calleeMethod;
            break;
        }
        case OP_GOTO:
        case OP_GOTO_16:
        case OP_GOTO_32:
            *target = curOffset + (int) insn->dalvikInsn.vA;
            break;

        case OP_IF_EQ:
        case OP_IF_NE:
        case OP_IF_LT:
        case OP_IF_GE:
        case OP_IF_GT:
        case OP_IF_LE:
            *target = curOffset + (int) insn->dalvikInsn.vC;
            break;

        case OP_IF_EQZ:
        case OP_IF_NEZ:
        case OP_IF_LTZ:
        case OP_IF_GEZ:
        case OP_IF_GTZ:
        case OP_IF_LEZ:
            *target = curOffset + (int) insn->dalvikInsn.vB;
            break;

        default:
            return false;
    }
    return true;
}

/*
 * Identify unconditional goto instructions
 */
static inline bool isGoto(MIR *insn)
{
    switch (insn->dalvikInsn.opCode) {
        case OP_GOTO:
        case OP_GOTO_16:
        case OP_GOTO_32:
            return true;
        default:
            return false;
    }
}

/*
 * Identify unconditional branch instructions
 */
static inline bool isUnconditionalBranch(MIR *insn)
{
    switch (insn->dalvikInsn.opCode) {
        case OP_RETURN_VOID:
        case OP_RETURN:
        case OP_RETURN_WIDE:
        case OP_RETURN_OBJECT:
            return true;
        default:
            return isGoto(insn);
    }
}

/*
 * dvmHashTableLookup() callback
 */
static int compareMethod(const CompilerMethodStats *m1,
                         const CompilerMethodStats *m2)
{
    return (int) m1->method - (int) m2->method;
}

/*
 * Analyze the body of the method to collect high-level information regarding
 * inlining:
 * - is this an empty method?
 * - is it a getter/setter?
 * - can it throw an exception?
 *
 * Currently the inliner only handles getters and setters. When its capability
 * becomes more sophisticated, more information will be retrieved here.
 */
static int analyzeInlineTarget(DecodedInstruction *dalvikInsn, int attributes,
                               int offset)
{
    int flags = dexGetInstrFlags(gDvm.instrFlags, dalvikInsn->opCode);
    int dalvikOpCode = dalvikInsn->opCode;

    if ((flags & kInstrInvoke) &&
        (dalvikOpCode != OP_INVOKE_DIRECT_EMPTY)) {
        attributes &= ~METHOD_IS_LEAF;
    }

    if (!(flags & kInstrCanReturn)) {
        if (!(dvmCompilerDataFlowAttributes[dalvikOpCode] &
              DF_IS_GETTER)) {
            attributes &= ~METHOD_IS_GETTER;
        }
        if (!(dvmCompilerDataFlowAttributes[dalvikOpCode] &
              DF_IS_SETTER)) {
            attributes &= ~METHOD_IS_SETTER;
        }
    }

    /*
     * A setter is expected to return void and a getter is expected to return
     * a value. Clear the corresponding bit if the opposite behavior is
     * observed.
     */
    if (flags & kInstrCanReturn) {
        if (dalvikOpCode == OP_RETURN_VOID) {
            attributes &= ~METHOD_IS_GETTER;
        }
        else {
            attributes &= ~METHOD_IS_SETTER;
        }
    }

    if (flags & kInstrCanThrow) {
        attributes &= ~METHOD_IS_THROW_FREE;
    }

    if (offset == 0 && dalvikOpCode == OP_RETURN_VOID) {
        attributes |= METHOD_IS_EMPTY;
    }

    /*
     * Check if this opcode is selected for single stepping.
     * If so, don't inline the callee as there is no stack frame for the
     * interpreter to single-step through the instruction.
     */
    if (SINGLE_STEP_OP(dalvikOpCode)) {
        attributes &= ~(METHOD_IS_GETTER | METHOD_IS_SETTER);
    }

    return attributes;
}
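/*
 * Worked example (hypothetical getter, shown for illustration only): a
 * typical getter such as "int getBar() { return bar; }" compiles to the
 * three-code-unit sequence
 *
 *     iget v0, v1, Foo.bar    // 2 code units, marked DF_IS_GETTER
 *     return v0               // 1 code unit, non-void return
 *
 * The iget leaves METHOD_IS_GETTER set, the non-void return clears
 * METHOD_IS_SETTER, and the total size of 3 code units satisfies the
 * "insnSize == 3" requirement enforced in dvmCompilerAnalyzeMethodBody()
 * below.
 */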
/*
 * Analyze each method whose traces are ever compiled. Collect a variety of
 * statistics like the ratio of exercised vs overall code and code bloat
 * ratios. If isCallee is true, also analyze each instruction in more detail
 * to see if it is suitable for inlining.
 */
CompilerMethodStats *dvmCompilerAnalyzeMethodBody(const Method *method,
                                                  bool isCallee)
{
    const DexCode *dexCode = dvmGetMethodCode(method);
    const u2 *codePtr = dexCode->insns;
    const u2 *codeEnd = dexCode->insns + dexCode->insnsSize;
    int insnSize = 0;
    int hashValue = dvmComputeUtf8Hash(method->name);

    CompilerMethodStats dummyMethodEntry; // For hash table lookup
    CompilerMethodStats *realMethodEntry; // For hash table storage

    /* For lookup only */
    dummyMethodEntry.method = method;
    realMethodEntry = dvmHashTableLookup(gDvmJit.methodStatsTable, hashValue,
                                         &dummyMethodEntry,
                                         (HashCompareFunc) compareMethod,
                                         false);

    /* This method has never been analyzed before - create an entry */
    if (realMethodEntry == NULL) {
        realMethodEntry =
            (CompilerMethodStats *) calloc(1, sizeof(CompilerMethodStats));
        realMethodEntry->method = method;

        dvmHashTableLookup(gDvmJit.methodStatsTable, hashValue,
                           realMethodEntry,
                           (HashCompareFunc) compareMethod,
                           true);
    }

    /* This method is invoked as a callee and has been analyzed - just return */
    if ((isCallee == true) && (realMethodEntry->attributes & METHOD_IS_CALLEE))
        return realMethodEntry;

    /*
     * Similarly, return if this method has been compiled before as a hot
     * method already.
     */
    if ((isCallee == false) &&
        (realMethodEntry->attributes & METHOD_IS_HOT))
        return realMethodEntry;

    int attributes;

    /* Method hasn't been analyzed for the desired purpose yet */
    if (isCallee) {
        /* Aggressively set the attributes until proven otherwise */
        attributes = METHOD_IS_LEAF | METHOD_IS_THROW_FREE | METHOD_IS_CALLEE |
                     METHOD_IS_GETTER | METHOD_IS_SETTER;
    } else {
        attributes = METHOD_IS_HOT;
    }

    /* Count the number of instructions */
    while (codePtr < codeEnd) {
        DecodedInstruction dalvikInsn;
        int width = parseInsn(codePtr, &dalvikInsn, false);

        /* Terminate when the data section is seen */
        if (width == 0)
            break;

        if (isCallee) {
            attributes = analyzeInlineTarget(&dalvikInsn, attributes, insnSize);
        }

        insnSize += width;
        codePtr += width;
    }

    /*
     * Only handle simple getters/setters with one instruction followed by
     * return
     */
    if ((attributes & (METHOD_IS_GETTER | METHOD_IS_SETTER)) &&
        (insnSize != 3)) {
        attributes &= ~(METHOD_IS_GETTER | METHOD_IS_SETTER);
    }

    realMethodEntry->dalvikSize = insnSize * 2;
    realMethodEntry->attributes |= attributes;

#if 0
    /* Uncomment the following to explore various callee patterns */
    if (attributes & METHOD_IS_THROW_FREE) {
        LOGE("%s%s is inlinable%s", method->clazz->descriptor, method->name,
             (attributes & METHOD_IS_EMPTY) ? " empty" : "");
    }

    if (attributes & METHOD_IS_LEAF) {
        LOGE("%s%s is leaf %d%s", method->clazz->descriptor, method->name,
             insnSize, insnSize < 5 ? " (small)" : "");
    }

    if (attributes & (METHOD_IS_GETTER | METHOD_IS_SETTER)) {
        LOGE("%s%s is %s", method->clazz->descriptor, method->name,
             attributes & METHOD_IS_GETTER ?
"getter": "setter"); 365 } 366 if (attributes == 367 (METHOD_IS_LEAF | METHOD_IS_THROW_FREE | METHOD_IS_CALLEE)) { 368 LOGE("%s%s is inlinable non setter/getter", method->clazz->descriptor, 369 method->name); 370 } 371 #endif 372 373 return realMethodEntry; 374 } 375 376 /* 377 * Crawl the stack of the thread that requesed compilation to see if any of the 378 * ancestors are on the blacklist. 379 */ 380 bool filterMethodByCallGraph(Thread *thread, const char *curMethodName) 381 { 382 /* Crawl the Dalvik stack frames and compare the method name*/ 383 StackSaveArea *ssaPtr = ((StackSaveArea *) thread->curFrame) - 1; 384 while (ssaPtr != ((StackSaveArea *) NULL) - 1) { 385 const Method *method = ssaPtr->method; 386 if (method) { 387 int hashValue = dvmComputeUtf8Hash(method->name); 388 bool found = 389 dvmHashTableLookup(gDvmJit.methodTable, hashValue, 390 (char *) method->name, 391 (HashCompareFunc) strcmp, false) != 392 NULL; 393 if (found) { 394 LOGD("Method %s (--> %s) found on the JIT %s list", 395 method->name, curMethodName, 396 gDvmJit.includeSelectedMethod ? "white" : "black"); 397 return true; 398 } 399 400 } 401 ssaPtr = ((StackSaveArea *) ssaPtr->prevFrame) - 1; 402 }; 403 return false; 404 } 405 406 /* 407 * Main entry point to start trace compilation. Basic blocks are constructed 408 * first and they will be passed to the codegen routines to convert Dalvik 409 * bytecode into machine code. 410 */ 411 bool dvmCompileTrace(JitTraceDescription *desc, int numMaxInsts, 412 JitTranslationInfo *info, jmp_buf *bailPtr, 413 int optHints) 414 { 415 const DexCode *dexCode = dvmGetMethodCode(desc->method); 416 const JitTraceRun* currRun = &desc->trace[0]; 417 unsigned int curOffset = currRun->frag.startOffset; 418 unsigned int numInsts = currRun->frag.numInsts; 419 const u2 *codePtr = dexCode->insns + curOffset; 420 int traceSize = 0; // # of half-words 421 const u2 *startCodePtr = codePtr; 422 BasicBlock *startBB, *curBB, *lastBB; 423 int numBlocks = 0; 424 static int compilationId; 425 CompilationUnit cUnit; 426 #if defined(WITH_JIT_TUNING) 427 CompilerMethodStats *methodStats; 428 #endif 429 430 /* If we've already compiled this trace, just return success */ 431 if (dvmJitGetCodeAddr(startCodePtr) && !info->discardResult) { 432 /* 433 * Make sure the codeAddress is NULL so that it won't clobber the 434 * existing entry. 
/*
 * Main entry point to start trace compilation. Basic blocks are constructed
 * first and they will be passed to the codegen routines to convert Dalvik
 * bytecode into machine code.
 */
bool dvmCompileTrace(JitTraceDescription *desc, int numMaxInsts,
                     JitTranslationInfo *info, jmp_buf *bailPtr,
                     int optHints)
{
    const DexCode *dexCode = dvmGetMethodCode(desc->method);
    const JitTraceRun* currRun = &desc->trace[0];
    unsigned int curOffset = currRun->frag.startOffset;
    unsigned int numInsts = currRun->frag.numInsts;
    const u2 *codePtr = dexCode->insns + curOffset;
    int traceSize = 0;  // # of half-words
    const u2 *startCodePtr = codePtr;
    BasicBlock *startBB, *curBB, *lastBB;
    int numBlocks = 0;
    static int compilationId;
    CompilationUnit cUnit;
#if defined(WITH_JIT_TUNING)
    CompilerMethodStats *methodStats;
#endif

    /* If we've already compiled this trace, just return success */
    if (dvmJitGetCodeAddr(startCodePtr) && !info->discardResult) {
        /*
         * Make sure the codeAddress is NULL so that it won't clobber the
         * existing entry.
         */
        info->codeAddress = NULL;
        return true;
    }

    compilationId++;
    memset(&cUnit, 0, sizeof(CompilationUnit));

#if defined(WITH_JIT_TUNING)
    /* Locate the entry to store compilation statistics for this method */
    methodStats = dvmCompilerAnalyzeMethodBody(desc->method, false);
#endif

    /* Set the recover buffer pointer */
    cUnit.bailPtr = bailPtr;

    /* Initialize the printMe flag */
    cUnit.printMe = gDvmJit.printMe;

    /* Initialize the profile flag */
    cUnit.executionCount = gDvmJit.profile;

    /* Set up the method */
    cUnit.method = desc->method;

    /* Initialize the PC reconstruction list */
    dvmInitGrowableList(&cUnit.pcReconstructionList, 8);

    /* Identify traces that we don't want to compile */
    if (gDvmJit.methodTable) {
        int len = strlen(desc->method->clazz->descriptor) +
                  strlen(desc->method->name) + 1;
        char *fullSignature = dvmCompilerNew(len, true);
        strcpy(fullSignature, desc->method->clazz->descriptor);
        strcat(fullSignature, desc->method->name);

        int hashValue = dvmComputeUtf8Hash(fullSignature);

        /*
         * Doing three levels of screening to see whether we want to skip
         * compiling this method
         */

        /* First, check the full "class;method" signature */
        bool methodFound =
            dvmHashTableLookup(gDvmJit.methodTable, hashValue,
                               fullSignature, (HashCompareFunc) strcmp,
                               false) !=
            NULL;

        /* Full signature not found - check the enclosing class */
        if (methodFound == false) {
            int hashValue = dvmComputeUtf8Hash(desc->method->clazz->descriptor);
            methodFound =
                dvmHashTableLookup(gDvmJit.methodTable, hashValue,
                                   (char *) desc->method->clazz->descriptor,
                                   (HashCompareFunc) strcmp, false) !=
                NULL;
            /* Enclosing class not found - check the method name */
            if (methodFound == false) {
                int hashValue = dvmComputeUtf8Hash(desc->method->name);
                methodFound =
                    dvmHashTableLookup(gDvmJit.methodTable, hashValue,
                                       (char *) desc->method->name,
                                       (HashCompareFunc) strcmp, false) !=
                    NULL;

                /*
                 * Debug by call-graph is enabled. Check if the debug list
                 * covers any methods on the VM stack.
                 */
                if (methodFound == false && gDvmJit.checkCallGraph == true) {
                    methodFound =
                        filterMethodByCallGraph(info->requestingThread,
                                                desc->method->name);
                }
            }
        }

        /*
         * Under the following conditions, the trace will be *conservatively*
         * compiled by only containing single-step instructions to and from the
         * interpreter.
         * 1) If includeSelectedMethod == false, the method matches the full or
         *    partial signature stored in the hash table.
         *
         * 2) If includeSelectedMethod == true, the method does not match the
         *    full and partial signature stored in the hash table.
         */
        if (gDvmJit.includeSelectedMethod != methodFound) {
            cUnit.allSingleStep = true;
        } else {
            /* Compile the trace as normal */

            /* Print the method we cherry picked */
            if (gDvmJit.includeSelectedMethod == true) {
                cUnit.printMe = true;
            }
        }
    }
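    /*
     * Example (hypothetical entries, for illustration): with a method table
     * containing "Ljava/lang/String;" the screening above matches any method
     * of that class at the second level; an entry such as
     * "Ljava/lang/String;toString" (descriptor and name concatenated with no
     * separator, exactly as fullSignature is built) matches only that method
     * at the first level, and a bare "toString" matches by name at the third
     * level.
     */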
    /* Allocate the entry block */
    lastBB = startBB = curBB = dvmCompilerNewBB(kTraceEntryBlock);
    curBB->startOffset = curOffset;
    curBB->id = numBlocks++;

    curBB = dvmCompilerNewBB(kDalvikByteCode);
    curBB->startOffset = curOffset;
    curBB->id = numBlocks++;

    /* Make the first real dalvik block the fallthrough of the entry block */
    startBB->fallThrough = curBB;
    lastBB->next = curBB;
    lastBB = curBB;

    if (cUnit.printMe) {
        LOGD("--------\nCompiler: Building trace for %s, offset 0x%x\n",
             desc->method->name, curOffset);
    }

    /*
     * Analyze the trace descriptor and include up to the maximum number
     * of Dalvik instructions in the IR.
     */
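    /*
     * Descriptor layout consumed below (a sketch, based on the accesses in
     * this loop): desc->trace[] is a sequence of JitTraceRun entries. Runs
     * with frag.isCode set describe code fragments (frag.startOffset,
     * frag.numInsts, frag.runEnd); when a fragment ends at an invoke, the
     * following non-code entries carry meta information, and
     * currRun[1].meta / currRun[2].meta hold the callee's class and method
     * for the callsite.
     */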
    while (1) {
        MIR *insn;
        int width;
        insn = dvmCompilerNew(sizeof(MIR), true);
        insn->offset = curOffset;
        width = parseInsn(codePtr, &insn->dalvikInsn, cUnit.printMe);

        /* The trace should never include instruction data */
        assert(width);
        insn->width = width;
        traceSize += width;
        dvmCompilerAppendMIR(curBB, insn);
        cUnit.numInsts++;

        int flags = dexGetInstrFlags(gDvm.instrFlags, insn->dalvikInsn.opCode);

        if ((flags & kInstrInvoke) &&
            (insn->dalvikInsn.opCode != OP_INVOKE_DIRECT_EMPTY)) {
            assert(numInsts == 1);
            CallsiteInfo *callsiteInfo =
                dvmCompilerNew(sizeof(CallsiteInfo), true);
            callsiteInfo->clazz = currRun[1].meta;
            callsiteInfo->method = currRun[2].meta;
            insn->meta.callsiteInfo = callsiteInfo;
        }

        /* Instruction limit reached - terminate the trace here */
        if (cUnit.numInsts >= numMaxInsts) {
            break;
        }
        if (--numInsts == 0) {
            if (currRun->frag.runEnd) {
                break;
            } else {
                /* Advance to the next trace description (i.e. non-meta info) */
                do {
                    currRun++;
                } while (!currRun->frag.isCode);

                /* Dummy end-of-run marker seen */
                if (currRun->frag.numInsts == 0) {
                    break;
                }

                curBB = dvmCompilerNewBB(kDalvikByteCode);
                lastBB->next = curBB;
                lastBB = curBB;
                curBB->id = numBlocks++;
                curOffset = currRun->frag.startOffset;
                numInsts = currRun->frag.numInsts;
                curBB->startOffset = curOffset;
                codePtr = dexCode->insns + curOffset;
            }
        } else {
            curOffset += width;
            codePtr += width;
        }
    }

#if defined(WITH_JIT_TUNING)
    /* Convert # of half-word to bytes */
    methodStats->compiledDalvikSize += traceSize * 2;
#endif

    /*
     * Now scan basic blocks containing real code to connect the
     * taken/fallthrough links. Also create chaining cells for code not
     * included in the trace.
     */
    for (curBB = startBB; curBB; curBB = curBB->next) {
        MIR *lastInsn = curBB->lastMIRInsn;
        /* Skip empty blocks */
        if (lastInsn == NULL) {
            continue;
        }
        curOffset = lastInsn->offset;
        unsigned int targetOffset = curOffset;
        unsigned int fallThroughOffset = curOffset + lastInsn->width;
        bool isInvoke = false;
        const Method *callee = NULL;

        findBlockBoundary(desc->method, curBB->lastMIRInsn, curOffset,
                          &targetOffset, &isInvoke, &callee);

        /* Link the taken and fallthrough blocks */
        BasicBlock *searchBB;

        int flags = dexGetInstrFlags(gDvm.instrFlags,
                                     lastInsn->dalvikInsn.opCode);

        if (flags & kInstrInvoke) {
            cUnit.hasInvoke = true;
        }

        /* No backward branch in the trace - start searching the next BB */
        for (searchBB = curBB->next; searchBB; searchBB = searchBB->next) {
            if (targetOffset == searchBB->startOffset) {
                curBB->taken = searchBB;
            }
            if (fallThroughOffset == searchBB->startOffset) {
                curBB->fallThrough = searchBB;

                /*
                 * Fallthrough block of an invoke instruction needs to be
                 * aligned to a 4-byte boundary (alignment instruction to be
                 * inserted later).
                 */
                if (flags & kInstrInvoke) {
                    searchBB->isFallThroughFromInvoke = true;
                }
            }
        }

        /*
         * Some blocks are ended by non-control-flow-change instructions,
         * currently only due to trace length constraint. In this case we need
         * to generate an explicit branch at the end of the block to jump to
         * the chaining cell.
         *
         * NOTE: INVOKE_DIRECT_EMPTY is actually not an invoke but a nop
         */
        curBB->needFallThroughBranch =
            ((flags & (kInstrCanBranch | kInstrCanSwitch | kInstrCanReturn |
                       kInstrInvoke)) == 0) ||
            (lastInsn->dalvikInsn.opCode == OP_INVOKE_DIRECT_EMPTY);

        /* Only form a loop if JIT_OPT_NO_LOOP is not set */
        if (curBB->taken == NULL &&
            curBB->fallThrough == NULL &&
            flags == (kInstrCanBranch | kInstrCanContinue) &&
            fallThroughOffset == startBB->startOffset &&
            JIT_OPT_NO_LOOP != (optHints & JIT_OPT_NO_LOOP)) {
            BasicBlock *loopBranch = curBB;
            BasicBlock *exitBB;
            BasicBlock *exitChainingCell;

            if (cUnit.printMe) {
                LOGD("Natural loop detected!");
            }
            exitBB = dvmCompilerNewBB(kTraceExitBlock);
            lastBB->next = exitBB;
            lastBB = exitBB;

            exitBB->startOffset = targetOffset;
            exitBB->id = numBlocks++;
            exitBB->needFallThroughBranch = true;

            loopBranch->taken = exitBB;
#if defined(WITH_SELF_VERIFICATION)
            BasicBlock *backwardCell =
                dvmCompilerNewBB(kChainingCellBackwardBranch);
            lastBB->next = backwardCell;
            lastBB = backwardCell;

            backwardCell->startOffset = startBB->startOffset;
            backwardCell->id = numBlocks++;
            loopBranch->fallThrough = backwardCell;
#elif defined(WITH_JIT_TUNING)
            if (gDvmJit.profile) {
                BasicBlock *backwardCell =
                    dvmCompilerNewBB(kChainingCellBackwardBranch);
                lastBB->next = backwardCell;
                lastBB = backwardCell;

                backwardCell->startOffset = startBB->startOffset;
                backwardCell->id = numBlocks++;
                loopBranch->fallThrough = backwardCell;
            } else {
                loopBranch->fallThrough = startBB->next;
            }
#else
            loopBranch->fallThrough = startBB->next;
#endif

            /* Create the chaining cell as the fallthrough of the exit block */
            exitChainingCell = dvmCompilerNewBB(kChainingCellNormal);
            lastBB->next = exitChainingCell;
            lastBB = exitChainingCell;

            exitChainingCell->startOffset = targetOffset;
            exitChainingCell->id = numBlocks++;

            exitBB->fallThrough = exitChainingCell;

            cUnit.hasLoop = true;
        }
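        /*
         * Layout reminder for the pointer arithmetic below (per the Dalvik
         * bytecode format): a packed-switch payload is
         *     ident(1) size(1) first_key(2) targets(size * 2)
         * and a sparse-switch payload is
         *     ident(1) size(1) keys(size * 2) targets(size * 2)
         * with counts in 16-bit code units, so the branch targets start at
         * switchData + 2 + (packed ? 2 : size * 2).
         */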
        if (lastInsn->dalvikInsn.opCode == OP_PACKED_SWITCH ||
            lastInsn->dalvikInsn.opCode == OP_SPARSE_SWITCH) {
            int i;
            const u2 *switchData = desc->method->insns + lastInsn->offset +
                                   lastInsn->dalvikInsn.vB;
            int size = switchData[1];
            int maxChains = MIN(size, MAX_CHAINED_SWITCH_CASES);

            /*
             * Generate the landing pad for cases whose ranks are higher than
             * MAX_CHAINED_SWITCH_CASES. The code will re-enter the interpreter
             * through the NoChain point.
             */
            if (maxChains != size) {
                cUnit.switchOverflowPad =
                    desc->method->insns + lastInsn->offset;
            }

            s4 *targets = (s4 *) (switchData + 2 +
                                  (lastInsn->dalvikInsn.opCode == OP_PACKED_SWITCH ?
                                   2 : size * 2));

            /* One chaining cell for the first MAX_CHAINED_SWITCH_CASES cases */
            for (i = 0; i < maxChains; i++) {
                BasicBlock *caseChain = dvmCompilerNewBB(kChainingCellNormal);
                lastBB->next = caseChain;
                lastBB = caseChain;

                caseChain->startOffset = lastInsn->offset + targets[i];
                caseChain->id = numBlocks++;
            }

            /* One more chaining cell for the default case */
            BasicBlock *caseChain = dvmCompilerNewBB(kChainingCellNormal);
            lastBB->next = caseChain;
            lastBB = caseChain;

            caseChain->startOffset = lastInsn->offset + lastInsn->width;
            caseChain->id = numBlocks++;
        /* Fallthrough block not included in the trace */
        } else if (!isUnconditionalBranch(lastInsn) &&
                   curBB->fallThrough == NULL) {
            /*
             * If the chaining cell is after an invoke or
             * instruction that cannot change the control flow, request a hot
             * chaining cell.
             */
            if (isInvoke || curBB->needFallThroughBranch) {
                lastBB->next = dvmCompilerNewBB(kChainingCellHot);
            } else {
                lastBB->next = dvmCompilerNewBB(kChainingCellNormal);
            }
            lastBB = lastBB->next;
            lastBB->id = numBlocks++;
            lastBB->startOffset = fallThroughOffset;
            curBB->fallThrough = lastBB;
        }
        /* Target block not included in the trace */
        if (curBB->taken == NULL &&
            (isGoto(lastInsn) || isInvoke ||
             (targetOffset != UNKNOWN_TARGET && targetOffset != curOffset))) {
            BasicBlock *newBB;
            if (isInvoke) {
                /* Monomorphic callee */
                if (callee) {
                    newBB = dvmCompilerNewBB(kChainingCellInvokeSingleton);
                    newBB->startOffset = 0;
                    newBB->containingMethod = callee;
                /* Will resolve at runtime */
                } else {
                    newBB = dvmCompilerNewBB(kChainingCellInvokePredicted);
                    newBB->startOffset = 0;
                }
            /* For unconditional branches, request a hot chaining cell */
            } else {
#if !defined(WITH_SELF_VERIFICATION)
                newBB = dvmCompilerNewBB(flags & kInstrUnconditional ?
                                         kChainingCellHot :
                                         kChainingCellNormal);
                newBB->startOffset = targetOffset;
#else
                /* Handle branches that branch back into the block */
                if (targetOffset >= curBB->firstMIRInsn->offset &&
                    targetOffset <= curBB->lastMIRInsn->offset) {
                    newBB = dvmCompilerNewBB(kChainingCellBackwardBranch);
                } else {
                    newBB = dvmCompilerNewBB(flags & kInstrUnconditional ?
                                             kChainingCellHot :
                                             kChainingCellNormal);
                }
                newBB->startOffset = targetOffset;
#endif
            }
            newBB->id = numBlocks++;
            curBB->taken = newBB;
            lastBB->next = newBB;
            lastBB = newBB;
        }
    }
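    /*
     * Chaining cell summary (sketch): the cells created above are exit stubs
     * that initially transfer control back to the interpreter and may later
     * be patched to chain directly to another translation.
     *   kChainingCellNormal          - ordinary branch/fallthrough target
     *   kChainingCellHot             - likely-taken exit (e.g. unconditional)
     *   kChainingCellInvokeSingleton - callee known at compile time
     *   kChainingCellInvokePredicted - callee resolved at runtime
     *   kChainingCellBackwardBranch  - loop back-edge handling
     */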
    /* Now create a special block to host PC reconstruction code */
    lastBB->next = dvmCompilerNewBB(kPCReconstruction);
    lastBB = lastBB->next;
    lastBB->id = numBlocks++;

    /* And one final block that publishes the PC and raises the exception */
    lastBB->next = dvmCompilerNewBB(kExceptionHandling);
    lastBB = lastBB->next;
    lastBB->id = numBlocks++;

    if (cUnit.printMe) {
        char *signature = dexProtoCopyMethodDescriptor(&desc->method->prototype);
        LOGD("TRACEINFO (%d): 0x%08x %s%s.%s 0x%x %d of %d, %d blocks",
             compilationId,
             (intptr_t) desc->method->insns,
             desc->method->clazz->descriptor,
             desc->method->name,
             signature,
             desc->trace[0].frag.startOffset,
             traceSize,
             dexCode->insnsSize,
             numBlocks);
        free(signature);
    }

    BasicBlock **blockList;

    cUnit.traceDesc = desc;
    cUnit.numBlocks = numBlocks;
    blockList = cUnit.blockList =
        dvmCompilerNew(sizeof(BasicBlock *) * numBlocks, true);

    int i;

    for (i = 0, curBB = startBB; i < numBlocks; i++) {
        blockList[i] = curBB;
        curBB = curBB->next;
    }
    /* Make sure all blocks are added to the cUnit */
    assert(curBB == NULL);

    /* Set the instruction set to use (NOTE: later components may change it) */
    cUnit.instructionSet = dvmCompilerInstructionSet();

    /* Inline transformation @ the MIR level */
    if (cUnit.hasInvoke && !(gDvmJit.disableOpt & (1 << kMethodInlining))) {
        dvmCompilerInlineMIR(&cUnit);
    }

    /* Preparation for SSA conversion */
    dvmInitializeSSAConversion(&cUnit);

    if (cUnit.hasLoop) {
        /*
         * If the loop is not optimizable (for example it lacks a single
         * induction variable), punt and recompile the trace with loop
         * optimization disabled.
         */
        bool loopOpt = dvmCompilerLoopOpt(&cUnit);
        if (loopOpt == false) {
            if (cUnit.printMe) {
                LOGD("Loop is not optimizable - retry codegen");
            }
            /* Reset the compiler resource pool */
            dvmCompilerArenaReset();
            return dvmCompileTrace(desc, cUnit.numInsts, info, bailPtr,
                                   optHints | JIT_OPT_NO_LOOP);
        }
    }
    else {
        dvmCompilerNonLoopAnalysis(&cUnit);
    }

    dvmCompilerInitializeRegAlloc(&cUnit);  // Needs to happen after SSA naming

    if (cUnit.printMe) {
        dvmCompilerDumpCompilationUnit(&cUnit);
    }

    /* Allocate Registers */
    dvmCompilerRegAlloc(&cUnit);

    /* Convert MIR to LIR, etc. */
    dvmCompilerMIR2LIR(&cUnit);
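    /*
     * Note on the retry protocol below (a sketch of the contract implied by
     * this code, not a spec): kRetryAll asks for a full reassembly pass, e.g.
     * after instruction-size fixups invalidate previously computed offsets,
     * so we loop; kRetryHalve means the trace as formed could not be
     * assembled within its limits, so the tail of this function recompiles
     * it with half as many instructions.
     */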
    /* Convert LIR into machine code. Loop for recoverable retries */
    do {
        dvmCompilerAssembleLIR(&cUnit, info);
        cUnit.assemblerRetries++;
        if (cUnit.printMe && cUnit.assemblerStatus != kSuccess)
            LOGD("Assembler abort #%d on %d", cUnit.assemblerRetries,
                 cUnit.assemblerStatus);
    } while (cUnit.assemblerStatus == kRetryAll);

    if (cUnit.printMe) {
        dvmCompilerCodegenDump(&cUnit);
        LOGD("End %s%s, %d Dalvik instructions",
             desc->method->clazz->descriptor, desc->method->name,
             cUnit.numInsts);
    }

    /* Reset the compiler resource pool */
    dvmCompilerArenaReset();

    if (cUnit.assemblerStatus == kRetryHalve) {
        /* Halve the instruction count and start from the top */
        return dvmCompileTrace(desc, cUnit.numInsts / 2, info, bailPtr,
                               optHints);
    }

    assert(cUnit.assemblerStatus == kSuccess);
#if defined(WITH_JIT_TUNING)
    methodStats->nativeSize += cUnit.totalSize;
#endif
    return info->codeAddress != NULL;
}

/*
 * Since we may be including instructions from a cold method in the current
 * trace, we need to make sure that all the information associated with the
 * callee is properly initialized. If not, we punt on this inline target.
 *
 * TODO: volatile instructions will be handled later.
 */
bool dvmCompilerCanIncludeThisInstruction(const Method *method,
                                          const DecodedInstruction *insn)
{
    switch (insn->opCode) {
        case OP_NEW_INSTANCE:
        case OP_CHECK_CAST: {
            ClassObject *classPtr = (void*)
                (method->clazz->pDvmDex->pResClasses[insn->vB]);

            /* Class hasn't been initialized yet */
            if (classPtr == NULL) {
                return false;
            }
            return true;
        }
        case OP_SGET_OBJECT:
        case OP_SGET_BOOLEAN:
        case OP_SGET_CHAR:
        case OP_SGET_BYTE:
        case OP_SGET_SHORT:
        case OP_SGET:
        case OP_SGET_WIDE:
        case OP_SPUT_OBJECT:
        case OP_SPUT_BOOLEAN:
        case OP_SPUT_CHAR:
        case OP_SPUT_BYTE:
        case OP_SPUT_SHORT:
        case OP_SPUT:
        case OP_SPUT_WIDE: {
            void *fieldPtr = (void*)
                (method->clazz->pDvmDex->pResFields[insn->vB]);

            if (fieldPtr == NULL) {
                return false;
            }
            return true;
        }
        case OP_INVOKE_SUPER:
        case OP_INVOKE_SUPER_RANGE: {
            int mIndex = method->clazz->pDvmDex->
                pResMethods[insn->vB]->methodIndex;
            const Method *calleeMethod = method->clazz->super->vtable[mIndex];
            if (calleeMethod == NULL) {
                return false;
            }
            return true;
        }
        case OP_INVOKE_SUPER_QUICK:
        case OP_INVOKE_SUPER_QUICK_RANGE: {
            const Method *calleeMethod = method->clazz->super->vtable[insn->vB];
            if (calleeMethod == NULL) {
                return false;
            }
            return true;
        }
        case OP_INVOKE_STATIC:
        case OP_INVOKE_STATIC_RANGE:
        case OP_INVOKE_DIRECT:
        case OP_INVOKE_DIRECT_RANGE: {
            const Method *calleeMethod =
                method->clazz->pDvmDex->pResMethods[insn->vB];
            if (calleeMethod == NULL) {
                return false;
            }
            return true;
        }
        case OP_CONST_CLASS: {
            void *classPtr = (void*)
                (method->clazz->pDvmDex->pResClasses[insn->vB]);

            if (classPtr == NULL) {
                return false;
            }
            return true;
        }
        case OP_CONST_STRING_JUMBO:
        case OP_CONST_STRING: {
            void *strPtr = (void*)
                (method->clazz->pDvmDex->pResStrings[insn->vB]);

            if (strPtr == NULL) {
                return false;
            }
            return true;
        }
        default:
            return true;
    }
}
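/*
 * Rationale (illustration): the pResClasses/pResFields/pResMethods/pResStrings
 * arrays cache entities that have already been resolved; a NULL entry means
 * resolution (and possibly class initialization) has not happened yet.
 * Resolution can throw and needs a proper interpreter frame, which inlined
 * code does not have, so such instructions disqualify the inline target.
 */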
/*
 * Similar to dvmCompileTrace, but the entity processed here is the whole
 * method.
 *
 * TODO: implementation will be revisited when the trace builder can provide
 * whole-method traces.
 */
bool dvmCompileMethod(CompilationUnit *cUnit, const Method *method,
                      JitTranslationInfo *info)
{
    const DexCode *dexCode = dvmGetMethodCode(method);
    const u2 *codePtr = dexCode->insns;
    const u2 *codeEnd = dexCode->insns + dexCode->insnsSize;
    int blockID = 0;
    unsigned int curOffset = 0;

    /* If we've already compiled this method, just return success */
    if (dvmJitGetCodeAddr(codePtr) && !info->discardResult) {
        return true;
    }

    /* Doing method-based compilation */
    cUnit->wholeMethod = true;

    BasicBlock *firstBlock = dvmCompilerNewBB(kDalvikByteCode);
    firstBlock->id = blockID++;

    /* Allocate the bit-vector to track the beginning of basic blocks */
    BitVector *bbStartAddr = dvmCompilerAllocBitVector(dexCode->insnsSize+1,
                                                       false);
    dvmCompilerSetBit(bbStartAddr, 0);

    int numInvokeTargets = 0;

    /*
     * Sequentially go through every instruction first and put them in a single
     * basic block. Identify block boundaries at the same time.
     */
    while (codePtr < codeEnd) {
        MIR *insn = dvmCompilerNew(sizeof(MIR), true);
        insn->offset = curOffset;
        int width = parseInsn(codePtr, &insn->dalvikInsn, false);
        bool isInvoke = false;
        const Method *callee;
        insn->width = width;

        /* Terminate when the data section is seen */
        if (width == 0)
            break;

        if (!dvmCompilerCanIncludeThisInstruction(cUnit->method,
                                                  &insn->dalvikInsn)) {
            return false;
        }

        dvmCompilerAppendMIR(firstBlock, insn);
        /*
         * Check whether this is a block-ending instruction and whether it
         * suggests the start of a new block
         */
        unsigned int target = curOffset;

        /*
         * If findBlockBoundary returns true, it means the current instruction
         * is terminating the current block. If it is a branch, the target
         * address will be recorded in target.
         */
        if (findBlockBoundary(method, insn, curOffset, &target, &isInvoke,
                              &callee)) {
            dvmCompilerSetBit(bbStartAddr, curOffset + width);
            /* Each invoke needs a chaining cell block */
            if (isInvoke) {
                numInvokeTargets++;
            }
            /* A branch will end the current block */
            else if (target != curOffset && target != UNKNOWN_TARGET) {
                dvmCompilerSetBit(bbStartAddr, target);
            }
        }

        codePtr += width;
        /* each bit represents a 16-bit quantity */
        curOffset += width;
    }

    /*
     * The number of blocks will be equal to the number of bits set to 1 in the
     * bit vector minus 1, because the bit representing the location after the
     * last instruction is set to one.
     *
     * We also add additional blocks for invoke chaining and the number is
     * denoted by numInvokeTargets.
     */
    int numBlocks = dvmCountSetBits(bbStartAddr);
    if (dvmIsBitSet(bbStartAddr, dexCode->insnsSize)) {
        numBlocks--;
    }
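    /*
     * Worked example (hypothetical 6-unit method, for illustration):
     *
     *     0: if-eqz v0, +4     (2 units) -> sets bits 2 (next) and 4 (target)
     *     2: const/4 v0, #1    (1 unit)
     *     3: return v0         (1 unit)  -> sets bit 4 (already set)
     *     4: const/4 v0, #0    (1 unit)
     *     5: return v0         (1 unit)  -> sets bit 6 (end of method)
     *
     * Bits {0, 2, 4, 6} are set, so dvmCountSetBits() yields 4; bit 6 equals
     * insnsSize and is discounted, leaving numBlocks == 3 for the blocks
     * starting at offsets 0, 2, and 4.
     */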
    BasicBlock **blockList;
    blockList = cUnit->blockList =
        dvmCompilerNew(sizeof(BasicBlock *) * (numBlocks + numInvokeTargets),
                       true);

    /*
     * Register the first block onto the list and start splitting it into
     * sub-blocks.
     */
    blockList[0] = firstBlock;
    cUnit->numBlocks = 1;

    int i;
    for (i = 0; i < numBlocks; i++) {
        MIR *insn;
        BasicBlock *curBB = blockList[i];
        curOffset = curBB->lastMIRInsn->offset;

        for (insn = curBB->firstMIRInsn->next; insn; insn = insn->next) {
            /* Found the beginning of a new block - see if it has been created yet */
            if (dvmIsBitSet(bbStartAddr, insn->offset)) {
                int j;
                for (j = 0; j < cUnit->numBlocks; j++) {
                    if (blockList[j]->firstMIRInsn->offset == insn->offset)
                        break;
                }

                /* Block not split yet - do it now */
                if (j == cUnit->numBlocks) {
                    BasicBlock *newBB = dvmCompilerNewBB(kDalvikByteCode);
                    newBB->id = blockID++;
                    newBB->firstMIRInsn = insn;
                    newBB->startOffset = insn->offset;
                    newBB->lastMIRInsn = curBB->lastMIRInsn;
                    curBB->lastMIRInsn = insn->prev;
                    insn->prev->next = NULL;
                    insn->prev = NULL;

                    /*
                     * If the insn is not an unconditional branch, set up the
                     * fallthrough link.
                     */
                    if (!isUnconditionalBranch(curBB->lastMIRInsn)) {
                        curBB->fallThrough = newBB;
                    }

                    /*
                     * Fallthrough block of an invoke instruction needs to be
                     * aligned to a 4-byte boundary (alignment instruction to be
                     * inserted later).
                     */
                    if (dexGetInstrFlags(gDvm.instrFlags,
                                         curBB->lastMIRInsn->dalvikInsn.opCode) &
                        kInstrInvoke) {
                        newBB->isFallThroughFromInvoke = true;
                    }

                    /* enqueue the new block */
                    blockList[cUnit->numBlocks++] = newBB;
                    break;
                }
            }
        }
    }

    if (numBlocks != cUnit->numBlocks) {
        LOGE("Expect %d vs %d basic blocks\n", numBlocks, cUnit->numBlocks);
        dvmCompilerAbort(cUnit);
    }

    /* Connect the basic blocks through the taken links */
    for (i = 0; i < numBlocks; i++) {
        BasicBlock *curBB = blockList[i];
        MIR *insn = curBB->lastMIRInsn;
        unsigned int target = insn->offset;
        bool isInvoke = false;
        const Method *callee = NULL;

        findBlockBoundary(method, insn, target, &target, &isInvoke, &callee);

        /* Found a block ended on a branch (not invoke) */
        if (isInvoke == false && target != insn->offset) {
            int j;
            /* Forward branch */
            if (target > insn->offset) {
                j = i + 1;
            } else {
                /* Backward branch */
                j = 0;
            }
            for (; j < numBlocks; j++) {
                if (blockList[j]->firstMIRInsn->offset == target) {
                    curBB->taken = blockList[j];
                    break;
                }
            }
        }

        if (isInvoke) {
            BasicBlock *newBB;
            /* Monomorphic callee */
            if (callee) {
                newBB = dvmCompilerNewBB(kChainingCellInvokeSingleton);
                newBB->startOffset = 0;
                newBB->containingMethod = callee;
            /* Will resolve at runtime */
            } else {
                newBB = dvmCompilerNewBB(kChainingCellInvokePredicted);
                newBB->startOffset = 0;
            }
            newBB->id = blockID++;
            curBB->taken = newBB;
            /* enqueue the new block */
            blockList[cUnit->numBlocks++] = newBB;
        }
    }

    if (cUnit->numBlocks != numBlocks + numInvokeTargets) {
        LOGE("Expect %d vs %d total blocks\n", numBlocks + numInvokeTargets,
             cUnit->numBlocks);
        dvmCompilerDumpCompilationUnit(cUnit);
        dvmCompilerAbort(cUnit);
    }

    /* Set the instruction set to use (NOTE: later components may change it) */
    cUnit->instructionSet = dvmCompilerInstructionSet();

    /* Preparation for SSA conversion */
    dvmInitializeSSAConversion(cUnit);
    /* SSA analysis */
    dvmCompilerNonLoopAnalysis(cUnit);

    /* Needs to happen after SSA naming */
    dvmCompilerInitializeRegAlloc(cUnit);

    /* Allocate Registers */
    dvmCompilerRegAlloc(cUnit);

    /* Convert MIR to LIR, etc. */
    dvmCompilerMIR2LIR(cUnit);

    /* Convert LIR into machine code. */
    dvmCompilerAssembleLIR(cUnit, info);

    if (cUnit->assemblerStatus != kSuccess) {
        return false;
    }

    dvmCompilerDumpCompilationUnit(cUnit);

    dvmCompilerArenaReset();

    return info->codeAddress != NULL;
}