/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Dalvik.h"
#include "libdex/OpCode.h"
#include "interp/Jit.h"
#include "CompilerInternals.h"

/*
 * Parse an instruction, return the length of the instruction
 *
 * codePtr - points at the code unit to decode (not advanced by this call)
 * decInsn - receives the decoded instruction
 * printMe - if true, log the disassembled instruction
 *
 * Returns the instruction width in 16-bit code units, or 0 when codePtr
 * points at an inline data payload rather than a real instruction.
 */
static inline int parseInsn(const u2 *codePtr, DecodedInstruction *decInsn,
                            bool printMe)
{
    u2 instr = *codePtr;
    /* Low byte of the leading code unit is the opcode */
    OpCode opcode = instr & 0xff;
    int insnWidth;

    // Don't parse instruction data
    if (opcode == OP_NOP && instr != 0) {
        /*
         * A NOP opcode with a non-zero high byte marks an inline data
         * payload (switch table / fill-array data), not executable code.
         */
        return 0;
    } else {
        insnWidth = gDvm.instrWidth[opcode];
        if (insnWidth < 0) {
            /*
             * Some table entries store the width negated; the magnitude is
             * the length in code units (NOTE(review): confirm what the sign
             * flags in gDvm.instrWidth — not visible from this file).
             */
            insnWidth = -insnWidth;
        }
    }

    dexDecodeInstruction(gDvm.instrFormat, codePtr, decInsn);
    if (printMe) {
        char *decodedString = dvmCompilerGetDalvikDisassembly(decInsn);
        LOGD("%p: %#06x %s\n", codePtr, opcode, decodedString);
    }
    return insnWidth;
}

/* Sentinel branch-target value for instructions with no static target */
#define UNKNOWN_TARGET 0xffffffff

/*
 * Identify block-ending instructions and collect supplemental information
 * regarding the following instructions.
 */
static inline bool findBlockBoundary(const Method *caller, MIR *insn,
                                     unsigned int curOffset,
                                     unsigned int *target, bool *isInvoke,
                                     const Method **callee)
{
    /*
     * Returns true when insn ends a basic block. Output parameters are
     * written selectively by opcode:
     *   *target   - static branch/callee target, or UNKNOWN_TARGET
     *   *isInvoke - set true ONLY for invoke opcodes (left untouched
     *               otherwise — callers must pre-initialize it)
     *   *callee   - resolved monomorphic callee, invokes only
     */
    switch (insn->dalvikInsn.opCode) {
        /* Target is not compile-time constant */
        case OP_RETURN_VOID:
        case OP_RETURN:
        case OP_RETURN_WIDE:
        case OP_RETURN_OBJECT:
        case OP_THROW:
            *target = UNKNOWN_TARGET;
            break;
        /* Polymorphic dispatch - callee cannot be resolved statically */
        case OP_INVOKE_VIRTUAL:
        case OP_INVOKE_VIRTUAL_RANGE:
        case OP_INVOKE_INTERFACE:
        case OP_INVOKE_INTERFACE_RANGE:
        case OP_INVOKE_VIRTUAL_QUICK:
        case OP_INVOKE_VIRTUAL_QUICK_RANGE:
            *isInvoke = true;
            break;
        case OP_INVOKE_SUPER:
        case OP_INVOKE_SUPER_RANGE: {
            /* vB holds the method index into the resolved-methods table */
            int mIndex = caller->clazz->pDvmDex->
                pResMethods[insn->dalvikInsn.vB]->methodIndex;
            const Method *calleeMethod =
                caller->clazz->super->vtable[mIndex];

            if (calleeMethod && !dvmIsNativeMethod(calleeMethod)) {
                /*
                 * NOTE(review): truncates the code pointer to 32 bits —
                 * assumes a 32-bit target (true for Dalvik-era builds).
                 */
                *target = (unsigned int) calleeMethod->insns;
            }
            *isInvoke = true;
            *callee = calleeMethod;
            break;
        }
        case OP_INVOKE_STATIC:
        case OP_INVOKE_STATIC_RANGE: {
            const Method *calleeMethod =
                caller->clazz->pDvmDex->pResMethods[insn->dalvikInsn.vB];

            if (calleeMethod && !dvmIsNativeMethod(calleeMethod)) {
                *target = (unsigned int) calleeMethod->insns;
            }
            *isInvoke = true;
            *callee = calleeMethod;
            break;
        }
        case OP_INVOKE_SUPER_QUICK:
        case OP_INVOKE_SUPER_QUICK_RANGE: {
            /* Quick form: vB is a direct vtable index in the superclass */
            const Method *calleeMethod =
                caller->clazz->super->vtable[insn->dalvikInsn.vB];

            if (calleeMethod && !dvmIsNativeMethod(calleeMethod)) {
                *target = (unsigned int) calleeMethod->insns;
            }
            *isInvoke = true;
            *callee = calleeMethod;
            break;
        }
        case OP_INVOKE_DIRECT:
        case OP_INVOKE_DIRECT_RANGE: {
            const Method *calleeMethod =
                caller->clazz->pDvmDex->pResMethods[insn->dalvikInsn.vB];
            if (calleeMethod && !dvmIsNativeMethod(calleeMethod)) {
                *target = (unsigned int) calleeMethod->insns;
            }
            *isInvoke = true;
            *callee = calleeMethod;
            break;
        }
        case OP_GOTO:
        case OP_GOTO_16:
        case OP_GOTO_32:
            /* Cast to int reinterprets the offset as signed (may be negative) */
            *target = curOffset + (int) insn->dalvikInsn.vA;
            break;

        case OP_IF_EQ:
        case OP_IF_NE:
        case OP_IF_LT:
        case OP_IF_GE:
        case OP_IF_GT:
        case OP_IF_LE:
            /* Two-register compare: signed branch offset lives in vC */
            *target = curOffset + (int) insn->dalvikInsn.vC;
            break;

        case OP_IF_EQZ:
        case OP_IF_NEZ:
        case OP_IF_LTZ:
        case OP_IF_GEZ:
        case OP_IF_GTZ:
        case OP_IF_LEZ:
            /* Compare-to-zero: signed branch offset lives in vB */
            *target = curOffset + (int) insn->dalvikInsn.vB;
            break;

        default:
            /* Not a block-ending instruction */
            return false;
    }
    return true;
}

/* Returns true if insn is an unconditional goto (any width variant) */
static inline bool isGoto(MIR *insn)
{
    switch (insn->dalvikInsn.opCode) {
        case OP_GOTO:
        case OP_GOTO_16:
        case OP_GOTO_32:
            return true;
        default:
            return false;
    }
}

/*
 * Identify unconditional branch instructions
 */
static inline bool isUnconditionalBranch(MIR *insn)
{
    switch (insn->dalvikInsn.opCode) {
        case OP_RETURN_VOID:
        case OP_RETURN:
        case OP_RETURN_WIDE:
        case OP_RETURN_OBJECT:
            return true;
        default:
            return isGoto(insn);
    }
}

/*
 * dvmHashTableLookup() callback
 *
 * Compares entries by Method pointer identity. NOTE(review): the pointer
 * difference is computed through int casts — assumes 32-bit pointers
 * (Dalvik-era target); on a 64-bit build this could misorder entries.
 */
static int compareMethod(const CompilerMethodStats *m1,
                         const CompilerMethodStats *m2)
{
    return (int) m1->method - (int) m2->method;
}

#if defined(WITH_JIT_TUNING)
/*
 * Analyze each method whose traces are ever compiled. Collect a variety of
 * statistics like the ratio of exercised vs overall code and code bloat
 * ratios.
 */
static CompilerMethodStats *analyzeMethodBody(const Method *method)
{
    const DexCode *dexCode = dvmGetMethodCode(method);
    const u2 *codePtr = dexCode->insns;
    const u2 *codeEnd = dexCode->insns + dexCode->insnsSize;
    int insnSize = 0;
    /*
     * Hash by method name only; same-named methods in different classes
     * collide but are disambiguated by compareMethod (pointer compare).
     */
    int hashValue = dvmComputeUtf8Hash(method->name);

    CompilerMethodStats dummyMethodEntry; // For hash table lookup
    CompilerMethodStats *realMethodEntry; // For hash table storage

    /* For lookup only */
    dummyMethodEntry.method = method;
    realMethodEntry = dvmHashTableLookup(gDvmJit.methodStatsTable, hashValue,
                                         &dummyMethodEntry,
                                         (HashCompareFunc) compareMethod,
                                         false);

    /* Part of this method has been compiled before - just return the entry */
    if (realMethodEntry != NULL) {
        return realMethodEntry;
    }

    /*
     * First time to compile this method - set up a new entry in the hash table
     *
     * NOTE(review): calloc result is not checked; an OOM here would crash on
     * the store below.
     */
    realMethodEntry =
        (CompilerMethodStats *) calloc(1, sizeof(CompilerMethodStats));
    realMethodEntry->method = method;

    /* Insert (doAdd=true); the entry is owned by the table from here on */
    dvmHashTableLookup(gDvmJit.methodStatsTable, hashValue,
                       realMethodEntry,
                       (HashCompareFunc) compareMethod,
                       true);

    /* Count the number of instructions */
    while (codePtr < codeEnd) {
        DecodedInstruction dalvikInsn;
        int width = parseInsn(codePtr, &dalvikInsn, false);

        /* Terminate when the data section is seen */
        if (width == 0)
            break;

        insnSize += width;
        codePtr += width;
    }

    /* insnSize counts 16-bit code units; store the size in bytes */
    realMethodEntry->dalvikSize = insnSize * 2;
    return realMethodEntry;
}
#endif

/*
 * Crawl the stack of the thread that requested compilation to see if any of
 * the ancestors are on the blacklist.
256 */ 257 bool filterMethodByCallGraph(Thread *thread, const char *curMethodName) 258 { 259 /* Crawl the Dalvik stack frames and compare the method name*/ 260 StackSaveArea *ssaPtr = ((StackSaveArea *) thread->curFrame) - 1; 261 while (ssaPtr != ((StackSaveArea *) NULL) - 1) { 262 const Method *method = ssaPtr->method; 263 if (method) { 264 int hashValue = dvmComputeUtf8Hash(method->name); 265 bool found = 266 dvmHashTableLookup(gDvmJit.methodTable, hashValue, 267 (char *) method->name, 268 (HashCompareFunc) strcmp, false) != 269 NULL; 270 if (found) { 271 LOGD("Method %s (--> %s) found on the JIT %s list", 272 method->name, curMethodName, 273 gDvmJit.includeSelectedMethod ? "white" : "black"); 274 return true; 275 } 276 277 } 278 ssaPtr = ((StackSaveArea *) ssaPtr->prevFrame) - 1; 279 }; 280 return false; 281 } 282 283 /* 284 * Main entry point to start trace compilation. Basic blocks are constructed 285 * first and they will be passed to the codegen routines to convert Dalvik 286 * bytecode into machine code. 
287 */ 288 bool dvmCompileTrace(JitTraceDescription *desc, int numMaxInsts, 289 JitTranslationInfo *info, jmp_buf *bailPtr) 290 { 291 const DexCode *dexCode = dvmGetMethodCode(desc->method); 292 const JitTraceRun* currRun = &desc->trace[0]; 293 unsigned int curOffset = currRun->frag.startOffset; 294 unsigned int numInsts = currRun->frag.numInsts; 295 const u2 *codePtr = dexCode->insns + curOffset; 296 int traceSize = 0; // # of half-words 297 const u2 *startCodePtr = codePtr; 298 BasicBlock *startBB, *curBB, *lastBB; 299 int numBlocks = 0; 300 static int compilationId; 301 CompilationUnit cUnit; 302 #if defined(WITH_JIT_TUNING) 303 CompilerMethodStats *methodStats; 304 #endif 305 306 /* If we've already compiled this trace, just return success */ 307 if (dvmJitGetCodeAddr(startCodePtr) && !info->discardResult) { 308 return true; 309 } 310 311 compilationId++; 312 memset(&cUnit, 0, sizeof(CompilationUnit)); 313 314 #if defined(WITH_JIT_TUNING) 315 /* Locate the entry to store compilation statistics for this method */ 316 methodStats = analyzeMethodBody(desc->method); 317 #endif 318 319 /* Set the recover buffer pointer */ 320 cUnit.bailPtr = bailPtr; 321 322 /* Initialize the printMe flag */ 323 cUnit.printMe = gDvmJit.printMe; 324 325 /* Initialize the profile flag */ 326 cUnit.executionCount = gDvmJit.profile; 327 328 /* Identify traces that we don't want to compile */ 329 if (gDvmJit.methodTable) { 330 int len = strlen(desc->method->clazz->descriptor) + 331 strlen(desc->method->name) + 1; 332 char *fullSignature = dvmCompilerNew(len, true); 333 strcpy(fullSignature, desc->method->clazz->descriptor); 334 strcat(fullSignature, desc->method->name); 335 336 int hashValue = dvmComputeUtf8Hash(fullSignature); 337 338 /* 339 * Doing three levels of screening to see whether we want to skip 340 * compiling this method 341 */ 342 343 /* First, check the full "class;method" signature */ 344 bool methodFound = 345 dvmHashTableLookup(gDvmJit.methodTable, hashValue, 346 
fullSignature, (HashCompareFunc) strcmp, 347 false) != 348 NULL; 349 350 /* Full signature not found - check the enclosing class */ 351 if (methodFound == false) { 352 int hashValue = dvmComputeUtf8Hash(desc->method->clazz->descriptor); 353 methodFound = 354 dvmHashTableLookup(gDvmJit.methodTable, hashValue, 355 (char *) desc->method->clazz->descriptor, 356 (HashCompareFunc) strcmp, false) != 357 NULL; 358 /* Enclosing class not found - check the method name */ 359 if (methodFound == false) { 360 int hashValue = dvmComputeUtf8Hash(desc->method->name); 361 methodFound = 362 dvmHashTableLookup(gDvmJit.methodTable, hashValue, 363 (char *) desc->method->name, 364 (HashCompareFunc) strcmp, false) != 365 NULL; 366 367 /* 368 * Debug by call-graph is enabled. Check if the debug list 369 * covers any methods on the VM stack. 370 */ 371 if (methodFound == false && gDvmJit.checkCallGraph == true) { 372 methodFound = 373 filterMethodByCallGraph(info->requestingThread, 374 desc->method->name); 375 } 376 } 377 } 378 379 /* 380 * Under the following conditions, the trace will be *conservatively* 381 * compiled by only containing single-step instructions to and from the 382 * interpreter. 383 * 1) If includeSelectedMethod == false, the method matches the full or 384 * partial signature stored in the hash table. 385 * 386 * 2) If includeSelectedMethod == true, the method does not match the 387 * full and partial signature stored in the hash table. 
388 */ 389 if (gDvmJit.includeSelectedMethod != methodFound) { 390 cUnit.allSingleStep = true; 391 } else { 392 /* Compile the trace as normal */ 393 394 /* Print the method we cherry picked */ 395 if (gDvmJit.includeSelectedMethod == true) { 396 cUnit.printMe = true; 397 } 398 } 399 } 400 401 /* Allocate the entry block */ 402 lastBB = startBB = curBB = dvmCompilerNewBB(kEntryBlock); 403 curBB->startOffset = curOffset; 404 curBB->id = numBlocks++; 405 406 curBB = dvmCompilerNewBB(kDalvikByteCode); 407 curBB->startOffset = curOffset; 408 curBB->id = numBlocks++; 409 410 /* Make the first real dalvik block the fallthrough of the entry block */ 411 startBB->fallThrough = curBB; 412 lastBB->next = curBB; 413 lastBB = curBB; 414 415 if (cUnit.printMe) { 416 LOGD("--------\nCompiler: Building trace for %s, offset 0x%x\n", 417 desc->method->name, curOffset); 418 } 419 420 /* 421 * Analyze the trace descriptor and include up to the maximal number 422 * of Dalvik instructions into the IR. 423 */ 424 while (1) { 425 MIR *insn; 426 int width; 427 insn = dvmCompilerNew(sizeof(MIR), true); 428 insn->offset = curOffset; 429 width = parseInsn(codePtr, &insn->dalvikInsn, cUnit.printMe); 430 431 /* The trace should never incude instruction data */ 432 assert(width); 433 insn->width = width; 434 traceSize += width; 435 dvmCompilerAppendMIR(curBB, insn); 436 cUnit.numInsts++; 437 /* Instruction limit reached - terminate the trace here */ 438 if (cUnit.numInsts >= numMaxInsts) { 439 break; 440 } 441 if (--numInsts == 0) { 442 if (currRun->frag.runEnd) { 443 break; 444 } else { 445 curBB = dvmCompilerNewBB(kDalvikByteCode); 446 lastBB->next = curBB; 447 lastBB = curBB; 448 curBB->id = numBlocks++; 449 currRun++; 450 curOffset = currRun->frag.startOffset; 451 numInsts = currRun->frag.numInsts; 452 curBB->startOffset = curOffset; 453 codePtr = dexCode->insns + curOffset; 454 } 455 } else { 456 curOffset += width; 457 codePtr += width; 458 } 459 } 460 461 #if defined(WITH_JIT_TUNING) 462 
/* Convert # of half-word to bytes */ 463 methodStats->compiledDalvikSize += traceSize * 2; 464 #endif 465 466 /* 467 * Now scan basic blocks containing real code to connect the 468 * taken/fallthrough links. Also create chaining cells for code not included 469 * in the trace. 470 */ 471 for (curBB = startBB; curBB; curBB = curBB->next) { 472 MIR *lastInsn = curBB->lastMIRInsn; 473 /* Skip empty blocks */ 474 if (lastInsn == NULL) { 475 continue; 476 } 477 curOffset = lastInsn->offset; 478 unsigned int targetOffset = curOffset; 479 unsigned int fallThroughOffset = curOffset + lastInsn->width; 480 bool isInvoke = false; 481 const Method *callee = NULL; 482 483 findBlockBoundary(desc->method, curBB->lastMIRInsn, curOffset, 484 &targetOffset, &isInvoke, &callee); 485 486 /* Link the taken and fallthrough blocks */ 487 BasicBlock *searchBB; 488 489 /* No backward branch in the trace - start searching the next BB */ 490 for (searchBB = curBB->next; searchBB; searchBB = searchBB->next) { 491 if (targetOffset == searchBB->startOffset) { 492 curBB->taken = searchBB; 493 } 494 if (fallThroughOffset == searchBB->startOffset) { 495 curBB->fallThrough = searchBB; 496 } 497 } 498 499 int flags = dexGetInstrFlags(gDvm.instrFlags, 500 lastInsn->dalvikInsn.opCode); 501 502 /* 503 * Some blocks are ended by non-control-flow-change instructions, 504 * currently only due to trace length constraint. In this case we need 505 * to generate an explicit branch at the end of the block to jump to 506 * the chaining cell. 
507 * 508 * NOTE: INVOKE_DIRECT_EMPTY is actually not an invoke but a nop 509 */ 510 curBB->needFallThroughBranch = 511 ((flags & (kInstrCanBranch | kInstrCanSwitch | kInstrCanReturn | 512 kInstrInvoke)) == 0) || 513 (lastInsn->dalvikInsn.opCode == OP_INVOKE_DIRECT_EMPTY); 514 515 if (curBB->taken == NULL && 516 curBB->fallThrough == NULL && 517 flags == (kInstrCanBranch | kInstrCanContinue) && 518 fallThroughOffset == startBB->startOffset) { 519 BasicBlock *loopBranch = curBB; 520 BasicBlock *exitBB; 521 BasicBlock *exitChainingCell; 522 523 if (cUnit.printMe) { 524 LOGD("Natural loop detected!"); 525 } 526 exitBB = dvmCompilerNewBB(kExitBlock); 527 lastBB->next = exitBB; 528 lastBB = exitBB; 529 530 exitBB->startOffset = targetOffset; 531 exitBB->id = numBlocks++; 532 exitBB->needFallThroughBranch = true; 533 534 loopBranch->taken = exitBB; 535 #if defined(WITH_SELF_VERIFICATION) 536 BasicBlock *backwardCell = 537 dvmCompilerNewBB(kChainingCellBackwardBranch); 538 lastBB->next = backwardCell; 539 lastBB = backwardCell; 540 541 backwardCell->startOffset = startBB->startOffset; 542 backwardCell->id = numBlocks++; 543 loopBranch->fallThrough = backwardCell; 544 #elif defined(WITH_JIT_TUNING) 545 if (gDvmJit.profile) { 546 BasicBlock *backwardCell = 547 dvmCompilerNewBB(kChainingCellBackwardBranch); 548 lastBB->next = backwardCell; 549 lastBB = backwardCell; 550 551 backwardCell->startOffset = startBB->startOffset; 552 backwardCell->id = numBlocks++; 553 loopBranch->fallThrough = backwardCell; 554 } else { 555 loopBranch->fallThrough = startBB->next; 556 } 557 #else 558 loopBranch->fallThrough = startBB->next; 559 #endif 560 561 /* Create the chaining cell as the fallthrough of the exit block */ 562 exitChainingCell = dvmCompilerNewBB(kChainingCellNormal); 563 lastBB->next = exitChainingCell; 564 lastBB = exitChainingCell; 565 566 exitChainingCell->startOffset = targetOffset; 567 exitChainingCell->id = numBlocks++; 568 569 exitBB->fallThrough = exitChainingCell; 570 
571 cUnit.hasLoop = true; 572 } 573 574 if (lastInsn->dalvikInsn.opCode == OP_PACKED_SWITCH || 575 lastInsn->dalvikInsn.opCode == OP_SPARSE_SWITCH) { 576 int i; 577 const u2 *switchData = desc->method->insns + lastInsn->offset + 578 lastInsn->dalvikInsn.vB; 579 int size = switchData[1]; 580 int maxChains = MIN(size, MAX_CHAINED_SWITCH_CASES); 581 582 /* 583 * Generate the landing pad for cases whose ranks are higher than 584 * MAX_CHAINED_SWITCH_CASES. The code will re-enter the interpreter 585 * through the NoChain point. 586 */ 587 if (maxChains != size) { 588 cUnit.switchOverflowPad = 589 desc->method->insns + lastInsn->offset; 590 } 591 592 s4 *targets = (s4 *) (switchData + 2 + 593 (lastInsn->dalvikInsn.opCode == OP_PACKED_SWITCH ? 594 2 : size * 2)); 595 596 /* One chaining cell for the first MAX_CHAINED_SWITCH_CASES cases */ 597 for (i = 0; i < maxChains; i++) { 598 BasicBlock *caseChain = dvmCompilerNewBB(kChainingCellNormal); 599 lastBB->next = caseChain; 600 lastBB = caseChain; 601 602 caseChain->startOffset = lastInsn->offset + targets[i]; 603 caseChain->id = numBlocks++; 604 } 605 606 /* One more chaining cell for the default case */ 607 BasicBlock *caseChain = dvmCompilerNewBB(kChainingCellNormal); 608 lastBB->next = caseChain; 609 lastBB = caseChain; 610 611 caseChain->startOffset = lastInsn->offset + lastInsn->width; 612 caseChain->id = numBlocks++; 613 /* Fallthrough block not included in the trace */ 614 } else if (!isUnconditionalBranch(lastInsn) && 615 curBB->fallThrough == NULL) { 616 /* 617 * If the chaining cell is after an invoke or 618 * instruction that cannot change the control flow, request a hot 619 * chaining cell. 
620 */ 621 if (isInvoke || curBB->needFallThroughBranch) { 622 lastBB->next = dvmCompilerNewBB(kChainingCellHot); 623 } else { 624 lastBB->next = dvmCompilerNewBB(kChainingCellNormal); 625 } 626 lastBB = lastBB->next; 627 lastBB->id = numBlocks++; 628 lastBB->startOffset = fallThroughOffset; 629 curBB->fallThrough = lastBB; 630 } 631 /* Target block not included in the trace */ 632 if (curBB->taken == NULL && 633 (isGoto(lastInsn) || isInvoke || 634 (targetOffset != UNKNOWN_TARGET && targetOffset != curOffset))) { 635 BasicBlock *newBB; 636 if (isInvoke) { 637 /* Monomorphic callee */ 638 if (callee) { 639 newBB = dvmCompilerNewBB(kChainingCellInvokeSingleton); 640 newBB->startOffset = 0; 641 newBB->containingMethod = callee; 642 /* Will resolve at runtime */ 643 } else { 644 newBB = dvmCompilerNewBB(kChainingCellInvokePredicted); 645 newBB->startOffset = 0; 646 } 647 /* For unconditional branches, request a hot chaining cell */ 648 } else { 649 #if !defined(WITH_SELF_VERIFICATION) 650 newBB = dvmCompilerNewBB(flags & kInstrUnconditional ? 651 kChainingCellHot : 652 kChainingCellNormal); 653 newBB->startOffset = targetOffset; 654 #else 655 /* Handle branches that branch back into the block */ 656 if (targetOffset >= curBB->firstMIRInsn->offset && 657 targetOffset <= curBB->lastMIRInsn->offset) { 658 newBB = dvmCompilerNewBB(kChainingCellBackwardBranch); 659 } else { 660 newBB = dvmCompilerNewBB(flags & kInstrUnconditional ? 
661 kChainingCellHot : 662 kChainingCellNormal); 663 } 664 newBB->startOffset = targetOffset; 665 #endif 666 } 667 newBB->id = numBlocks++; 668 curBB->taken = newBB; 669 lastBB->next = newBB; 670 lastBB = newBB; 671 } 672 } 673 674 /* Now create a special block to host PC reconstruction code */ 675 lastBB->next = dvmCompilerNewBB(kPCReconstruction); 676 lastBB = lastBB->next; 677 lastBB->id = numBlocks++; 678 679 /* And one final block that publishes the PC and raise the exception */ 680 lastBB->next = dvmCompilerNewBB(kExceptionHandling); 681 lastBB = lastBB->next; 682 lastBB->id = numBlocks++; 683 684 if (cUnit.printMe) { 685 LOGD("TRACEINFO (%d): 0x%08x %s%s 0x%x %d of %d, %d blocks", 686 compilationId, 687 (intptr_t) desc->method->insns, 688 desc->method->clazz->descriptor, 689 desc->method->name, 690 desc->trace[0].frag.startOffset, 691 traceSize, 692 dexCode->insnsSize, 693 numBlocks); 694 } 695 696 BasicBlock **blockList; 697 698 cUnit.method = desc->method; 699 cUnit.traceDesc = desc; 700 cUnit.numBlocks = numBlocks; 701 dvmInitGrowableList(&cUnit.pcReconstructionList, 8); 702 blockList = cUnit.blockList = 703 dvmCompilerNew(sizeof(BasicBlock *) * numBlocks, true); 704 705 int i; 706 707 for (i = 0, curBB = startBB; i < numBlocks; i++) { 708 blockList[i] = curBB; 709 curBB = curBB->next; 710 } 711 /* Make sure all blocks are added to the cUnit */ 712 assert(curBB == NULL); 713 714 /* Preparation for SSA conversion */ 715 dvmInitializeSSAConversion(&cUnit); 716 717 718 if (cUnit.hasLoop) { 719 dvmCompilerLoopOpt(&cUnit); 720 } 721 else { 722 dvmCompilerNonLoopAnalysis(&cUnit); 723 } 724 725 dvmCompilerInitializeRegAlloc(&cUnit); // Needs to happen after SSA naming 726 727 if (cUnit.printMe) { 728 dvmCompilerDumpCompilationUnit(&cUnit); 729 } 730 731 /* Set the instruction set to use (NOTE: later components may change it) */ 732 cUnit.instructionSet = dvmCompilerInstructionSet(); 733 734 /* Allocate Registers */ 735 dvmCompilerRegAlloc(&cUnit); 736 737 /* 
Convert MIR to LIR, etc. */ 738 dvmCompilerMIR2LIR(&cUnit); 739 740 /* Convert LIR into machine code. */ 741 dvmCompilerAssembleLIR(&cUnit, info); 742 743 if (cUnit.printMe) { 744 if (cUnit.halveInstCount) { 745 LOGD("Assembler aborted"); 746 } else { 747 dvmCompilerCodegenDump(&cUnit); 748 } 749 LOGD("End %s%s, %d Dalvik instructions", 750 desc->method->clazz->descriptor, desc->method->name, 751 cUnit.numInsts); 752 } 753 754 /* Reset the compiler resource pool */ 755 dvmCompilerArenaReset(); 756 757 /* Success */ 758 if (!cUnit.halveInstCount) { 759 #if defined(WITH_JIT_TUNING) 760 methodStats->nativeSize += cUnit.totalSize; 761 #endif 762 return info->codeAddress != NULL; 763 764 /* Halve the instruction count and retry again */ 765 } else { 766 return dvmCompileTrace(desc, cUnit.numInsts / 2, info, bailPtr); 767 } 768 } 769 770 /* 771 * Similar to dvmCompileTrace, but the entity processed here is the whole 772 * method. 773 * 774 * TODO: implementation will be revisited when the trace builder can provide 775 * whole-method traces. 776 */ 777 bool dvmCompileMethod(const Method *method, JitTranslationInfo *info) 778 { 779 const DexCode *dexCode = dvmGetMethodCode(method); 780 const u2 *codePtr = dexCode->insns; 781 const u2 *codeEnd = dexCode->insns + dexCode->insnsSize; 782 int blockID = 0; 783 unsigned int curOffset = 0; 784 785 BasicBlock *firstBlock = dvmCompilerNewBB(kDalvikByteCode); 786 firstBlock->id = blockID++; 787 788 /* Allocate the bit-vector to track the beginning of basic blocks */ 789 BitVector *bbStartAddr = dvmCompilerAllocBitVector(dexCode->insnsSize+1, 790 false); 791 dvmCompilerSetBit(bbStartAddr, 0); 792 793 /* 794 * Sequentially go through every instruction first and put them in a single 795 * basic block. Identify block boundaries at the mean time. 
796 */ 797 while (codePtr < codeEnd) { 798 MIR *insn = dvmCompilerNew(sizeof(MIR), true); 799 insn->offset = curOffset; 800 int width = parseInsn(codePtr, &insn->dalvikInsn, false); 801 bool isInvoke = false; 802 const Method *callee; 803 insn->width = width; 804 805 /* Terminate when the data section is seen */ 806 if (width == 0) 807 break; 808 dvmCompilerAppendMIR(firstBlock, insn); 809 /* 810 * Check whether this is a block ending instruction and whether it 811 * suggests the start of a new block 812 */ 813 unsigned int target = curOffset; 814 815 /* 816 * If findBlockBoundary returns true, it means the current instruction 817 * is terminating the current block. If it is a branch, the target 818 * address will be recorded in target. 819 */ 820 if (findBlockBoundary(method, insn, curOffset, &target, &isInvoke, 821 &callee)) { 822 dvmCompilerSetBit(bbStartAddr, curOffset + width); 823 if (target != curOffset) { 824 dvmCompilerSetBit(bbStartAddr, target); 825 } 826 } 827 828 codePtr += width; 829 /* each bit represents 16-bit quantity */ 830 curOffset += width; 831 } 832 833 /* 834 * The number of blocks will be equal to the number of bits set to 1 in the 835 * bit vector minus 1, because the bit representing the location after the 836 * last instruction is set to one. 837 */ 838 int numBlocks = dvmCountSetBits(bbStartAddr); 839 if (dvmIsBitSet(bbStartAddr, dexCode->insnsSize)) { 840 numBlocks--; 841 } 842 843 CompilationUnit cUnit; 844 BasicBlock **blockList; 845 846 memset(&cUnit, 0, sizeof(CompilationUnit)); 847 cUnit.method = method; 848 blockList = cUnit.blockList = 849 dvmCompilerNew(sizeof(BasicBlock *) * numBlocks, true); 850 851 /* 852 * Register the first block onto the list and start split it into block 853 * boundaries from there. 
854 */ 855 blockList[0] = firstBlock; 856 cUnit.numBlocks = 1; 857 858 int i; 859 for (i = 0; i < numBlocks; i++) { 860 MIR *insn; 861 BasicBlock *curBB = blockList[i]; 862 curOffset = curBB->lastMIRInsn->offset; 863 864 for (insn = curBB->firstMIRInsn->next; insn; insn = insn->next) { 865 /* Found the beginning of a new block, see if it is created yet */ 866 if (dvmIsBitSet(bbStartAddr, insn->offset)) { 867 int j; 868 for (j = 0; j < cUnit.numBlocks; j++) { 869 if (blockList[j]->firstMIRInsn->offset == insn->offset) 870 break; 871 } 872 873 /* Block not split yet - do it now */ 874 if (j == cUnit.numBlocks) { 875 BasicBlock *newBB = dvmCompilerNewBB(kDalvikByteCode); 876 newBB->id = blockID++; 877 newBB->firstMIRInsn = insn; 878 newBB->startOffset = insn->offset; 879 newBB->lastMIRInsn = curBB->lastMIRInsn; 880 curBB->lastMIRInsn = insn->prev; 881 insn->prev->next = NULL; 882 insn->prev = NULL; 883 884 /* 885 * If the insn is not an unconditional branch, set up the 886 * fallthrough link. 
887 */ 888 if (!isUnconditionalBranch(curBB->lastMIRInsn)) { 889 curBB->fallThrough = newBB; 890 } 891 892 /* enqueue the new block */ 893 blockList[cUnit.numBlocks++] = newBB; 894 break; 895 } 896 } 897 } 898 } 899 900 if (numBlocks != cUnit.numBlocks) { 901 LOGE("Expect %d vs %d basic blocks\n", numBlocks, cUnit.numBlocks); 902 dvmCompilerAbort(&cUnit); 903 } 904 905 /* Connect the basic blocks through the taken links */ 906 for (i = 0; i < numBlocks; i++) { 907 BasicBlock *curBB = blockList[i]; 908 MIR *insn = curBB->lastMIRInsn; 909 unsigned int target = insn->offset; 910 bool isInvoke; 911 const Method *callee; 912 913 findBlockBoundary(method, insn, target, &target, &isInvoke, &callee); 914 915 /* Found a block ended on a branch */ 916 if (target != insn->offset) { 917 int j; 918 /* Forward branch */ 919 if (target > insn->offset) { 920 j = i + 1; 921 } else { 922 /* Backward branch */ 923 j = 0; 924 } 925 for (; j < numBlocks; j++) { 926 if (blockList[j]->firstMIRInsn->offset == target) { 927 curBB->taken = blockList[j]; 928 break; 929 } 930 } 931 932 /* Don't create dummy block for the callee yet */ 933 if (j == numBlocks && !isInvoke) { 934 LOGE("Target not found for insn %x: expect target %x\n", 935 curBB->lastMIRInsn->offset, target); 936 dvmCompilerAbort(&cUnit); 937 } 938 } 939 } 940 941 /* Set the instruction set to use (NOTE: later components may change it) */ 942 cUnit.instructionSet = dvmCompilerInstructionSet(); 943 944 dvmCompilerMIR2LIR(&cUnit); 945 946 dvmCompilerAssembleLIR(&cUnit, info); 947 948 dvmCompilerDumpCompilationUnit(&cUnit); 949 950 dvmCompilerArenaReset(); 951 952 return info->codeAddress != NULL; 953 } 954