/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Main interpreter entry point and support functions.
 *
 * The entry point selects the "standard" or "debug" interpreter and
 * facilitates switching between them. The standard interpreter may
 * use the "fast" or "portable" implementation.
 *
 * Some debugger support functions are included here.
 */
#include "Dalvik.h"
#include "interp/InterpDefs.h"
#if defined(WITH_JIT)
#include "interp/Jit.h"
#endif


/*
 * ===========================================================================
 *      Debugger support
 * ===========================================================================
 */

// fwd
static BreakpointSet* dvmBreakpointSetAlloc();
static void dvmBreakpointSetFree(BreakpointSet* pSet);

#if defined(WITH_JIT)
/* Target-specific save/restore */
extern "C" void dvmJitCalleeSave(double *saveArea);
extern "C" void dvmJitCalleeRestore(double *saveArea);
/* Interpreter entry points from compiled code */
extern "C" void dvmJitToInterpNormal();
extern "C" void dvmJitToInterpNoChain();
extern "C" void dvmJitToInterpPunt();
extern "C" void dvmJitToInterpSingleStep();
extern "C" void dvmJitToInterpTraceSelect();
#if defined(WITH_SELF_VERIFICATION)
extern "C" void dvmJitToInterpBackwardBranch();
#endif
#endif

/*
 * Initialize global breakpoint structures.
 */
bool dvmBreakpointStartup()
{
    gDvm.breakpointSet = dvmBreakpointSetAlloc();
    return (gDvm.breakpointSet != NULL);
}

/*
 * Free resources.
 */
void dvmBreakpointShutdown()
{
    dvmBreakpointSetFree(gDvm.breakpointSet);
}


/*
 * This represents a breakpoint inserted in the instruction stream.
 *
 * The debugger may ask us to create the same breakpoint multiple times.
 * We only remove the breakpoint when the last instance is cleared.
 */
struct Breakpoint {
    Method* method;         /* method we're associated with */
    u2* addr;               /* absolute memory address */
    u1 originalOpcode;      /* original 8-bit opcode value */
    int setCount;           /* #of times this breakpoint was set */
};

/*
 * Set of breakpoints.
 */
struct BreakpointSet {
    /* grab lock before reading or writing anything else in here */
    pthread_mutex_t lock;

    /* vector of breakpoint structures */
    int alloc;
    int count;
    Breakpoint* breakpoints;
};

/*
 * Initialize a BreakpointSet. Initially empty.
 */
static BreakpointSet* dvmBreakpointSetAlloc()
{
    BreakpointSet* pSet = (BreakpointSet*) calloc(1, sizeof(*pSet));

    dvmInitMutex(&pSet->lock);
    /* leave the rest zeroed -- will alloc on first use */

    return pSet;
}

/*
 * Free storage associated with a BreakpointSet.
 */
static void dvmBreakpointSetFree(BreakpointSet* pSet)
{
    if (pSet == NULL)
        return;

    free(pSet->breakpoints);
    free(pSet);
}

/*
 * Lock the breakpoint set.
 *
 * It's not currently necessary to switch to VMWAIT in the event of
 * contention, because nothing in here can block. However, it's possible
 * that the bytecode-updater code could become fancier in the future, so
 * we do the trylock dance as a bit of future-proofing.
 */
static void dvmBreakpointSetLock(BreakpointSet* pSet)
{
    if (dvmTryLockMutex(&pSet->lock) != 0) {
        Thread* self = dvmThreadSelf();
        ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmLockMutex(&pSet->lock);
        dvmChangeStatus(self, oldStatus);
    }
}

/*
 * Unlock the breakpoint set.
 */
static void dvmBreakpointSetUnlock(BreakpointSet* pSet)
{
    dvmUnlockMutex(&pSet->lock);
}

/*
 * Return the #of breakpoints.
 */
static int dvmBreakpointSetCount(const BreakpointSet* pSet)
{
    return pSet->count;
}

/*
 * See if we already have an entry for this address.
 *
 * The BreakpointSet's lock must be acquired before calling here.
 *
 * Returns the index of the breakpoint entry, or -1 if not found.
 */
static int dvmBreakpointSetFind(const BreakpointSet* pSet, const u2* addr)
{
    int i;

    for (i = 0; i < pSet->count; i++) {
        Breakpoint* pBreak = &pSet->breakpoints[i];
        if (pBreak->addr == addr)
            return i;
    }

    return -1;
}

/*
 * Retrieve the opcode that was originally at the specified location.
 *
 * The BreakpointSet's lock must be acquired before calling here.
 *
 * Returns "true" with the opcode in *pOrig on success.
 */
static bool dvmBreakpointSetOriginalOpcode(const BreakpointSet* pSet,
    const u2* addr, u1* pOrig)
{
    int idx = dvmBreakpointSetFind(pSet, addr);
    if (idx < 0)
        return false;

    *pOrig = pSet->breakpoints[idx].originalOpcode;
    return true;
}

/*
 * Check the opcode. If it's a "magic" NOP, indicating the start of
 * switch or array data in the instruction stream, we don't want to set
 * a breakpoint.
 *
 * This can happen because the line number information dx generates
 * associates the switch data with the switch statement's line number,
 * and some debuggers put breakpoints at every address associated with
 * a given line. The result is that the breakpoint stomps on the NOP
 * instruction that doubles as a data table magic number, and an explicit
 * check in the interpreter results in an exception being thrown.
 *
 * We don't want to simply refuse to add the breakpoint to the table,
 * because that confuses the housekeeping. We don't want to reject the
 * debugger's event request, and we want to be sure that there's exactly
 * one un-set operation for every set op.
 */
static bool instructionIsMagicNop(const u2* addr)
{
    u2 curVal = *addr;
    return ((GET_OPCODE(curVal)) == OP_NOP && (curVal >> 8) != 0);
}
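
/*
 * Illustrative note (not part of the original sources): the data-table
 * signatures used by the DEX format are chosen so that their low byte is
 * the NOP opcode. For example, a packed-switch payload begins with the
 * code unit 0x0100; GET_OPCODE(0x0100) is OP_NOP while (0x0100 >> 8) is
 * 0x01, so instructionIsMagicNop() reports it as a "magic" NOP and the
 * table is left untouched. A real nop instruction is the code unit 0x0000,
 * whose high byte is zero, so it can still receive a breakpoint.
 */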

/*
 * Add a breakpoint at a specific address. If the address is already
 * present in the table, this just increments the count.
 *
 * For a new entry, this will extract and preserve the current opcode from
 * the instruction stream, and replace it with a breakpoint opcode.
 *
 * The BreakpointSet's lock must be acquired before calling here.
 *
 * Returns "true" on success.
 */
static bool dvmBreakpointSetAdd(BreakpointSet* pSet, Method* method,
    unsigned int instrOffset)
{
    const int kBreakpointGrowth = 10;
    const u2* addr = method->insns + instrOffset;
    int idx = dvmBreakpointSetFind(pSet, addr);
    Breakpoint* pBreak;

    if (idx < 0) {
        if (pSet->count == pSet->alloc) {
            int newSize = pSet->alloc + kBreakpointGrowth;
            Breakpoint* newVec;

            ALOGV("+++ increasing breakpoint set size to %d", newSize);

            /* pSet->breakpoints will be NULL on first entry */
            newVec = (Breakpoint*)realloc(pSet->breakpoints,
                newSize * sizeof(Breakpoint));
            if (newVec == NULL)
                return false;

            pSet->breakpoints = newVec;
            pSet->alloc = newSize;
        }

        pBreak = &pSet->breakpoints[pSet->count++];
        pBreak->method = method;
        pBreak->addr = (u2*)addr;
        pBreak->originalOpcode = *(u1*)addr;
        pBreak->setCount = 1;

        /*
         * Change the opcode. We must ensure that the BreakpointSet
         * updates happen before we change the opcode.
         *
         * If the method has not been verified, we do NOT insert the
         * breakpoint yet, since that will screw up the verifier. The
         * debugger is allowed to insert breakpoints in unverified code,
         * but since we don't execute unverified code we don't need to
         * alter the bytecode yet.
         *
         * The class init code will "flush" all pending opcode writes
         * before verification completes.
         */
        assert(*(u1*)addr != OP_BREAKPOINT);
        if (dvmIsClassVerified(method->clazz)) {
            ALOGV("Class %s verified, adding breakpoint at %p",
                method->clazz->descriptor, addr);
            if (instructionIsMagicNop(addr)) {
                ALOGV("Refusing to set breakpoint on %04x at %s.%s + %#x",
                    *addr, method->clazz->descriptor, method->name,
                    instrOffset);
            } else {
                ANDROID_MEMBAR_FULL();
                dvmDexChangeDex1(method->clazz->pDvmDex, (u1*)addr,
                    OP_BREAKPOINT);
            }
        } else {
            ALOGV("Class %s NOT verified, deferring breakpoint at %p",
                method->clazz->descriptor, addr);
        }
    } else {
        /*
         * Breakpoint already exists, just increase the count.
         */
        pBreak = &pSet->breakpoints[idx];
        pBreak->setCount++;
    }

    return true;
}

/*
 * Remove one instance of the specified breakpoint. When the count
 * reaches zero, the entry is removed from the table, and the original
 * opcode is restored.
 *
 * The BreakpointSet's lock must be acquired before calling here.
 */
static void dvmBreakpointSetRemove(BreakpointSet* pSet, Method* method,
    unsigned int instrOffset)
{
    const u2* addr = method->insns + instrOffset;
    int idx = dvmBreakpointSetFind(pSet, addr);

    if (idx < 0) {
        /* breakpoint not found in set -- unexpected */
        if (*(u1*)addr == OP_BREAKPOINT) {
            ALOGE("Unable to restore breakpoint opcode (%s.%s +%#x)",
                method->clazz->descriptor, method->name, instrOffset);
            dvmAbort();
        } else {
            ALOGW("Breakpoint was already restored? (%s.%s +%#x)",
                method->clazz->descriptor, method->name, instrOffset);
        }
    } else {
        Breakpoint* pBreak = &pSet->breakpoints[idx];
        if (pBreak->setCount == 1) {
            /*
             * Must restore opcode before removing set entry.
             *
             * If the breakpoint was never flushed, we could be overwriting
             * a value with the same value. Not a problem, though we
             * could end up causing a copy-on-write here when we didn't
             * need to. (Not worth worrying about.)
             */
            dvmDexChangeDex1(method->clazz->pDvmDex, (u1*)addr,
                pBreak->originalOpcode);
            ANDROID_MEMBAR_FULL();

            if (idx != pSet->count-1) {
                /* shift down */
                memmove(&pSet->breakpoints[idx], &pSet->breakpoints[idx+1],
                    (pSet->count-1 - idx) * sizeof(pSet->breakpoints[0]));
            }
            pSet->count--;
            pSet->breakpoints[pSet->count].addr = (u2*) 0xdecadead;  // debug
        } else {
            pBreak->setCount--;
            assert(pBreak->setCount > 0);
        }
    }
}

/*
 * Flush any breakpoints associated with methods in "clazz". We want to
 * change the opcode, which might not have happened when the breakpoint
 * was initially set because the class was in the process of being
 * verified.
 *
 * The BreakpointSet's lock must be acquired before calling here.
 */
static void dvmBreakpointSetFlush(BreakpointSet* pSet, ClassObject* clazz)
{
    int i;
    for (i = 0; i < pSet->count; i++) {
        Breakpoint* pBreak = &pSet->breakpoints[i];
        if (pBreak->method->clazz == clazz) {
            /*
             * The breakpoint is associated with a method in this class.
             * It might already be there or it might not; either way,
             * flush it out.
             */
            ALOGV("Flushing breakpoint at %p for %s",
                pBreak->addr, clazz->descriptor);
            if (instructionIsMagicNop(pBreak->addr)) {
                ALOGV("Refusing to flush breakpoint on %04x at %s.%s + %#x",
                    *pBreak->addr, pBreak->method->clazz->descriptor,
                    pBreak->method->name,
                    pBreak->addr - pBreak->method->insns);
            } else {
                dvmDexChangeDex1(clazz->pDvmDex, (u1*)pBreak->addr,
                    OP_BREAKPOINT);
            }
        }
    }
}


/*
 * Do any debugger-attach-time initialization.
 */
void dvmInitBreakpoints()
{
    /* quick sanity check */
    BreakpointSet* pSet = gDvm.breakpointSet;
    dvmBreakpointSetLock(pSet);
    if (dvmBreakpointSetCount(pSet) != 0) {
        ALOGW("WARNING: %d leftover breakpoints", dvmBreakpointSetCount(pSet));
        /* generally not good, but we can keep going */
    }
    dvmBreakpointSetUnlock(pSet);
}

/*
 * Add an address to the list, putting it in the first non-empty slot.
 *
 * Sometimes the debugger likes to add two entries for one breakpoint.
 * We add two entries here, so that we get the right behavior when it's
 * removed twice.
 *
 * This will only be run from the JDWP thread, and it will happen while
 * we are updating the event list, which is synchronized. We're guaranteed
 * to be the only one adding entries, and the lock ensures that nobody
 * will be trying to remove them while we're in here.
 *
 * "addr" is the absolute address of the breakpoint bytecode.
 */
void dvmAddBreakAddr(Method* method, unsigned int instrOffset)
{
    BreakpointSet* pSet = gDvm.breakpointSet;
    dvmBreakpointSetLock(pSet);
    dvmBreakpointSetAdd(pSet, method, instrOffset);
    dvmBreakpointSetUnlock(pSet);
}

/*
 * Remove an address from the list by setting the entry to NULL.
 *
 * This can be called from the JDWP thread (because the debugger has
 * cancelled the breakpoint) or from an event thread (because it's a
 * single-shot breakpoint, e.g. "run to line"). We only get here as
 * the result of removing an entry from the event list, which is
 * synchronized, so it should not be possible for two threads to be
 * updating breakpoints at the same time.
 */
void dvmClearBreakAddr(Method* method, unsigned int instrOffset)
{
    BreakpointSet* pSet = gDvm.breakpointSet;
    dvmBreakpointSetLock(pSet);
    dvmBreakpointSetRemove(pSet, method, instrOffset);
    dvmBreakpointSetUnlock(pSet);
}

/*
 * Get the original opcode from under a breakpoint.
 *
 * On SMP hardware it's possible one core might try to execute a breakpoint
 * after another core has cleared it. We need to handle the case where
 * there's no entry in the breakpoint set. (The memory barriers in the
 * locks and in the breakpoint update code should ensure that, once we've
 * observed the absence of a breakpoint entry, we will also now observe
 * the restoration of the original opcode. The fact that we're holding
 * the lock prevents other threads from confusing things further.)
 */
u1 dvmGetOriginalOpcode(const u2* addr)
{
    BreakpointSet* pSet = gDvm.breakpointSet;
    u1 orig = 0;

    dvmBreakpointSetLock(pSet);
    if (!dvmBreakpointSetOriginalOpcode(pSet, addr, &orig)) {
        orig = *(u1*)addr;
        if (orig == OP_BREAKPOINT) {
            ALOGE("GLITCH: can't find breakpoint, opcode is still set");
            dvmAbort();
        }
    }
    dvmBreakpointSetUnlock(pSet);

    return orig;
}

/*
 * Flush any breakpoints associated with methods in "clazz".
 *
 * We don't want to modify the bytecode of a method before the verifier
 * gets a chance to look at it, so we postpone opcode replacement until
 * after verification completes.
 */
void dvmFlushBreakpoints(ClassObject* clazz)
{
    BreakpointSet* pSet = gDvm.breakpointSet;

    if (pSet == NULL)
        return;

    assert(dvmIsClassVerified(clazz));
    dvmBreakpointSetLock(pSet);
    dvmBreakpointSetFlush(pSet, clazz);
    dvmBreakpointSetUnlock(pSet);
}

/*
 * Add a single step event. Currently this is a global item.
 *
 * We set up some initial values based on the thread's current state. This
 * won't work well if the thread is running, so it's up to the caller to
 * verify that it's suspended.
 *
 * This is only called from the JDWP thread.
 */
bool dvmAddSingleStep(Thread* thread, int size, int depth)
{
    StepControl* pCtrl = &gDvm.stepControl;

    if (pCtrl->active && thread != pCtrl->thread) {
        ALOGW("WARNING: single-step active for %p; adding %p",
            pCtrl->thread, thread);

        /*
         * Keep going, overwriting previous. This can happen if you
         * suspend a thread in Object.wait, hit the single-step key, then
         * switch to another thread and do the same thing again.
         * The first thread's step is still pending.
         *
         * TODO: consider making single-step per-thread. Adds to the
         * overhead, but could be useful in rare situations.
         */
    }

    pCtrl->size = static_cast<JdwpStepSize>(size);
    pCtrl->depth = static_cast<JdwpStepDepth>(depth);
    pCtrl->thread = thread;

    /*
     * We may be stepping into or over method calls, or running until we
     * return from the current method. To make this work we need to track
     * the current line, current method, and current stack depth. We need
     * to be checking these after most instructions, notably those that
     * call methods, return from methods, or are on a different line from the
     * previous instruction.
     *
     * We have to start with a snapshot of the current state. If we're in
     * an interpreted method, everything we need is in the current frame. If
     * we're in a native method, possibly with some extra JNI frames pushed
     * on by PushLocalFrame, we want to use the topmost native method.
     */
    const StackSaveArea* saveArea;
    u4* fp;
    u4* prevFp = NULL;

    for (fp = thread->interpSave.curFrame; fp != NULL;
         fp = saveArea->prevFrame) {
        const Method* method;

        saveArea = SAVEAREA_FROM_FP(fp);
        method = saveArea->method;

        if (!dvmIsBreakFrame((u4*)fp) && !dvmIsNativeMethod(method))
            break;
        prevFp = fp;
    }
    if (fp == NULL) {
        ALOGW("Unexpected: step req in native-only threadid=%d",
            thread->threadId);
        return false;
    }
    if (prevFp != NULL) {
        /*
         * First interpreted frame wasn't the one at the bottom. Break
         * frames are only inserted when calling from native->interp, so we
         * don't need to worry about one being here.
         */
        ALOGV("##### init step while in native method");
        fp = prevFp;
        assert(!dvmIsBreakFrame((u4*)fp));
        assert(dvmIsNativeMethod(SAVEAREA_FROM_FP(fp)->method));
        saveArea = SAVEAREA_FROM_FP(fp);
    }

    /*
     * Pull the goodies out. "xtra.currentPc" should be accurate since
     * we update it on every instruction while the debugger is connected.
     */
    pCtrl->method = saveArea->method;
    // Clear out any old address set
    if (pCtrl->pAddressSet != NULL) {
        // (discard const)
        free((void *)pCtrl->pAddressSet);
        pCtrl->pAddressSet = NULL;
    }
    if (dvmIsNativeMethod(pCtrl->method)) {
        pCtrl->line = -1;
    } else {
        pCtrl->line = dvmLineNumFromPC(saveArea->method,
            saveArea->xtra.currentPc - saveArea->method->insns);
        pCtrl->pAddressSet
            = dvmAddressSetForLine(saveArea->method, pCtrl->line);
    }
    pCtrl->frameDepth =
        dvmComputeVagueFrameDepth(thread, thread->interpSave.curFrame);
    pCtrl->active = true;

    ALOGV("##### step init: thread=%p meth=%p '%s' line=%d frameDepth=%d depth=%s size=%s",
        pCtrl->thread, pCtrl->method, pCtrl->method->name,
        pCtrl->line, pCtrl->frameDepth,
        dvmJdwpStepDepthStr(pCtrl->depth),
        dvmJdwpStepSizeStr(pCtrl->size));

    return true;
}

/*
 * Disable a single step event.
 */
void dvmClearSingleStep(Thread* thread)
{
    UNUSED_PARAMETER(thread);

    gDvm.stepControl.active = false;
}

/*
 * The interpreter just threw. Handle any special subMode requirements.
 * All interpSave state must be valid on entry.
 */
void dvmReportExceptionThrow(Thread* self, Object* exception)
{
    const Method* curMethod = self->interpSave.method;
#if defined(WITH_JIT)
    if (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) {
        dvmJitEndTraceSelect(self, self->interpSave.pc);
    }
    if (self->interpBreak.ctl.breakFlags & kInterpSingleStep) {
        /* Discard any single-step native returns to translation */
        self->jitResumeNPC = NULL;
    }
#endif
    if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
        void *catchFrame;
        int offset = self->interpSave.pc - curMethod->insns;
        int catchRelPc = dvmFindCatchBlock(self, offset, exception,
            true, &catchFrame);
        dvmDbgPostException(self->interpSave.curFrame, offset, catchFrame,
            catchRelPc, exception);
    }
}

/*
 * The interpreter is preparing to do an invoke (both native & normal).
 * Handle any special subMode requirements. All interpSave state
 * must be valid on entry.
 */
void dvmReportInvoke(Thread* self, const Method* methodToCall)
{
    TRACE_METHOD_ENTER(self, methodToCall);
}

/*
 * The interpreter is preparing to do a native invoke. Handle any
 * special subMode requirements. NOTE: for a native invoke,
 * dvmReportInvoke() and dvmReportPreNativeInvoke() will both
 * be called prior to the invoke. fp is the Dalvik FP of the calling
 * method.
 */
void dvmReportPreNativeInvoke(const Method* methodToCall, Thread* self, u4* fp)
{
#if defined(WITH_JIT)
    /*
     * Actively building a trace? If so, end it now. The trace
     * builder can't follow into or through a native method.
     */
    if (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) {
        dvmCheckJit(self->interpSave.pc, self);
    }
#endif
    if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
        Object* thisPtr = dvmGetThisPtr(self->interpSave.method, fp);
        assert(thisPtr == NULL || dvmIsHeapAddress(thisPtr));
        dvmDbgPostLocationEvent(methodToCall, -1, thisPtr, DBG_METHOD_ENTRY);
    }
}

/*
 * The interpreter has returned from a native invoke. Handle any
 * special subMode requirements. fp is the Dalvik FP of the calling
 * method.
 */
void dvmReportPostNativeInvoke(const Method* methodToCall, Thread* self, u4* fp)
{
    if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
        Object* thisPtr = dvmGetThisPtr(self->interpSave.method, fp);
        assert(thisPtr == NULL || dvmIsHeapAddress(thisPtr));
        dvmDbgPostLocationEvent(methodToCall, -1, thisPtr, DBG_METHOD_EXIT);
    }
    if (self->interpBreak.ctl.subMode & kSubModeMethodTrace) {
        dvmFastNativeMethodTraceExit(methodToCall, self);
    }
}

/*
 * The interpreter has returned from a normal method. Handle any special
 * subMode requirements. All interpSave state must be valid on entry.
 */
void dvmReportReturn(Thread* self)
{
    TRACE_METHOD_EXIT(self, self->interpSave.method);
#if defined(WITH_JIT)
    if (dvmIsBreakFrame(self->interpSave.curFrame) &&
        (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild)) {
        dvmCheckJit(self->interpSave.pc, self);
    }
#endif
}

/*
 * Update the debugger on interesting events, such as hitting a breakpoint
 * or a single-step point. This is called from the top of the interpreter
 * loop, before the current instruction is processed.
 *
 * Set "methodEntry" if we've just entered the method. This detects
 * method exit by checking to see if the next instruction is "return".
 *
 * This can't catch native method entry/exit, so we have to handle that
 * at the point of invocation. We also need to catch it in dvmCallMethod
 * if we want to capture native->native calls made through JNI.
 *
 * Notes to self:
 * - Don't want to switch to VMWAIT while posting events to the debugger.
 *   Let the debugger code decide if we need to change state.
 * - We may want to check for debugger-induced thread suspensions on
 *   every instruction. That would make a "suspend all" more responsive
 *   and reduce the chances of multiple simultaneous events occurring.
 *   However, it could change the behavior some.
 *
 * TODO: method entry/exit events are probably less common than location
 * breakpoints. We may be able to speed things up a bit if we don't query
 * the event list unless we know there's at least one lurking within.
 */
static void updateDebugger(const Method* method, const u2* pc, const u4* fp,
    Thread* self)
{
    int eventFlags = 0;

    /*
     * Update xtra.currentPc on every instruction. We need to do this if
     * there's a chance that we could get suspended. This can happen if
     * eventFlags != 0 here, or somebody manually requests a suspend
     * (which gets handled at PERIOD_CHECKS time). One place where this
     * needs to be correct is in dvmAddSingleStep().
     */
    dvmExportPC(pc, fp);

    if (self->debugIsMethodEntry) {
        eventFlags |= DBG_METHOD_ENTRY;
        self->debugIsMethodEntry = false;
    }

    /*
     * See if we have a breakpoint here.
     *
     * Depending on the "mods" associated with event(s) on this address,
     * we may or may not actually send a message to the debugger.
     */
    if (GET_OPCODE(*pc) == OP_BREAKPOINT) {
        ALOGV("+++ breakpoint hit at %p", pc);
        eventFlags |= DBG_BREAKPOINT;
    }

    /*
     * If the debugger is single-stepping one of our threads, check to
     * see if we're that thread and we've reached a step point.
     */
    const StepControl* pCtrl = &gDvm.stepControl;
    if (pCtrl->active && pCtrl->thread == self) {
        int frameDepth;
        bool doStop = false;
        const char* msg = NULL;

        assert(!dvmIsNativeMethod(method));

        if (pCtrl->depth == SD_INTO) {
            /*
             * Step into method calls. We break when the line number
             * or method pointer changes. If we're in SS_MIN mode, we
             * always stop.
             */
            if (pCtrl->method != method) {
                doStop = true;
                msg = "new method";
            } else if (pCtrl->size == SS_MIN) {
                doStop = true;
                msg = "new instruction";
            } else if (!dvmAddressSetGet(
                    pCtrl->pAddressSet, pc - method->insns)) {
                doStop = true;
                msg = "new line";
            }
        } else if (pCtrl->depth == SD_OVER) {
            /*
             * Step over method calls. We break when the line number is
             * different and the frame depth is <= the original frame
             * depth. (We can't just compare on the method, because we
             * might get unrolled past it by an exception, and it's tricky
             * to identify recursion.)
             */
            frameDepth = dvmComputeVagueFrameDepth(self, fp);
            if (frameDepth < pCtrl->frameDepth) {
                /* popped up one or more frames, always trigger */
                doStop = true;
                msg = "method pop";
            } else if (frameDepth == pCtrl->frameDepth) {
                /* same depth, see if we moved */
                if (pCtrl->size == SS_MIN) {
                    doStop = true;
                    msg = "new instruction";
                } else if (!dvmAddressSetGet(pCtrl->pAddressSet,
                        pc - method->insns)) {
                    doStop = true;
                    msg = "new line";
                }
            }
        } else {
            assert(pCtrl->depth == SD_OUT);
            /*
             * Return from the current method. We break when the frame
             * depth pops up.
             *
             * This differs from the "method exit" break in that it stops
             * with the PC at the next instruction in the returned-to
             * function, rather than the end of the returning function.
             */
            frameDepth = dvmComputeVagueFrameDepth(self, fp);
            if (frameDepth < pCtrl->frameDepth) {
                doStop = true;
                msg = "method pop";
            }
        }

        if (doStop) {
            ALOGV("#####S %s", msg);
            eventFlags |= DBG_SINGLE_STEP;
        }
    }

    /*
     * Check to see if this is a "return" instruction. JDWP says we should
     * send the event *after* the code has been executed, but it also says
     * the location we provide is the last instruction. Since the "return"
     * instruction has no interesting side effects, we should be safe.
     * (We can't just move this down to the returnFromMethod label because
     * we potentially need to combine it with other events.)
     *
     * We're also not supposed to generate a method exit event if the method
     * terminates "with a thrown exception".
     */
    u2 opcode = GET_OPCODE(*pc);
    if (opcode == OP_RETURN_VOID || opcode == OP_RETURN ||
        opcode == OP_RETURN_VOID_BARRIER || opcode == OP_RETURN_OBJECT ||
        opcode == OP_RETURN_WIDE)
    {
        eventFlags |= DBG_METHOD_EXIT;
    }

    /*
     * If there's something interesting going on, see if it matches one
     * of the debugger filters.
     */
    if (eventFlags != 0) {
        Object* thisPtr = dvmGetThisPtr(method, fp);
        if (thisPtr != NULL && !dvmIsHeapAddress(thisPtr)) {
            /*
             * TODO: remove this check if we're confident that the "this"
             * pointer is where it should be -- slows us down, especially
             * during single-step.
             */
            char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
            ALOGE("HEY: invalid 'this' ptr %p (%s.%s %s)", thisPtr,
                method->clazz->descriptor, method->name, desc);
            free(desc);
            dvmAbort();
        }
        dvmDbgPostLocationEvent(method, pc - method->insns, thisPtr,
            eventFlags);
    }
}

/*
 * Recover the "this" pointer from the current interpreted method. "this"
 * is always in "in0" for non-static methods.
 *
 * The "ins" start at (#of registers - #of ins). Note in0 != v0.
 *
 * This works because "dx" guarantees that it will work. It's probably
 * fairly common to have a virtual method that doesn't use its "this"
 * pointer, in which case we're potentially wasting a register. However,
 * the debugger doesn't treat "this" as just another argument. For
 * example, events (such as breakpoints) can be enabled for specific
 * values of "this". There is also a separate StackFrame.ThisObject call
 * in JDWP that is expected to work for any non-native non-static method.
 *
 * Because we need it when setting up debugger event filters, we want to
 * be able to do this quickly.
 */
Object* dvmGetThisPtr(const Method* method, const u4* fp)
{
    if (dvmIsStaticMethod(method))
        return NULL;
    return (Object*)fp[method->registersSize - method->insSize];
}

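/*
 * Worked example (illustrative, not from the original sources): for a
 * virtual method with registersSize=5 and insSize=3 (the implicit "this"
 * plus two explicit arguments), the ins occupy v2..v4, so "this" is in0,
 * which is v2, and the expression above reads fp[5 - 3] == fp[2].
 */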

#if defined(WITH_TRACKREF_CHECKS)
/*
 * Verify that all internally-tracked references have been released. If
 * they haven't, print them and abort the VM.
 *
 * "debugTrackedRefStart" indicates how many refs were on the list when
 * we were first invoked.
 */
void dvmInterpCheckTrackedRefs(Thread* self, const Method* method,
    int debugTrackedRefStart)
{
    if (dvmReferenceTableEntries(&self->internalLocalRefTable)
        != (size_t) debugTrackedRefStart)
    {
        char* desc;
        Object** top;
        int count;

        count = dvmReferenceTableEntries(&self->internalLocalRefTable);

        ALOGE("TRACK: unreleased internal reference (prev=%d total=%d)",
            debugTrackedRefStart, count);
        desc = dexProtoCopyMethodDescriptor(&method->prototype);
        ALOGE(" current method is %s.%s %s", method->clazz->descriptor,
            method->name, desc);
        free(desc);
        top = self->internalLocalRefTable.table + debugTrackedRefStart;
        while (top < self->internalLocalRefTable.nextEntry) {
            ALOGE(" %p (%s)",
                *top,
                ((*top)->clazz != NULL) ? (*top)->clazz->descriptor : "");
            top++;
        }
        dvmDumpThread(self, false);

        dvmAbort();
    }
    //ALOGI("TRACK OK");
}
#endif


#ifdef LOG_INSTR
/*
 * Dump the v-registers. Sent to the ILOG log tag.
 */
void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly)
{
    int i, localCount;

    localCount = method->registersSize - method->insSize;

    ALOG(LOG_VERBOSE, LOG_TAG"i", "Registers (fp=%p):", framePtr);
    for (i = method->registersSize-1; i >= 0; i--) {
        if (i >= localCount) {
            ALOG(LOG_VERBOSE, LOG_TAG"i", " v%-2d in%-2d : 0x%08x",
                i, i-localCount, framePtr[i]);
        } else {
            if (inOnly) {
                ALOG(LOG_VERBOSE, LOG_TAG"i", " [...]");
                break;
            }
            const char* name = "";
#if 0   // "locals" structure has changed -- need to rewrite this
            int j;
            DexFile* pDexFile = method->clazz->pDexFile;
            const DexCode* pDexCode = dvmGetMethodCode(method);
            int localsSize = dexGetLocalsSize(pDexFile, pDexCode);
            const DexLocal* locals = dvmDexGetLocals(pDexFile, pDexCode);
            for (j = 0; j < localsSize; j++) {
                if (locals[j].registerNum == (u4) i) {
                    name = dvmDexStringStr(locals[j].pName);
                    break;
                }
            }
#endif
            ALOG(LOG_VERBOSE, LOG_TAG"i", " v%-2d : 0x%08x %s",
                i, framePtr[i], name);
        }
    }
}
#endif


/*
 * ===========================================================================
 *      Entry point and general support functions
 * ===========================================================================
 */

/*
 * Find the matching case. Returns the offset to the handler instructions.
 *
 * Returns 3 if we don't find a match (it's the size of the packed-switch
 * instruction).
 */
s4 dvmInterpHandlePackedSwitch(const u2* switchData, s4 testVal)
{
    const int kInstrLen = 3;

    /*
     * Packed switch data format:
     *  ushort ident = 0x0100   magic value
     *  ushort size             number of entries in the table
     *  int first_key           first (and lowest) switch case value
     *  int targets[size]       branch targets, relative to switch opcode
     *
     * Total size is (4+size*2) 16-bit code units.
     */
    if (*switchData++ != kPackedSwitchSignature) {
        /* should have been caught by verifier */
        dvmThrowInternalError("bad packed switch magic");
        return kInstrLen;
    }

    u2 size = *switchData++;
    assert(size > 0);

    s4 firstKey = *switchData++;
    firstKey |= (*switchData++) << 16;

    int index = testVal - firstKey;
    if (index < 0 || index >= size) {
        LOGVV("Value %d not found in switch (%d-%d)",
            testVal, firstKey, firstKey+size-1);
        return kInstrLen;
    }

    /* The entries are guaranteed to be aligned on a 32-bit boundary;
     * we can treat them as a native int array.
     */
    const s4* entries = (const s4*) switchData;
    assert(((u4)entries & 0x3) == 0);

    assert(index >= 0 && index < size);
    LOGVV("Value %d found in slot %d (goto 0x%02x)",
        testVal, index,
        s4FromSwitchData(&entries[index]));
    return s4FromSwitchData(&entries[index]);
}

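/*
 * Illustrative sketch (not part of the original sources): a hypothetical
 * packed-switch payload with first_key=10 and targets {4,7,9}. Keys 10..12
 * map to the corresponding target; anything else falls through to offset 3,
 * the size of the packed-switch instruction itself.
 */
#if 0   /* example only -- never compiled into the VM */
static void examplePackedSwitchLookup()
{
    static const u2 data[] __attribute__((aligned(4))) = {
        0x0100,             /* ident: packed-switch signature */
        0x0003,             /* size: 3 entries */
        0x000a, 0x0000,     /* first_key = 10 (low/high code units) */
        0x0004, 0x0000,     /* target for key 10 */
        0x0007, 0x0000,     /* target for key 11 */
        0x0009, 0x0000,     /* target for key 12 */
    };
    assert(dvmInterpHandlePackedSwitch(data, 11) == 7);
    assert(dvmInterpHandlePackedSwitch(data, 99) == 3);   /* no match */
}
#endif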

/*
 * Find the matching case. Returns the offset to the handler instructions.
 *
 * Returns 3 if we don't find a match (it's the size of the sparse-switch
 * instruction).
 */
s4 dvmInterpHandleSparseSwitch(const u2* switchData, s4 testVal)
{
    const int kInstrLen = 3;
    u2 size;
    const s4* keys;
    const s4* entries;

    /*
     * Sparse switch data format:
     *  ushort ident = 0x0200   magic value
     *  ushort size             number of entries in the table; > 0
     *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
     *  int targets[size]       branch targets, relative to switch opcode
     *
     * Total size is (2+size*4) 16-bit code units.
     */

    if (*switchData++ != kSparseSwitchSignature) {
        /* should have been caught by verifier */
        dvmThrowInternalError("bad sparse switch magic");
        return kInstrLen;
    }

    size = *switchData++;
    assert(size > 0);

    /* The keys are guaranteed to be aligned on a 32-bit boundary;
     * we can treat them as a native int array.
     */
    keys = (const s4*) switchData;
    assert(((u4)keys & 0x3) == 0);

    /* The entries are guaranteed to be aligned on a 32-bit boundary;
     * we can treat them as a native int array.
     */
    entries = keys + size;
    assert(((u4)entries & 0x3) == 0);

    /*
     * Binary-search through the array of keys, which are guaranteed to
     * be sorted low-to-high.
     */
    int lo = 0;
    int hi = size - 1;
    while (lo <= hi) {
        int mid = (lo + hi) >> 1;

        s4 foundVal = s4FromSwitchData(&keys[mid]);
        if (testVal < foundVal) {
            hi = mid - 1;
        } else if (testVal > foundVal) {
            lo = mid + 1;
        } else {
            LOGVV("Value %d found in entry %d (goto 0x%02x)",
                testVal, mid, s4FromSwitchData(&entries[mid]));
            return s4FromSwitchData(&entries[mid]);
        }
    }

    LOGVV("Value %d not found in switch", testVal);
    return kInstrLen;
}

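/*
 * Worked example (illustrative, not from the original sources): with keys
 * {-1, 10, 100} and targets {5, 8, 11}, a testVal of 10 converges on the
 * middle key after one probe and returns 8, while a testVal of 50 misses
 * every key and returns 3, the width of the sparse-switch instruction
 * itself.
 */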

/*
 * Copy data for a fill-array-data instruction. On a little-endian machine
 * we can just do a memcpy(), on a big-endian system we have work to do.
 *
 * The trick here is that dexopt has byte-swapped each code unit, which is
 * exactly what we want for short/char data. For byte data we need to undo
 * the swap, and for 4- or 8-byte values we need to swap pieces within
 * each word.
 */
static void copySwappedArrayData(void* dest, const u2* src, u4 size, u2 width)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
    memcpy(dest, src, size*width);
#else
    int i;

    switch (width) {
    case 1:
        /* un-swap pairs of bytes as we go */
        for (i = (size-1) & ~1; i >= 0; i -= 2) {
            ((u1*)dest)[i] = ((u1*)src)[i+1];
            ((u1*)dest)[i+1] = ((u1*)src)[i];
        }
        /*
         * "src" is padded to end on a two-byte boundary, but we don't want to
         * assume "dest" is, so we handle odd length specially.
         */
        if ((size & 1) != 0) {
            ((u1*)dest)[size-1] = ((u1*)src)[size];
        }
        break;
    case 2:
        /* already swapped correctly */
        memcpy(dest, src, size*width);
        break;
    case 4:
        /* swap word halves */
        for (i = 0; i < (int) size; i++) {
            ((u4*)dest)[i] = (src[(i << 1) + 1] << 16) | src[i << 1];
        }
        break;
    case 8:
        /* swap word halves and words */
        for (i = 0; i < (int) (size << 1); i += 2) {
            ((int*)dest)[i] = (src[(i << 1) + 3] << 16) | src[(i << 1) + 2];
            ((int*)dest)[i+1] = (src[(i << 1) + 1] << 16) | src[i << 1];
        }
        break;
    default:
        ALOGE("Unexpected width %d in copySwappedArrayData", width);
        dvmAbort();
        break;
    }
#endif
}

/*
 * Fill the array with predefined constant values.
 *
 * Returns true if job is completed, otherwise false to indicate that
 * an exception has been thrown.
 */
bool dvmInterpHandleFillArrayData(ArrayObject* arrayObj, const u2* arrayData)
{
    u2 width;
    u4 size;

    if (arrayObj == NULL) {
        dvmThrowNullPointerException(NULL);
        return false;
    }
    assert (!IS_CLASS_FLAG_SET(((Object *)arrayObj)->clazz,
        CLASS_ISOBJECTARRAY));

    /*
     * Array data table format:
     *  ushort ident = 0x0300   magic value
     *  ushort width            width of each element in the table
     *  uint   size             number of elements in the table
     *  ubyte  data[size*width] table of data values (may contain a single-byte
     *                          padding at the end)
     *
     * Total size is 4+(width * size + 1)/2 16-bit code units.
     */
    if (arrayData[0] != kArrayDataSignature) {
        dvmThrowInternalError("bad array data magic");
        return false;
    }

    width = arrayData[1];
    size = arrayData[2] | (((u4)arrayData[3]) << 16);

    if (size > arrayObj->length) {
        dvmThrowArrayIndexOutOfBoundsException(arrayObj->length, size);
        return false;
    }
    copySwappedArrayData(arrayObj->contents, &arrayData[4], size, width);
    return true;
}

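/*
 * Worked example (illustrative, not from the original sources): for a
 * short[] of length 3, a payload of
 *     0x0300 0x0002  0x0003 0x0000  0x0001 0x0002 0x0003
 * decodes as ident=0x0300, width=2, size=3, data={1,2,3}; the three
 * elements are copied straight into the array contents (a plain memcpy on
 * little-endian devices). If the destination array were shorter than three
 * elements we would throw ArrayIndexOutOfBoundsException instead.
 */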

/*
 * Find the concrete method that corresponds to "methodIdx". The code in
 * "method" is executing invoke-method with "thisClass" as its first argument.
 *
 * Returns NULL with an exception raised on failure.
 */
Method* dvmInterpFindInterfaceMethod(ClassObject* thisClass, u4 methodIdx,
    const Method* method, DvmDex* methodClassDex)
{
    Method* absMethod;
    Method* methodToCall;
    int i, vtableIndex;

    /*
     * Resolve the method. This gives us the abstract method from the
     * interface class declaration.
     */
    absMethod = dvmDexGetResolvedMethod(methodClassDex, methodIdx);
    if (absMethod == NULL) {
        absMethod = dvmResolveInterfaceMethod(method->clazz, methodIdx);
        if (absMethod == NULL) {
            ALOGV("+ unknown method");
            return NULL;
        }
    }

    /* make sure absMethod->methodIndex means what we think it means */
    assert(dvmIsAbstractMethod(absMethod));

    /*
     * Run through the "this" object's iftable. Find the entry for
     * absMethod's class, then use absMethod->methodIndex to find
     * the method's entry. The value there is the offset into our
     * vtable of the actual method to execute.
     *
     * The verifier does not guarantee that objects stored into
     * interface references actually implement the interface, so this
     * check cannot be eliminated.
     */
    for (i = 0; i < thisClass->iftableCount; i++) {
        if (thisClass->iftable[i].clazz == absMethod->clazz)
            break;
    }
    if (i == thisClass->iftableCount) {
        /* impossible in verified DEX, need to check for it in unverified */
        dvmThrowIncompatibleClassChangeError("interface not implemented");
        return NULL;
    }

    assert(absMethod->methodIndex <
        thisClass->iftable[i].clazz->virtualMethodCount);

    vtableIndex =
        thisClass->iftable[i].methodIndexArray[absMethod->methodIndex];
    assert(vtableIndex >= 0 && vtableIndex < thisClass->vtableCount);
    methodToCall = thisClass->vtable[vtableIndex];

#if 0
    /* this can happen when there's a stale class file */
    if (dvmIsAbstractMethod(methodToCall)) {
        dvmThrowAbstractMethodError("interface method not implemented");
        return NULL;
    }
#else
    assert(!dvmIsAbstractMethod(methodToCall) ||
        methodToCall->nativeFunc != NULL);
#endif

    LOGVV("+++ interface=%s.%s concrete=%s.%s",
        absMethod->clazz->descriptor, absMethod->name,
        methodToCall->clazz->descriptor, methodToCall->name);
    assert(methodToCall != NULL);

    return methodToCall;
}



/*
 * Helpers for dvmThrowVerificationError().
 *
 * Each returns a newly-allocated string.
 */
#define kThrowShow_accessFromClass 1
static std::string classNameFromIndex(const Method* method, int ref,
    VerifyErrorRefType refType, int flags)
{
    const DvmDex* pDvmDex = method->clazz->pDvmDex;
    if (refType == VERIFY_ERROR_REF_FIELD) {
        /* get class ID from field ID */
        const DexFieldId* pFieldId = dexGetFieldId(pDvmDex->pDexFile, ref);
        ref = pFieldId->classIdx;
    } else if (refType == VERIFY_ERROR_REF_METHOD) {
        /* get class ID from method ID */
        const DexMethodId* pMethodId = dexGetMethodId(pDvmDex->pDexFile, ref);
        ref = pMethodId->classIdx;
    }

    const char* className = dexStringByTypeIdx(pDvmDex->pDexFile, ref);
    std::string dotClassName(dvmHumanReadableDescriptor(className));
    if (flags == 0) {
        return dotClassName;
    }

    std::string result;
    if ((flags & kThrowShow_accessFromClass) != 0) {
        result += "tried to access class " + dotClassName;
        result += " from class " +
            dvmHumanReadableDescriptor(method->clazz->descriptor);
    } else {
        assert(false);      // should've been caught above
    }

    return result;
}

static std::string fieldNameFromIndex(const Method* method, int ref,
    VerifyErrorRefType refType, int flags)
{
    if (refType != VERIFY_ERROR_REF_FIELD) {
        ALOGW("Expected ref type %d, got %d", VERIFY_ERROR_REF_FIELD, refType);
        return "";          /* no message */
    }

    const DvmDex* pDvmDex = method->clazz->pDvmDex;
    const DexFieldId* pFieldId = dexGetFieldId(pDvmDex->pDexFile, ref);
    const char* className = dexStringByTypeIdx(pDvmDex->pDexFile,
        pFieldId->classIdx);
    const char* fieldName = dexStringById(pDvmDex->pDexFile, pFieldId->nameIdx);

    std::string dotName(dvmHumanReadableDescriptor(className));

    if ((flags & kThrowShow_accessFromClass) != 0) {
        std::string result;
        result += "tried to access field ";
        result += dotName + "." + fieldName;
        result += " from class ";
        result += dvmHumanReadableDescriptor(method->clazz->descriptor);
        return result;
    }
    return dotName + "." + fieldName;
}

static std::string methodNameFromIndex(const Method* method, int ref,
    VerifyErrorRefType refType, int flags)
{
    if (refType != VERIFY_ERROR_REF_METHOD) {
        ALOGW("Expected ref type %d, got %d", VERIFY_ERROR_REF_METHOD, refType);
        return "";          /* no message */
    }

    const DvmDex* pDvmDex = method->clazz->pDvmDex;
    const DexMethodId* pMethodId = dexGetMethodId(pDvmDex->pDexFile, ref);
    const char* className = dexStringByTypeIdx(pDvmDex->pDexFile,
        pMethodId->classIdx);
    const char* methodName = dexStringById(pDvmDex->pDexFile,
        pMethodId->nameIdx);

    std::string dotName(dvmHumanReadableDescriptor(className));

    if ((flags & kThrowShow_accessFromClass) != 0) {
        char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
        std::string result;
        result += "tried to access method ";
        result += dotName + "." + methodName + ":" + desc;
        result += " from class " +
            dvmHumanReadableDescriptor(method->clazz->descriptor);
        free(desc);
        return result;
    }
    return dotName + "." + methodName;
}

/*
 * Throw an exception for a problem identified by the verifier.
 *
 * This is used by the invoke-verification-error instruction. It always
 * throws an exception.
 *
 * "kind" indicates the kind of failure encountered by the verifier. It
 * has two parts, an error code and an indication of the reference type.
 */
void dvmThrowVerificationError(const Method* method, int kind, int ref)
{
    int errorPart = kind & ~(0xff << kVerifyErrorRefTypeShift);
    int errorRefPart = kind >> kVerifyErrorRefTypeShift;
    VerifyError errorKind = static_cast<VerifyError>(errorPart);
    VerifyErrorRefType refType = static_cast<VerifyErrorRefType>(errorRefPart);
    ClassObject* exceptionClass = gDvm.exVerifyError;
    std::string msg;

    switch ((VerifyError) errorKind) {
    case VERIFY_ERROR_NO_CLASS:
        exceptionClass = gDvm.exNoClassDefFoundError;
        msg = classNameFromIndex(method, ref, refType, 0);
        break;
    case VERIFY_ERROR_NO_FIELD:
        exceptionClass = gDvm.exNoSuchFieldError;
        msg = fieldNameFromIndex(method, ref, refType, 0);
        break;
    case VERIFY_ERROR_NO_METHOD:
        exceptionClass = gDvm.exNoSuchMethodError;
        msg = methodNameFromIndex(method, ref, refType, 0);
        break;
    case VERIFY_ERROR_ACCESS_CLASS:
        exceptionClass = gDvm.exIllegalAccessError;
        msg = classNameFromIndex(method, ref, refType,
            kThrowShow_accessFromClass);
        break;
    case VERIFY_ERROR_ACCESS_FIELD:
        exceptionClass = gDvm.exIllegalAccessError;
        msg = fieldNameFromIndex(method, ref, refType,
            kThrowShow_accessFromClass);
        break;
    case VERIFY_ERROR_ACCESS_METHOD:
        exceptionClass = gDvm.exIllegalAccessError;
        msg = methodNameFromIndex(method, ref, refType,
            kThrowShow_accessFromClass);
        break;
    case VERIFY_ERROR_CLASS_CHANGE:
        exceptionClass = gDvm.exIncompatibleClassChangeError;
        msg = classNameFromIndex(method, ref, refType, 0);
        break;
    case VERIFY_ERROR_INSTANTIATION:
        exceptionClass = gDvm.exInstantiationError;
        msg = classNameFromIndex(method, ref, refType, 0);
        break;

    case VERIFY_ERROR_GENERIC:
        /* generic VerifyError; use default exception, no message */
        break;
    case VERIFY_ERROR_NONE:
        /* should never happen; use default exception */
        assert(false);
        msg = "weird - no error specified";
        break;

    /* no default clause -- want warning if enum updated */
    }

    dvmThrowException(exceptionClass, msg.c_str());
}
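
/*
 * Illustrative note (not part of the original sources): the verifier packs
 * a failure as kind = errorKind | (refType << kVerifyErrorRefTypeShift), so
 * a reference to a missing method arrives here as
 *     VERIFY_ERROR_NO_METHOD | (VERIFY_ERROR_REF_METHOD << kVerifyErrorRefTypeShift)
 * and the decode above recovers VERIFY_ERROR_NO_METHOD for the switch and
 * VERIFY_ERROR_REF_METHOD for methodNameFromIndex().
 */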

/*
 * Update interpBreak for a single thread.
 */
void updateInterpBreak(Thread* thread, ExecutionSubModes subMode, bool enable)
{
    InterpBreak oldValue, newValue;
    do {
        oldValue = newValue = thread->interpBreak;
        newValue.ctl.breakFlags = kInterpNoBreak;  // Assume full reset
        if (enable)
            newValue.ctl.subMode |= subMode;
        else
            newValue.ctl.subMode &= ~subMode;
        if (newValue.ctl.subMode & SINGLESTEP_BREAK_MASK)
            newValue.ctl.breakFlags |= kInterpSingleStep;
        if (newValue.ctl.subMode & SAFEPOINT_BREAK_MASK)
            newValue.ctl.breakFlags |= kInterpSafePoint;
#ifndef DVM_NO_ASM_INTERP
        newValue.ctl.curHandlerTable = (newValue.ctl.breakFlags) ?
            thread->altHandlerTable : thread->mainHandlerTable;
#endif
    } while (dvmQuasiAtomicCas64(oldValue.all, newValue.all,
             &thread->interpBreak.all) != 0);
}

/*
 * Update interpBreak for all threads.
 */
void updateAllInterpBreak(ExecutionSubModes subMode, bool enable)
{
    Thread* self = dvmThreadSelf();
    Thread* thread;

    dvmLockThreadList(self);
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        updateInterpBreak(thread, subMode, enable);
    }
    dvmUnlockThreadList();
}

/*
 * Update the normal and debugger suspend counts for a thread.
 * threadSuspendCount must be acquired before calling this to
 * ensure a clean update of suspendCount, dbgSuspendCount and
 * sumThreadSuspendCount.
 *
 * CLEANUP TODO: Currently only the JIT is using sumThreadSuspendCount.
 * Move under WITH_JIT ifdefs.
 */
void dvmAddToSuspendCounts(Thread* thread, int delta, int dbgDelta)
{
    thread->suspendCount += delta;
    thread->dbgSuspendCount += dbgDelta;
    updateInterpBreak(thread, kSubModeSuspendPending,
        (thread->suspendCount != 0));
    // Update the global suspend count total
    gDvm.sumThreadSuspendCount += delta;
}


void dvmDisableSubMode(Thread* thread, ExecutionSubModes subMode)
{
    updateInterpBreak(thread, subMode, false);
}

void dvmEnableSubMode(Thread* thread, ExecutionSubModes subMode)
{
    updateInterpBreak(thread, subMode, true);
}

void dvmEnableAllSubMode(ExecutionSubModes subMode)
{
    updateAllInterpBreak(subMode, true);
}

void dvmDisableAllSubMode(ExecutionSubModes subMode)
{
    updateAllInterpBreak(subMode, false);
}

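/*
 * Usage sketch (illustrative, not from the original sources): the debugger
 * attach path effectively calls
 *     dvmEnableAllSubMode(kSubModeDebuggerActive);
 * which sets the sub-mode bit on every thread via updateInterpBreak(); any
 * resulting break flags switch that thread's curHandlerTable from its
 * mainHandlerTable to its altHandlerTable, so execution funnels through the
 * between-instruction check code (dvmCheckBefore) until the mode is
 * disabled again.
 */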
/*
 * Do a sanity check on interpreter state saved to Thread.
 * A failure here doesn't necessarily mean that something is wrong,
 * so this code should only be used during development to suggest
 * a possible problem.
 */
void dvmCheckInterpStateConsistency()
{
    Thread* self = dvmThreadSelf();
    Thread* thread;
    uint8_t breakFlags;
    uint8_t subMode;
#ifndef DVM_NO_ASM_INTERP
    void* handlerTable;
#endif

    dvmLockThreadList(self);
    breakFlags = self->interpBreak.ctl.breakFlags;
    subMode = self->interpBreak.ctl.subMode;
#ifndef DVM_NO_ASM_INTERP
    handlerTable = self->interpBreak.ctl.curHandlerTable;
#endif
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        if (subMode != thread->interpBreak.ctl.subMode) {
            ALOGD("Warning: subMode mismatch - %#x:%#x, tid[%d]",
                subMode, thread->interpBreak.ctl.subMode, thread->threadId);
        }
        if (breakFlags != thread->interpBreak.ctl.breakFlags) {
            ALOGD("Warning: breakFlags mismatch - %#x:%#x, tid[%d]",
                breakFlags, thread->interpBreak.ctl.breakFlags,
                thread->threadId);
        }
#ifndef DVM_NO_ASM_INTERP
        if (handlerTable != thread->interpBreak.ctl.curHandlerTable) {
            ALOGD("Warning: curHandlerTable mismatch - %#x:%#x, tid[%d]",
                (int)handlerTable,
                (int)thread->interpBreak.ctl.curHandlerTable,
                thread->threadId);
        }
#endif
#if defined(WITH_JIT)
        if (thread->pJitProfTable != gDvmJit.pProfTable) {
            ALOGD("Warning: pJitProfTable mismatch - %#x:%#x, tid[%d]",
                (int)thread->pJitProfTable, (int)gDvmJit.pProfTable,
                thread->threadId);
        }
        if (thread->jitThreshold != gDvmJit.threshold) {
            ALOGD("Warning: jitThreshold mismatch - %#x:%#x, tid[%d]",
                (int)thread->jitThreshold, (int)gDvmJit.threshold,
                thread->threadId);
        }
#endif
    }
    dvmUnlockThreadList();
}

/*
 * Arm a safepoint callback for a thread. If funct is null,
 * clear any pending callback.
 * TODO: only gc is currently using this feature, and will have
 * at most a single outstanding callback request. Until we need
 * something more capable and flexible, enforce this limit.
 */
void dvmArmSafePointCallback(Thread* thread, SafePointCallback funct,
    void* arg)
{
    dvmLockMutex(&thread->callbackMutex);
    if ((funct == NULL) || (thread->callback == NULL)) {
        thread->callback = funct;
        thread->callbackArg = arg;
        if (funct != NULL) {
            dvmEnableSubMode(thread, kSubModeCallbackPending);
        } else {
            dvmDisableSubMode(thread, kSubModeCallbackPending);
        }
    } else {
        // Already armed. Different?
        if ((funct != thread->callback) ||
            (arg != thread->callbackArg)) {
            // Yes - report failure and die
            ALOGE("ArmSafePointCallback failed, thread %d", thread->threadId);
            dvmUnlockMutex(&thread->callbackMutex);
            dvmAbort();
        }
    }
    dvmUnlockMutex(&thread->callbackMutex);
}

/*
 * One-time initialization at thread creation. Here we initialize
 * useful constants.
 */
void dvmInitInterpreterState(Thread* self)
{
#if defined(WITH_JIT)
    /*
     * Reserve a static entity here to quickly setup runtime contents as
     * gcc will issue block copy instructions.
     */
    static struct JitToInterpEntries jitToInterpEntries = {
        dvmJitToInterpNormal,
        dvmJitToInterpNoChain,
        dvmJitToInterpPunt,
        dvmJitToInterpSingleStep,
        dvmJitToInterpTraceSelect,
#if defined(WITH_SELF_VERIFICATION)
        dvmJitToInterpBackwardBranch,
#else
        NULL,
#endif
    };
#endif

    // Begin initialization
    self->cardTable = gDvm.biasedCardTableBase;
#if defined(WITH_JIT)
    // One-time initializations
    self->jitToInterpEntries = jitToInterpEntries;
    self->icRechainCount = PREDICTED_CHAIN_COUNTER_RECHAIN;
    self->pProfileCountdown = &gDvmJit.profileCountdown;
    // Jit state that can change
    dvmJitUpdateThreadStateSingle(self);
#endif
    dvmInitializeInterpBreak(self);
}

/*
 * For a newly-created thread, we need to start off with interpBreak
 * set to any existing global modes. The caller must hold the
 * thread list lock.
 */
void dvmInitializeInterpBreak(Thread* thread)
{
    if (gDvm.instructionCountEnableCount > 0) {
        dvmEnableSubMode(thread, kSubModeInstCounting);
    }
    TracingMode mode = dvmGetMethodTracingMode();
    if (mode != TRACING_INACTIVE) {
        if (mode == SAMPLE_PROFILING_ACTIVE) {
            dvmEnableSubMode(thread, kSubModeSampleTrace);
        } else {
            dvmEnableSubMode(thread, kSubModeMethodTrace);
        }
    }
    if (gDvm.emulatorTraceEnableCount > 0) {
        dvmEnableSubMode(thread, kSubModeEmulatorTrace);
    }
    if (gDvm.debuggerActive) {
        dvmEnableSubMode(thread, kSubModeDebuggerActive);
    }
#if defined(WITH_JIT)
    dvmJitUpdateThreadStateSingle(thread);
#endif
#if 0
    // Debugging stress mode - force checkBefore
    dvmEnableSubMode(thread, kSubModeCheckAlways);
#endif
}

/*
 * Inter-instruction handler invoked in between instruction interpretations
 * to handle exceptional events such as debugging housekeeping, instruction
 * count profiling, JIT trace building, etc. The Dalvik PC has been exported
 * prior to the call, but the Thread copies of dPC & fp are not current.
 */
void dvmCheckBefore(const u2 *pc, u4 *fp, Thread* self)
{
    const Method* method = self->interpSave.method;
    assert(pc >= method->insns && pc <
        method->insns + dvmGetMethodInsnsSize(method));

#if 0
    /*
     * When we hit a specific method, enable verbose instruction logging.
     * Sometimes it's helpful to use the debugger attach as a trigger too.
     */
    if (*pIsMethodEntry) {
        static const char* cd = "Landroid/test/Arithmetic;";
        static const char* mn = "shiftTest2";
        static const char* sg = "()V";

        if (/*self->interpBreak.ctl.subMode & kSubModeDebuggerActive &&*/
            strcmp(method->clazz->descriptor, cd) == 0 &&
            strcmp(method->name, mn) == 0 &&
            strcmp(method->shorty, sg) == 0)
        {
            ALOGW("Reached %s.%s, enabling verbose mode",
                method->clazz->descriptor, method->name);
            android_setMinPriority(LOG_TAG"i", ANDROID_LOG_VERBOSE);
            dumpRegs(method, fp, true);
        }

        if (!gDvm.debuggerActive)
            *pIsMethodEntry = false;
    }
#endif

    /* Safe point handling */
    if (self->suspendCount ||
        (self->interpBreak.ctl.subMode & kSubModeCallbackPending)) {
        // Are we at a safe point?
        int flags;
        flags = dexGetFlagsFromOpcode(dexOpcodeFromCodeUnit(*pc));
        if (flags & (VERIFY_GC_INST_MASK & ~kInstrCanThrow)) {
            // Yes, at a safe point. Pending callback?
/*
 * Inter-instruction handler invoked in between instruction interpretations
 * to handle exceptional events such as debugging housekeeping, instruction
 * count profiling, JIT trace building, etc.  The Dalvik PC has been exported
 * prior to the call, but the Thread copies of dPC & fp are not current.
 */
void dvmCheckBefore(const u2 *pc, u4 *fp, Thread* self)
{
    const Method* method = self->interpSave.method;
    assert(pc >= method->insns && pc <
           method->insns + dvmGetMethodInsnsSize(method));

#if 0
    /*
     * When we hit a specific method, enable verbose instruction logging.
     * Sometimes it's helpful to use the debugger attach as a trigger too.
     */
    if (*pIsMethodEntry) {
        static const char* cd = "Landroid/test/Arithmetic;";
        static const char* mn = "shiftTest2";
        static const char* sg = "()V";

        if (/*self->interpBreak.ctl.subMode & kSubModeDebuggerActive &&*/
            strcmp(method->clazz->descriptor, cd) == 0 &&
            strcmp(method->name, mn) == 0 &&
            strcmp(method->shorty, sg) == 0)
        {
            ALOGW("Reached %s.%s, enabling verbose mode",
                method->clazz->descriptor, method->name);
            android_setMinPriority(LOG_TAG"i", ANDROID_LOG_VERBOSE);
            dumpRegs(method, fp, true);
        }

        if (!gDvm.debuggerActive)
            *pIsMethodEntry = false;
    }
#endif

    /* Safe point handling */
    if (self->suspendCount ||
        (self->interpBreak.ctl.subMode & kSubModeCallbackPending)) {
        // Are we at a safe point?
        int flags;
        flags = dexGetFlagsFromOpcode(dexOpcodeFromCodeUnit(*pc));
        if (flags & (VERIFY_GC_INST_MASK & ~kInstrCanThrow)) {
            // Yes, at a safe point.  Pending callback?
            if (self->interpBreak.ctl.subMode & kSubModeCallbackPending) {
                SafePointCallback callback;
                void* arg;
                // Get a consistent funct/arg pair
                dvmLockMutex(&self->callbackMutex);
                callback = self->callback;
                arg = self->callbackArg;
                dvmUnlockMutex(&self->callbackMutex);
                // Update Thread structure
                self->interpSave.pc = pc;
                self->interpSave.curFrame = fp;
                if (callback != NULL) {
                    // Do the callback
                    if (!callback(self, arg)) {
                        // disarm
                        dvmArmSafePointCallback(self, NULL, NULL);
                    }
                }
            }
            // Need to suspend?
            if (self->suspendCount) {
                dvmExportPC(pc, fp);
                dvmCheckSuspendPending(self);
            }
        }
    }

    if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
        updateDebugger(method, pc, fp, self);
    }
    if (gDvm.instructionCountEnableCount != 0) {
        /*
         * Count up the #of executed instructions.  This isn't synchronized
         * for thread-safety; if we need that we should make this
         * thread-local and merge counts into the global area when threads
         * exit (perhaps suspending all other threads GC-style and pulling
         * the data out of them).
         */
        gDvm.executedInstrCounts[GET_OPCODE(*pc)]++;
    }

#if defined(WITH_TRACKREF_CHECKS)
    dvmInterpCheckTrackedRefs(self, method,
        self->interpSave.debugTrackedRefStart);
#endif

#if defined(WITH_JIT)
    // Does the JIT need anything done now?
    if (self->interpBreak.ctl.subMode &
        (kSubModeJitTraceBuild | kSubModeJitSV)) {
        // Are we building a trace?
        if (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) {
            dvmCheckJit(pc, self);
        }

#if defined(WITH_SELF_VERIFICATION)
        // Are we replaying a trace?
        if (self->interpBreak.ctl.subMode & kSubModeJitSV) {
            dvmCheckSelfVerification(pc, self);
        }
#endif
    }
#endif

    /*
     * CountedStep processing.  NOTE: must be the last check here so that
     * the preceding special-case handlers can manipulate the single-step
     * count.
     */
    if (self->interpBreak.ctl.subMode & kSubModeCountedStep) {
        if (self->singleStepCount == 0) {
            // We've exhausted our single step count
            dvmDisableSubMode(self, kSubModeCountedStep);
#if defined(WITH_JIT)
#if 0
            /*
             * For debugging.  If jitResumeDPC is non-zero, then
             * we expect to return to a trace in progress.  There
             * are valid reasons why we wouldn't (such as an exception
             * throw), but here we can keep track.
             */
            if (self->jitResumeDPC != NULL) {
                if (self->jitResumeDPC == pc) {
                    if (self->jitResumeNPC != NULL) {
                        ALOGD("SS return to trace - pc:%#x to %#x",
                             (int)pc, (int)self->jitResumeNPC);
                    } else {
                        ALOGD("SS return to interp - pc:%#x", (int)pc);
                    }
                } else {
                    ALOGD("SS failed to return.  Expected %#x, now at %#x",
                         (int)self->jitResumeDPC, (int)pc);
                }
            }
#endif
#if 0
            // TODO - fix JIT single-stepping resume mode (b/5551114)
            // self->jitResumeNPC needs to be cleared in callPrep

            // If we've got a native return and no other reasons to
            // remain in singlestep/break mode, do a long jump
            if (self->jitResumeNPC != NULL &&
                self->interpBreak.ctl.breakFlags == 0) {
                assert(self->jitResumeDPC == pc);
                self->jitResumeDPC = NULL;
                dvmJitResumeTranslation(self, pc, fp);
                // Doesn't return
                dvmAbort();
            }
            // In case resume is blocked by non-zero breakFlags, clear
            // jitResumeNPC here.
            self->jitResumeNPC = NULL;
            self->jitResumeDPC = NULL;
            self->inJitCodeCache = NULL;
#endif
#endif
        } else {
            self->singleStepCount--;
#if defined(WITH_JIT)
            if ((self->singleStepCount > 0) && (self->jitResumeNPC != NULL)) {
                /*
                 * Direct return to an existing translation following a
                 * single step is valid only if we step once.  If we're
                 * here, an additional step was added so we need to
                 * invalidate the return to translation.
                 */
                self->jitResumeNPC = NULL;
                self->inJitCodeCache = NULL;
            }
#endif
        }
    }
}

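/*
 * Illustrative sketch of the safe-point callback contract consumed in
 * dvmCheckBefore() above: a callback armed with dvmArmSafePointCallback()
 * runs the next time the thread reaches a safe point, and is disarmed
 * automatically when it returns false.  The example function and argument
 * below are hypothetical.
 */
#if 0
static bool exampleSafePointCallback(Thread* thread, void* arg)
{
    int* remaining = (int*) arg;
    /* ...do per-thread work while the thread sits at a GC-safe point... */
    return (--(*remaining) > 0);    /* false => dvmCheckBefore disarms us */
}

static void exampleArmSafePoint(Thread* thread, int* counter)
{
    dvmArmSafePointCallback(thread, exampleSafePointCallback, counter);
}
#endif
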
/*
 * Main interpreter loop entry point.
 *
 * This begins executing code at the start of "method".  On exit, "pResult"
 * holds the return value of the method (or, if "method" has a void return
 * type, it holds an undefined value).
 *
 * The interpreted stack frame, which holds the method arguments, has
 * already been set up.
 */
void dvmInterpret(Thread* self, const Method* method, JValue* pResult)
{
    InterpSaveState interpSaveState;
    ExecutionSubModes savedSubModes;

#if defined(WITH_JIT)
    /* Target-specific save/restore */
    double calleeSave[JIT_CALLEE_SAVE_DOUBLE_COUNT];
    /*
     * If the previous VM activation left the code cache through
     * single-stepping, the inJitCodeCache flag will be set when the VM is
     * re-entered (for example, in self-verification mode we single-step
     * NEW_INSTANCE, which may re-enter the VM through
     * findClassFromLoaderNoInit).  Because of that, we cannot assert that
     * self->inJitCodeCache is NULL here.
     */
#endif

    /*
     * Save interpreter state from previous activation, linking
     * new to last.
     */
    interpSaveState = self->interpSave;
    self->interpSave.prev = &interpSaveState;
    /*
     * Strip out and save any flags that should not be inherited by
     * nested interpreter activation.
     */
    savedSubModes = (ExecutionSubModes)(
        self->interpBreak.ctl.subMode & LOCAL_SUBMODE);
    if (savedSubModes != kSubModeNormal) {
        dvmDisableSubMode(self, savedSubModes);
    }
#if defined(WITH_JIT)
    dvmJitCalleeSave(calleeSave);
#endif

#if defined(WITH_TRACKREF_CHECKS)
    self->interpSave.debugTrackedRefStart =
        dvmReferenceTableEntries(&self->internalLocalRefTable);
#endif
    self->debugIsMethodEntry = true;
#if defined(WITH_JIT)
    /* Initialize the state to kJitNot */
    self->jitState = kJitNot;
#endif

    /*
     * Initialize working state.
     *
     * No need to initialize "retval".
     */
    self->interpSave.method = method;
    self->interpSave.curFrame = (u4*) self->interpSave.curFrame;
    self->interpSave.pc = method->insns;

    assert(!dvmIsNativeMethod(method));

    /*
     * Make sure the class is ready to go.  Shouldn't be possible to get
     * here otherwise.
     */
    if (method->clazz->status < CLASS_INITIALIZING ||
        method->clazz->status == CLASS_ERROR)
    {
        ALOGE("ERROR: tried to execute code in unprepared class '%s' (%d)",
            method->clazz->descriptor, method->clazz->status);
        dvmDumpThread(self, false);
        dvmAbort();
    }

    typedef void (*Interpreter)(Thread*);
    Interpreter stdInterp;
    if (gDvm.executionMode == kExecutionModeInterpFast)
        stdInterp = dvmMterpStd;
#if defined(WITH_JIT)
    else if (gDvm.executionMode == kExecutionModeJit ||
             gDvm.executionMode == kExecutionModeNcgO0 ||
             gDvm.executionMode == kExecutionModeNcgO1)
        stdInterp = dvmMterpStd;
#endif
    else
        stdInterp = dvmInterpretPortable;

    // Call the interpreter
    (*stdInterp)(self);

    *pResult = self->interpSave.retval;

    /* Restore interpreter state from previous activation */
    self->interpSave = interpSaveState;
#if defined(WITH_JIT)
    dvmJitCalleeRestore(calleeSave);
#endif
    if (savedSubModes != kSubModeNormal) {
        dvmEnableSubMode(self, savedSubModes);
    }
}

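/*
 * Illustrative sketch of the caller contract for dvmInterpret().  The real
 * invocation paths elsewhere in the VM push the interpreted frame and
 * marshal the arguments before calling in; the helper name below is
 * hypothetical, and only the dvmInterpret() call itself reflects this file.
 */
#if 0
static void exampleInvokeInterpreted(Thread* self, const Method* method)
{
    JValue result;

    assert(!dvmIsNativeMethod(method));     /* native methods never get here */
    /* ...frame holding "method"'s arguments assumed already pushed... */
    dvmInterpret(self, method, &result);
    /* "result" now holds the return value (undefined for void methods) */
}
#endif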