/*
 * This file was generated automatically by gen-mterp.py for 'x86'.
 *
 * --> DO NOT EDIT <--
 */

/* File: c/header.cpp */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* common includes */
#include "Dalvik.h"
#include "interp/InterpDefs.h"
#include "mterp/Mterp.h"
#include <math.h>                   // needed for fmod, fmodf
#include "mterp/common/FindInterface.h"

/*
 * Configuration defines.  These affect the C implementations, i.e. the
 * portable interpreter(s) and C stubs.
 *
 * Some defines are controlled by the Makefile, e.g.:
 *   WITH_INSTR_CHECKS
 *   WITH_TRACKREF_CHECKS
 *   EASY_GDB
 *   NDEBUG
 */

#ifdef WITH_INSTR_CHECKS            /* instruction-level paranoia (slow!) */
# define CHECK_BRANCH_OFFSETS
# define CHECK_REGISTER_INDICES
#endif

/*
 * Some architectures require 64-bit alignment for access to 64-bit data
 * types.  We can't just use pointers to copy 64-bit values out of our
 * interpreted register set, because gcc may assume the pointer target is
 * aligned and generate invalid code.
 *
 * There are two common approaches:
 *  (1) Use a union that defines a 32-bit pair and a 64-bit value.
 *  (2) Call memcpy().
 *
 * Depending upon what compiler you're using and what options are specified,
 * one may be faster than the other.  For example, the compiler might
 * convert a memcpy() of 8 bytes into a series of instructions and omit
 * the call.  The union version could cause some strange side-effects,
 * e.g. for a while ARM gcc thought it needed separate storage for each
 * inlined instance, and generated instructions to zero out ~700 bytes of
 * stack space at the top of the interpreter.
 *
 * The default is to use memcpy().  The current gcc for ARM seems to do
 * better with the union.
 */
#if defined(__ARM_EABI__)
# define NO_UNALIGN_64__UNION
#endif
/*
 * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
 *
 * Use memcpy() to do the transfer
 */
#if defined(__mips__)
/* # define NO_UNALIGN_64__UNION */
#endif


//#define LOG_INSTR                 /* verbose debugging */
/* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */

/*
 * Export another copy of the PC on every instruction; this is largely
 * redundant with EXPORT_PC and the debugger code.  This value can be
 * compared against what we have stored on the stack with EXPORT_PC to
 * help ensure that we aren't missing any export calls.
 */
#if WITH_EXTRA_GC_CHECKS > 1
# define EXPORT_EXTRA_PC() (self->currentPc2 = pc)
#else
# define EXPORT_EXTRA_PC()
#endif

/*
 * Adjust the program counter.  "_offset" is a signed int, in 16-bit units.
 *
 * Assumes the existence of "const u2* pc" and "const u2* curMethod->insns".
 *
 * We don't advance the program counter until we finish an instruction or
 * branch, because we don't want to have to unroll the PC if there's an
 * exception.
 */
#ifdef CHECK_BRANCH_OFFSETS
# define ADJUST_PC(_offset) do { \
        int myoff = _offset;        /* deref only once */ \
        if (pc + myoff < curMethod->insns || \
            pc + myoff >= curMethod->insns + dvmGetMethodInsnsSize(curMethod)) \
        { \
            char* desc; \
            desc = dexProtoCopyMethodDescriptor(&curMethod->prototype); \
            ALOGE("Invalid branch %d at 0x%04x in %s.%s %s", \
                myoff, (int) (pc - curMethod->insns), \
                curMethod->clazz->descriptor, curMethod->name, desc); \
            free(desc); \
            dvmAbort(); \
        } \
        pc += myoff; \
        EXPORT_EXTRA_PC(); \
    } while (false)
#else
# define ADJUST_PC(_offset) do { \
        pc += _offset; \
        EXPORT_EXTRA_PC(); \
    } while (false)
#endif

/*
 * If enabled, log instructions as we execute them.
 */
#ifdef LOG_INSTR
# define ILOGD(...) ILOG(LOG_DEBUG, __VA_ARGS__)
# define ILOGV(...) ILOG(LOG_VERBOSE, __VA_ARGS__)
# define ILOG(_level, ...) do { \
        char debugStrBuf[128]; \
        snprintf(debugStrBuf, sizeof(debugStrBuf), __VA_ARGS__); \
        if (curMethod != NULL) \
            ALOG(_level, LOG_TAG"i", "%-2d|%04x%s", \
                self->threadId, (int)(pc - curMethod->insns), debugStrBuf); \
        else \
            ALOG(_level, LOG_TAG"i", "%-2d|####%s", \
                self->threadId, debugStrBuf); \
    } while(false)
void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly);
# define DUMP_REGS(_meth, _frame, _inOnly) dvmDumpRegs(_meth, _frame, _inOnly)
static const char kSpacing[] = " ";
#else
# define ILOGD(...) ((void)0)
# define ILOGV(...) ((void)0)
# define DUMP_REGS(_meth, _frame, _inOnly) ((void)0)
#endif

/* get a long from an array of u4 */
static inline s8 getLongFromArray(const u4* ptr, int idx)
{
#if defined(NO_UNALIGN_64__UNION)
    union { s8 ll; u4 parts[2]; } conv;

    ptr += idx;
    conv.parts[0] = ptr[0];
    conv.parts[1] = ptr[1];
    return conv.ll;
#else
    s8 val;
    memcpy(&val, &ptr[idx], 8);
    return val;
#endif
}

/* store a long into an array of u4 */
static inline void putLongToArray(u4* ptr, int idx, s8 val)
{
#if defined(NO_UNALIGN_64__UNION)
    union { s8 ll; u4 parts[2]; } conv;

    ptr += idx;
    conv.ll = val;
    ptr[0] = conv.parts[0];
    ptr[1] = conv.parts[1];
#else
    memcpy(&ptr[idx], &val, 8);
#endif
}

/* get a double from an array of u4 */
static inline double getDoubleFromArray(const u4* ptr, int idx)
{
#if defined(NO_UNALIGN_64__UNION)
    union { double d; u4 parts[2]; } conv;

    ptr += idx;
    conv.parts[0] = ptr[0];
    conv.parts[1] = ptr[1];
    return conv.d;
#else
    double dval;
    memcpy(&dval, &ptr[idx], 8);
    return dval;
#endif
}

/* store a double into an array of u4 */
static inline void putDoubleToArray(u4* ptr, int idx, double dval)
{
#if defined(NO_UNALIGN_64__UNION)
    union { double d; u4 parts[2]; } conv;

    ptr += idx;
    conv.d = dval;
    ptr[0] = conv.parts[0];
    ptr[1] = conv.parts[1];
#else
    memcpy(&ptr[idx], &dval, 8);
#endif
}
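
/*
 * Illustrative sketch (not generated code): why the helpers above go through
 * a union or memcpy() instead of a plain cast.  The commented-out cast
 * assumes the u4 slot is 8-byte aligned, which the interpreted register set
 * does not guarantee; memcpy() (usually inlined by the compiler) makes no
 * such assumption.  The function name below is made up for the example.
 */
#if 0
static s8 exampleReadWide(const u4* regs, int idx)
{
    /* s8 bad = *(const s8*)&regs[idx];   // may fault or be miscompiled */
    s8 val;
    memcpy(&val, &regs[idx], sizeof(val));
    return val;
}
#endif
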
/*
 * If enabled, validate the register number on every access.  Otherwise,
 * just do an array access.
 *
 * Assumes the existence of "u4* fp".
 *
 * "_idx" may be referenced more than once.
 */
#ifdef CHECK_REGISTER_INDICES
# define GET_REGISTER(_idx) \
    ( (_idx) < curMethod->registersSize ? \
        (fp[(_idx)]) : (assert(!"bad reg"),1969) )
# define SET_REGISTER(_idx, _val) \
    ( (_idx) < curMethod->registersSize ? \
        (fp[(_idx)] = (u4)(_val)) : (assert(!"bad reg"),1969) )
# define GET_REGISTER_AS_OBJECT(_idx)       ((Object *)GET_REGISTER(_idx))
# define SET_REGISTER_AS_OBJECT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
# define GET_REGISTER_INT(_idx)     ((s4) GET_REGISTER(_idx))
# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
# define GET_REGISTER_WIDE(_idx) \
    ( (_idx) < curMethod->registersSize-1 ? \
        getLongFromArray(fp, (_idx)) : (assert(!"bad reg"),1969) )
# define SET_REGISTER_WIDE(_idx, _val) \
    ( (_idx) < curMethod->registersSize-1 ? \
        (void)putLongToArray(fp, (_idx), (_val)) : assert(!"bad reg") )
# define GET_REGISTER_FLOAT(_idx) \
    ( (_idx) < curMethod->registersSize ? \
        (*((float*) &fp[(_idx)])) : (assert(!"bad reg"),1969.0f) )
# define SET_REGISTER_FLOAT(_idx, _val) \
    ( (_idx) < curMethod->registersSize ? \
        (*((float*) &fp[(_idx)]) = (_val)) : (assert(!"bad reg"),1969.0f) )
# define GET_REGISTER_DOUBLE(_idx) \
    ( (_idx) < curMethod->registersSize-1 ? \
        getDoubleFromArray(fp, (_idx)) : (assert(!"bad reg"),1969.0) )
# define SET_REGISTER_DOUBLE(_idx, _val) \
    ( (_idx) < curMethod->registersSize-1 ? \
        (void)putDoubleToArray(fp, (_idx), (_val)) : assert(!"bad reg") )
#else
# define GET_REGISTER(_idx)                 (fp[(_idx)])
# define SET_REGISTER(_idx, _val)           (fp[(_idx)] = (_val))
# define GET_REGISTER_AS_OBJECT(_idx)       ((Object*) fp[(_idx)])
# define SET_REGISTER_AS_OBJECT(_idx, _val) (fp[(_idx)] = (u4)(_val))
# define GET_REGISTER_INT(_idx)             ((s4)GET_REGISTER(_idx))
# define SET_REGISTER_INT(_idx, _val)       SET_REGISTER(_idx, (s4)_val)
# define GET_REGISTER_WIDE(_idx)            getLongFromArray(fp, (_idx))
# define SET_REGISTER_WIDE(_idx, _val)      putLongToArray(fp, (_idx), (_val))
# define GET_REGISTER_FLOAT(_idx)           (*((float*) &fp[(_idx)]))
# define SET_REGISTER_FLOAT(_idx, _val)     (*((float*) &fp[(_idx)]) = (_val))
# define GET_REGISTER_DOUBLE(_idx)          getDoubleFromArray(fp, (_idx))
# define SET_REGISTER_DOUBLE(_idx, _val)    putDoubleToArray(fp, (_idx), (_val))
#endif

/*
 * Get 16 bits from the specified offset of the program counter.  We always
 * want to load 16 bits at a time from the instruction stream -- it's more
 * efficient than 8 and won't have the alignment problems that 32 might.
 *
 * Assumes existence of "const u2* pc".
 */
#define FETCH(_offset)      (pc[(_offset)])

/*
 * Extract instruction byte from 16-bit fetch (_inst is a u2).
 */
#define INST_INST(_inst)    ((_inst) & 0xff)

/*
 * Replace the opcode (used when handling breakpoints).  _opcode is a u1.
 */
#define INST_REPLACE_OP(_inst, _opcode) (((_inst) & 0xff00) | _opcode)

/*
 * Extract the "vA, vB" 4-bit registers from the instruction word (_inst is u2).
 */
#define INST_A(_inst)       (((_inst) >> 8) & 0x0f)
#define INST_B(_inst)       ((_inst) >> 12)

/*
 * Get the 8-bit "vAA" register index from the instruction word.
 * (_inst is u2)
 */
#define INST_AA(_inst)      ((_inst) >> 8)
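
/*
 * Illustrative sketch (not generated code): how a 16-bit instruction word is
 * decoded with the macros above.  The instruction word value is made up for
 * the example.
 */
#if 0
u2 inst = 0xc301;                   /* hypothetical word fetched with FETCH(0) */
assert(INST_INST(inst) == 0x01);    /* low byte: opcode */
assert(INST_A(inst) == 0x3);        /* bits 8-11: vA */
assert(INST_B(inst) == 0xc);        /* bits 12-15: vB */
assert(INST_AA(inst) == 0xc3);      /* whole high byte: vAA */
#endif
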

/*
 * The current PC must be available to Throwable constructors, e.g.
 * those created by the various exception throw routines, so that the
 * exception stack trace can be generated correctly.  If we don't do this,
 * the offset within the current method won't be shown correctly.  See the
 * notes in Exception.c.
 *
 * This is also used to determine the address for precise GC.
 *
 * Assumes existence of "u4* fp" and "const u2* pc".
 */
#define EXPORT_PC()         (SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc)

/*
 * Check to see if "obj" is NULL.  If so, throw an exception.  Assumes the
 * pc has already been exported to the stack.
 *
 * Perform additional checks on debug builds.
 *
 * Use this to check for NULL when the instruction handler calls into
 * something that could throw an exception (so we have already called
 * EXPORT_PC at the top).
 */
static inline bool checkForNull(Object* obj)
{
    if (obj == NULL) {
        dvmThrowNullPointerException(NULL);
        return false;
    }
#ifdef WITH_EXTRA_OBJECT_VALIDATION
    if (!dvmIsHeapAddress(obj)) {
        ALOGE("Invalid object %p", obj);
        dvmAbort();
    }
#endif
#ifndef NDEBUG
    if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
        /* probable heap corruption */
        ALOGE("Invalid object class %p (in %p)", obj->clazz, obj);
        dvmAbort();
    }
#endif
    return true;
}

/*
 * Check to see if "obj" is NULL.  If so, export the PC into the stack
 * frame and throw an exception.
 *
 * Perform additional checks on debug builds.
 *
 * Use this to check for NULL when the instruction handler doesn't do
 * anything else that can throw an exception.
 */
static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc)
{
    if (obj == NULL) {
        EXPORT_PC();
        dvmThrowNullPointerException(NULL);
        return false;
    }
#ifdef WITH_EXTRA_OBJECT_VALIDATION
    if (!dvmIsHeapAddress(obj)) {
        ALOGE("Invalid object %p", obj);
        dvmAbort();
    }
#endif
#ifndef NDEBUG
    if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
        /* probable heap corruption */
        ALOGE("Invalid object class %p (in %p)", obj->clazz, obj);
        dvmAbort();
    }
#endif
    return true;
}

/* File: cstubs/stubdefs.cpp */
/*
 * In the C mterp stubs, "goto" is a function call followed immediately
 * by a return.
 */

#define GOTO_TARGET_DECL(_target, ...) \
    extern "C" void dvmMterp_##_target(Thread* self, ## __VA_ARGS__);

/* (void)xxx to quiet unused variable compiler warnings. */
#define GOTO_TARGET(_target, ...) \
    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__) { \
        u2 ref, vsrc1, vsrc2, vdst; \
        u2 inst = FETCH(0); \
        const Method* methodToCall; \
        StackSaveArea* debugSaveArea; \
        (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst; \
        (void)methodToCall; (void)debugSaveArea;

#define GOTO_TARGET_END }

/*
 * Redefine what used to be local variable accesses into Thread struct
 * references.  (These are undefined down in "footer.cpp".)
 */
#define retval                  self->interpSave.retval
#define pc                      self->interpSave.pc
#define fp                      self->interpSave.curFrame
#define curMethod               self->interpSave.method
#define methodClassDex          self->interpSave.methodClassDex
#define debugTrackedRefStart    self->interpSave.debugTrackedRefStart

/* ugh */
#define STUB_HACK(x) x
#if defined(WITH_JIT)
#define JIT_STUB_HACK(x) x
#else
#define JIT_STUB_HACK(x)
#endif

/*
 * InterpSave's pc and fp must be valid when breaking out to a
 * "Reportxxx" routine.  Because the portable interpreter uses local
 * variables for these, we must flush prior.  Stubs, however, use
 * the interpSave vars directly, so this is a nop for stubs.
 */
#define PC_FP_TO_SELF()
#define PC_TO_SELF()

/*
 * Opcode handler framing macros.  Here, each opcode is a separate function
 * that takes a "self" argument and returns void.  We can't declare
 * these "static" because they may be called from an assembly stub.
 * (void)xxx to quiet unused variable compiler warnings.
 */
#define HANDLE_OPCODE(_op) \
    extern "C" void dvmMterp_##_op(Thread* self); \
    void dvmMterp_##_op(Thread* self) { \
        u4 ref; \
        u2 vsrc1, vsrc2, vdst; \
        u2 inst = FETCH(0); \
        (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;

#define OP_END }

/*
 * Like the "portable" FINISH, but don't reload "inst", and return to caller
 * when done.  Further, debugger/profiler checks are handled
 * before handler execution in mterp, so we don't do them here either.
 */
#if defined(WITH_JIT)
#define FINISH(_offset) { \
        ADJUST_PC(_offset); \
        if (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) { \
            dvmCheckJit(pc, self); \
        } \
        return; \
    }
#else
#define FINISH(_offset) { \
        ADJUST_PC(_offset); \
        return; \
    }
#endif

#define FINISH_BKPT(_opcode)       /* FIXME? */
#define DISPATCH_EXTENDED(_opcode) /* FIXME? */

/*
 * The "goto label" statements turn into function calls followed by
 * return statements.  Some of the functions take arguments, which in the
 * portable interpreter are handled by assigning values to globals.
 */

#define GOTO_exceptionThrown() \
    do { \
        dvmMterp_exceptionThrown(self); \
        return; \
    } while(false)

#define GOTO_returnFromMethod() \
    do { \
        dvmMterp_returnFromMethod(self); \
        return; \
    } while(false)

#define GOTO_invoke(_target, _methodCallRange) \
    do { \
        dvmMterp_##_target(self, _methodCallRange); \
        return; \
    } while(false)

#define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst) \
    do { \
        dvmMterp_invokeMethod(self, _methodCallRange, _methodToCall, \
            _vsrc1, _vdst); \
        return; \
    } while(false)

/*
 * As a special case, "goto bail" turns into a longjmp.
 */
#define GOTO_bail() \
    dvmMterpStdBail(self)
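
/*
 * Illustrative sketch (not generated code): with the framing macros above,
 * a trivial stub such as "HANDLE_OPCODE(OP_NOP) FINISH(1); OP_END" expands
 * to roughly the function below, and a "goto" macro becomes a call plus an
 * immediate return.  The expansion here is only for illustration.
 */
#if 0
extern "C" void dvmMterp_OP_NOP(Thread* self);
void dvmMterp_OP_NOP(Thread* self) {
    u4 ref;
    u2 vsrc1, vsrc2, vdst;
    u2 inst = FETCH(0);
    (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
    ADJUST_PC(1);       /* FINISH(1), minus the WITH_JIT trace-build check */
    return;             /* control goes back to the assembly dispatcher */
}
#endif
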
/*
 * Periodically check for thread suspension.
 *
 * While we're at it, see if a debugger has attached or the profiler has
 * started.
 */
#define PERIODIC_CHECKS(_pcadj) { \
        if (dvmCheckSuspendQuick(self)) { \
            EXPORT_PC();  /* need for precise GC */ \
            dvmCheckSuspendPending(self); \
        } \
    }

/* File: c/opcommon.cpp */
/* forward declarations of goto targets */
GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
    u2 count, u2 regs);
GOTO_TARGET_DECL(returnFromMethod);
GOTO_TARGET_DECL(exceptionThrown);

/*
 * ===========================================================================
 *
 * What follows are opcode definitions shared between multiple opcodes with
 * minor substitutions handled by the C pre-processor.  These should probably
 * use the mterp substitution mechanism instead, with the code here moved
 * into common fragment files (like the asm "binop.S"), although it's hard
 * to give up the C preprocessor in favor of the much simpler text subst.
 *
 * ===========================================================================
 */

#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype) \
    HANDLE_OPCODE(_opcode /*vA, vB*/) \
        vdst = INST_A(inst); \
        vsrc1 = INST_B(inst); \
        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
        SET_REGISTER##_totype(vdst, \
            GET_REGISTER##_fromtype(vsrc1)); \
        FINISH(1);

#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype, \
        _tovtype, _tortype) \
    HANDLE_OPCODE(_opcode /*vA, vB*/) \
    { \
        /* spec defines specific handling for +/- inf and NaN values */ \
        _fromvtype val; \
        _tovtype intMin, intMax, result; \
        vdst = INST_A(inst); \
        vsrc1 = INST_B(inst); \
        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
        val = GET_REGISTER##_fromrtype(vsrc1); \
        intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 -1); \
        intMax = ~intMin; \
        result = (_tovtype) val; \
        if (val >= intMax)          /* +inf */ \
            result = intMax; \
        else if (val <= intMin)     /* -inf */ \
            result = intMin; \
        else if (val != val)        /* NaN */ \
            result = 0; \
        else \
            result = (_tovtype) val; \
        SET_REGISTER##_tortype(vdst, result); \
    } \
    FINISH(1);

#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type) \
    HANDLE_OPCODE(_opcode /*vA, vB*/) \
        vdst = INST_A(inst); \
        vsrc1 = INST_B(inst); \
        ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1); \
        SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1)); \
        FINISH(1);
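
/*
 * Illustrative sketch (not generated code): the clamping rules applied by
 * HANDLE_FLOAT_TO_INT above, written out for the float -> s4 case.  A plain
 * C cast of an out-of-range or NaN float is undefined behavior; the Dalvik
 * spec instead requires saturation and NaN -> 0, which is what the macro
 * implements.  The function name is made up for the example.
 */
#if 0
static s4 exampleFloatToInt(float val)
{
    s4 intMin = (s4) 1 << (sizeof(s4) * 8 - 1);     /* 0x80000000 */
    s4 intMax = ~intMin;                            /* 0x7fffffff */
    if (val >= intMax)          /* +inf or too large */
        return intMax;
    else if (val <= intMin)     /* -inf or too small */
        return intMin;
    else if (val != val)        /* NaN */
        return 0;
    return (s4) val;
}
#endif
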
/* NOTE: the comparison result is always a signed 4-byte integer */
#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal) \
    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
    { \
        int result; \
        u2 regs; \
        _varType val1, val2; \
        vdst = INST_AA(inst); \
        regs = FETCH(1); \
        vsrc1 = regs & 0xff; \
        vsrc2 = regs >> 8; \
        ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
        val1 = GET_REGISTER##_type(vsrc1); \
        val2 = GET_REGISTER##_type(vsrc2); \
        if (val1 == val2) \
            result = 0; \
        else if (val1 < val2) \
            result = -1; \
        else if (val1 > val2) \
            result = 1; \
        else \
            result = (_nanVal); \
        ILOGV("+ result=%d", result); \
        SET_REGISTER(vdst, result); \
    } \
    FINISH(2);

#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp) \
    HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/) \
        vsrc1 = INST_A(inst); \
        vsrc2 = INST_B(inst); \
        if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) { \
            int branchOffset = (s2)FETCH(1);    /* sign-extended */ \
            ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2, \
                branchOffset); \
            ILOGV("> branch taken"); \
            if (branchOffset < 0) \
                PERIODIC_CHECKS(branchOffset); \
            FINISH(branchOffset); \
        } else { \
            ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2); \
            FINISH(2); \
        }

#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp) \
    HANDLE_OPCODE(_opcode /*vAA, +BBBB*/) \
        vsrc1 = INST_AA(inst); \
        if ((s4) GET_REGISTER(vsrc1) _cmp 0) { \
            int branchOffset = (s2)FETCH(1);    /* sign-extended */ \
            ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset); \
            ILOGV("> branch taken"); \
            if (branchOffset < 0) \
                PERIODIC_CHECKS(branchOffset); \
            FINISH(branchOffset); \
        } else { \
            ILOGV("|if-%s v%d,-", (_opname), vsrc1); \
            FINISH(2); \
        }

#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type) \
    HANDLE_OPCODE(_opcode /*vA, vB*/) \
        vdst = INST_A(inst); \
        vsrc1 = INST_B(inst); \
        ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
        SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx); \
        FINISH(1);

#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv) \
    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
    { \
        u2 srcRegs; \
        vdst = INST_AA(inst); \
        srcRegs = FETCH(1); \
        vsrc1 = srcRegs & 0xff; \
        vsrc2 = srcRegs >> 8; \
        ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
        if (_chkdiv != 0) { \
            s4 firstVal, secondVal, result; \
            firstVal = GET_REGISTER(vsrc1); \
            secondVal = GET_REGISTER(vsrc2); \
            if (secondVal == 0) { \
                EXPORT_PC(); \
                dvmThrowArithmeticException("divide by zero"); \
                GOTO_exceptionThrown(); \
            } \
            if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
                if (_chkdiv == 1) \
                    result = firstVal;  /* division */ \
                else \
                    result = 0;         /* remainder */ \
            } else { \
                result = firstVal _op secondVal; \
            } \
            SET_REGISTER(vdst, result); \
        } else { \
            /* non-div/rem case */ \
            SET_REGISTER(vdst, \
                (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2)); \
        } \
    } \
    FINISH(2);

#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op) \
    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
    { \
        u2 srcRegs; \
        vdst = INST_AA(inst); \
        srcRegs = FETCH(1); \
        vsrc1 = srcRegs & 0xff; \
        vsrc2 = srcRegs >> 8; \
        ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
        SET_REGISTER(vdst, \
            _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f)); \
    } \
    FINISH(2);
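
/*
 * Illustrative sketch (not generated code): the _chkdiv special case in
 * HANDLE_OP_X_INT above.  Dividing the most negative int by -1 overflows
 * two's-complement arithmetic (the mathematical result +2^31 is not
 * representable), so the handler pins the quotient to 0x80000000 and the
 * remainder to 0 instead of evaluating "firstVal / secondVal".  The function
 * name is made up for the example.
 */
#if 0
static s4 exampleDivInt(s4 firstVal, s4 secondVal)  /* assumes secondVal != 0 */
{
    if ((u4)firstVal == 0x80000000 && secondVal == -1)
        return firstVal;        /* div: INT_MIN / -1 == INT_MIN; rem would be 0 */
    return firstVal / secondVal;
}
#endif
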
#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _op, _chkdiv) \
    HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/) \
        vdst = INST_A(inst); \
        vsrc1 = INST_B(inst); \
        vsrc2 = FETCH(1); \
        ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x", \
            (_opname), vdst, vsrc1, vsrc2); \
        if (_chkdiv != 0) { \
            s4 firstVal, result; \
            firstVal = GET_REGISTER(vsrc1); \
            if ((s2) vsrc2 == 0) { \
                EXPORT_PC(); \
                dvmThrowArithmeticException("divide by zero"); \
                GOTO_exceptionThrown(); \
            } \
            if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) { \
                /* won't generate /lit16 instr for this; check anyway */ \
                if (_chkdiv == 1) \
                    result = firstVal;  /* division */ \
                else \
                    result = 0;         /* remainder */ \
            } else { \
                result = firstVal _op (s2) vsrc2; \
            } \
            SET_REGISTER(vdst, result); \
        } else { \
            /* non-div/rem case */ \
            SET_REGISTER(vdst, GET_REGISTER(vsrc1) _op (s2) vsrc2); \
        } \
        FINISH(2);

#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv) \
    HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
    { \
        u2 litInfo; \
        vdst = INST_AA(inst); \
        litInfo = FETCH(1); \
        vsrc1 = litInfo & 0xff; \
        vsrc2 = litInfo >> 8;       /* constant */ \
        ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
            (_opname), vdst, vsrc1, vsrc2); \
        if (_chkdiv != 0) { \
            s4 firstVal, result; \
            firstVal = GET_REGISTER(vsrc1); \
            if ((s1) vsrc2 == 0) { \
                EXPORT_PC(); \
                dvmThrowArithmeticException("divide by zero"); \
                GOTO_exceptionThrown(); \
            } \
            if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) { \
                if (_chkdiv == 1) \
                    result = firstVal;  /* division */ \
                else \
                    result = 0;         /* remainder */ \
            } else { \
                result = firstVal _op ((s1) vsrc2); \
            } \
            SET_REGISTER(vdst, result); \
        } else { \
            SET_REGISTER(vdst, \
                (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2); \
        } \
    } \
    FINISH(2);

#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op) \
    HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
    { \
        u2 litInfo; \
        vdst = INST_AA(inst); \
        litInfo = FETCH(1); \
        vsrc1 = litInfo & 0xff; \
        vsrc2 = litInfo >> 8;       /* constant */ \
        ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
            (_opname), vdst, vsrc1, vsrc2); \
        SET_REGISTER(vdst, \
            _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f)); \
    } \
    FINISH(2);

#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv) \
    HANDLE_OPCODE(_opcode /*vA, vB*/) \
        vdst = INST_A(inst); \
        vsrc1 = INST_B(inst); \
        ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
        if (_chkdiv != 0) { \
            s4 firstVal, secondVal, result; \
            firstVal = GET_REGISTER(vdst); \
            secondVal = GET_REGISTER(vsrc1); \
            if (secondVal == 0) { \
                EXPORT_PC(); \
                dvmThrowArithmeticException("divide by zero"); \
                GOTO_exceptionThrown(); \
            } \
            if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
                if (_chkdiv == 1) \
                    result = firstVal;  /* division */ \
                else \
                    result = 0;         /* remainder */ \
            } else { \
                result = firstVal _op secondVal; \
            } \
            SET_REGISTER(vdst, result); \
        } else { \
            SET_REGISTER(vdst, \
                (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1)); \
        } \
        FINISH(1);

#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op) \
    HANDLE_OPCODE(_opcode /*vA, vB*/) \
        vdst = INST_A(inst); \
        vsrc1 = INST_B(inst); \
        ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
        SET_REGISTER(vdst, \
            _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f)); \
        FINISH(1);

#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv) \
    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
    { \
        u2 srcRegs; \
        vdst = INST_AA(inst); \
        srcRegs = FETCH(1); \
        vsrc1 = srcRegs & 0xff; \
        vsrc2 = srcRegs >> 8; \
        ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
        if (_chkdiv != 0) { \
            s8 firstVal, secondVal, result; \
            firstVal = GET_REGISTER_WIDE(vsrc1); \
            secondVal = GET_REGISTER_WIDE(vsrc2); \
            if (secondVal == 0LL) { \
                EXPORT_PC(); \
                dvmThrowArithmeticException("divide by zero"); \
                GOTO_exceptionThrown(); \
            } \
            if ((u8)firstVal == 0x8000000000000000ULL && \
                secondVal == -1LL) \
            { \
                if (_chkdiv == 1) \
                    result = firstVal;  /* division */ \
                else \
                    result = 0;         /* remainder */ \
            } else { \
                result = firstVal _op secondVal; \
            } \
            SET_REGISTER_WIDE(vdst, result); \
        } else { \
            SET_REGISTER_WIDE(vdst, \
                (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
        } \
    } \
    FINISH(2);

#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op) \
    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
    { \
        u2 srcRegs; \
        vdst = INST_AA(inst); \
        srcRegs = FETCH(1); \
        vsrc1 = srcRegs & 0xff; \
        vsrc2 = srcRegs >> 8; \
        ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
        SET_REGISTER_WIDE(vdst, \
            _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
    } \
    FINISH(2);

#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv) \
    HANDLE_OPCODE(_opcode /*vA, vB*/) \
        vdst = INST_A(inst); \
        vsrc1 = INST_B(inst); \
        ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
        if (_chkdiv != 0) { \
            s8 firstVal, secondVal, result; \
            firstVal = GET_REGISTER_WIDE(vdst); \
            secondVal = GET_REGISTER_WIDE(vsrc1); \
            if (secondVal == 0LL) { \
                EXPORT_PC(); \
                dvmThrowArithmeticException("divide by zero"); \
                GOTO_exceptionThrown(); \
            } \
            if ((u8)firstVal == 0x8000000000000000ULL && \
                secondVal == -1LL) \
            { \
                if (_chkdiv == 1) \
                    result = firstVal;  /* division */ \
                else \
                    result = 0;         /* remainder */ \
            } else { \
                result = firstVal _op secondVal; \
            } \
            SET_REGISTER_WIDE(vdst, result); \
        } else { \
            SET_REGISTER_WIDE(vdst, \
                (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1)); \
        } \
        FINISH(1);

#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op) \
    HANDLE_OPCODE(_opcode /*vA, vB*/) \
        vdst = INST_A(inst); \
        vsrc1 = INST_B(inst); \
        ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
        SET_REGISTER_WIDE(vdst, \
            _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
        FINISH(1);

#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op) \
    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
    { \
        u2 srcRegs; \
        vdst = INST_AA(inst); \
        srcRegs = FETCH(1); \
        vsrc1 = srcRegs & 0xff; \
        vsrc2 = srcRegs >> 8; \
        ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
        SET_REGISTER_FLOAT(vdst, \
            GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2)); \
    } \
    FINISH(2);

#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op) \
    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
    { \
        u2 srcRegs; \
        vdst = INST_AA(inst); \
        srcRegs = FETCH(1); \
        vsrc1 = srcRegs & 0xff; \
        vsrc2 = srcRegs >> 8; \
        ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
        SET_REGISTER_DOUBLE(vdst, \
            GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2)); \
    } \
    FINISH(2);

#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op) \
    HANDLE_OPCODE(_opcode /*vA, vB*/) \
        vdst = INST_A(inst); \
        vsrc1 = INST_B(inst); \
        ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1); \
        SET_REGISTER_FLOAT(vdst, \
            GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1)); \
        FINISH(1);

#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op) \
    HANDLE_OPCODE(_opcode /*vA, vB*/) \
        vdst = INST_A(inst); \
        vsrc1 = INST_B(inst); \
        ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1); \
        SET_REGISTER_DOUBLE(vdst, \
            GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1)); \
        FINISH(1);

#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize) \
    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
    { \
        ArrayObject* arrayObj; \
        u2 arrayInfo; \
        EXPORT_PC(); \
        vdst = INST_AA(inst); \
        arrayInfo = FETCH(1); \
        vsrc1 = arrayInfo & 0xff;   /* array ptr */ \
        vsrc2 = arrayInfo >> 8;     /* index */ \
        ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
        if (!checkForNull((Object*) arrayObj)) \
            GOTO_exceptionThrown(); \
        if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
            dvmThrowArrayIndexOutOfBoundsException( \
                arrayObj->length, GET_REGISTER(vsrc2)); \
            GOTO_exceptionThrown(); \
        } \
        SET_REGISTER##_regsize(vdst, \
            ((_type*)(void*)arrayObj->contents)[GET_REGISTER(vsrc2)]); \
        ILOGV("+ AGET[%d]=%#x", GET_REGISTER(vsrc2), GET_REGISTER(vdst)); \
    } \
    FINISH(2);

#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize) \
    HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
    { \
        ArrayObject* arrayObj; \
        u2 arrayInfo; \
        EXPORT_PC(); \
        vdst = INST_AA(inst);       /* AA: source value */ \
        arrayInfo = FETCH(1); \
        vsrc1 = arrayInfo & 0xff;   /* BB: array ptr */ \
        vsrc2 = arrayInfo >> 8;     /* CC: index */ \
        ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
        arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
        if (!checkForNull((Object*) arrayObj)) \
            GOTO_exceptionThrown(); \
        if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
            dvmThrowArrayIndexOutOfBoundsException( \
                arrayObj->length, GET_REGISTER(vsrc2)); \
            GOTO_exceptionThrown(); \
        } \
        ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst)); \
        ((_type*)(void*)arrayObj->contents)[GET_REGISTER(vsrc2)] = \
            GET_REGISTER##_regsize(vdst); \
    } \
    FINISH(2);
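
/*
 * Illustrative note (not generated code): the bounds test in the AGET/APUT
 * macros above relies on GET_REGISTER() yielding an unsigned u4, so a single
 * compare also rejects negative indices -- an index of -1 is seen as
 * 0xffffffff, which is >= any array length.
 */
#if 0
static bool exampleIndexInBounds(u4 index, u4 length)
{
    return index < length;      /* one unsigned compare covers both cases */
}
#endif
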
/*
 * It's possible to get a bad value out of a field with sub-32-bit stores
 * because the -quick versions always operate on 32 bits.  Consider:
 *   short foo = -1  (sets a 32-bit register to 0xffffffff)
 *   iput-quick foo  (writes all 32 bits to the field)
 *   short bar = 1   (sets a 32-bit register to 0x00000001)
 *   iput-short      (writes the low 16 bits to the field)
 *   iget-quick foo  (reads all 32 bits from the field, yielding 0xffff0001)
 * This can only happen when optimized and non-optimized code has interleaved
 * access to the same field.  This is unlikely but possible.
 *
 * The easiest way to fix this is to always read/write 32 bits at a time.  On
 * a device with a 16-bit data bus this is sub-optimal.  (The alternative
 * approach is to have sub-int versions of iget-quick, but now we're wasting
 * Dalvik instruction space and making it less likely that handler code will
 * already be in the CPU i-cache.)
 */
#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize) \
    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
    { \
        InstField* ifield; \
        Object* obj; \
        EXPORT_PC(); \
        vdst = INST_A(inst); \
        vsrc1 = INST_B(inst);   /* object ptr */ \
        ref = FETCH(1);         /* field ref */ \
        ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
        obj = (Object*) GET_REGISTER(vsrc1); \
        if (!checkForNull(obj)) \
            GOTO_exceptionThrown(); \
        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
        if (ifield == NULL) { \
            ifield = dvmResolveInstField(curMethod->clazz, ref); \
            if (ifield == NULL) \
                GOTO_exceptionThrown(); \
        } \
        SET_REGISTER##_regsize(vdst, \
            dvmGetField##_ftype(obj, ifield->byteOffset)); \
        ILOGV("+ IGET '%s'=0x%08llx", ifield->name, \
            (u8) GET_REGISTER##_regsize(vdst)); \
    } \
    FINISH(2);

#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize) \
    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
    { \
        Object* obj; \
        vdst = INST_A(inst); \
        vsrc1 = INST_B(inst);   /* object ptr */ \
        ref = FETCH(1);         /* field offset */ \
        ILOGV("|iget%s-quick v%d,v%d,field@+%u", \
            (_opname), vdst, vsrc1, ref); \
        obj = (Object*) GET_REGISTER(vsrc1); \
        if (!checkForNullExportPC(obj, fp, pc)) \
            GOTO_exceptionThrown(); \
        SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref)); \
        ILOGV("+ IGETQ %d=0x%08llx", ref, \
            (u8) GET_REGISTER##_regsize(vdst)); \
    } \
    FINISH(2);

#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize) \
    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
    { \
        InstField* ifield; \
        Object* obj; \
        EXPORT_PC(); \
        vdst = INST_A(inst); \
        vsrc1 = INST_B(inst);   /* object ptr */ \
        ref = FETCH(1);         /* field ref */ \
        ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
        obj = (Object*) GET_REGISTER(vsrc1); \
        if (!checkForNull(obj)) \
            GOTO_exceptionThrown(); \
        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
        if (ifield == NULL) { \
            ifield = dvmResolveInstField(curMethod->clazz, ref); \
            if (ifield == NULL) \
                GOTO_exceptionThrown(); \
        } \
        dvmSetField##_ftype(obj, ifield->byteOffset, \
            GET_REGISTER##_regsize(vdst)); \
        ILOGV("+ IPUT '%s'=0x%08llx", ifield->name, \
            (u8) GET_REGISTER##_regsize(vdst)); \
    } \
    FINISH(2);

#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize) \
    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
    { \
        Object* obj; \
        vdst = INST_A(inst); \
        vsrc1 = INST_B(inst);   /* object ptr */ \
        ref = FETCH(1);         /* field offset */ \
        ILOGV("|iput%s-quick v%d,v%d,field@0x%04x", \
            (_opname), vdst, vsrc1, ref); \
        obj = (Object*) GET_REGISTER(vsrc1); \
        if (!checkForNullExportPC(obj, fp, pc)) \
            GOTO_exceptionThrown(); \
        dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst)); \
        ILOGV("+ IPUTQ %d=0x%08llx", ref, \
            (u8) GET_REGISTER##_regsize(vdst)); \
    } \
    FINISH(2);
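
/*
 * Illustrative sketch (not generated code): the difference between the two
 * iget flavors above.  A normal iget carries a field *reference* that must
 * be resolved (and is then cached in the dex file's resolved-field table),
 * while the -quick form, rewritten by the optimizer, carries the raw byte
 * offset of the field and skips resolution.  The fragment below assumes an
 * int field and is not complete code.
 */
#if 0
InstField* ifield;
u4 val;
/* normal iget: resolve the field ref on first use, then hit the dex cache */
ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);
if (ifield == NULL)
    ifield = dvmResolveInstField(curMethod->clazz, ref);
val = dvmGetFieldInt(obj, ifield->byteOffset);

/* iget-quick: "ref" already is the byte offset, no resolution needed */
val = dvmGetFieldInt(obj, ref);
#endif
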
/*
 * The JIT needs dvmDexGetResolvedField() to return non-null.
 * Because the portable interpreter is not involved with the JIT
 * and trace building, we only need the extra check here when this
 * code is massaged into a stub called from an assembly interpreter.
 * This is controlled by the JIT_STUB_HACK macro.
 */

#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize) \
    HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
    { \
        StaticField* sfield; \
        vdst = INST_AA(inst); \
        ref = FETCH(1);         /* field ref */ \
        ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
        if (sfield == NULL) { \
            EXPORT_PC(); \
            sfield = dvmResolveStaticField(curMethod->clazz, ref); \
            if (sfield == NULL) \
                GOTO_exceptionThrown(); \
            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
                JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \
            } \
        } \
        SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \
        ILOGV("+ SGET '%s'=0x%08llx", \
            sfield->name, (u8)GET_REGISTER##_regsize(vdst)); \
    } \
    FINISH(2);

#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize) \
    HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
    { \
        StaticField* sfield; \
        vdst = INST_AA(inst); \
        ref = FETCH(1);         /* field ref */ \
        ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
        sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
        if (sfield == NULL) { \
            EXPORT_PC(); \
            sfield = dvmResolveStaticField(curMethod->clazz, ref); \
            if (sfield == NULL) \
                GOTO_exceptionThrown(); \
            if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
                JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \
            } \
        } \
        dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \
        ILOGV("+ SPUT '%s'=0x%08llx", \
            sfield->name, (u8)GET_REGISTER##_regsize(vdst)); \
    } \
    FINISH(2);

/* File: c/OP_IGET_WIDE_VOLATILE.cpp */
HANDLE_IGET_X(OP_IGET_WIDE_VOLATILE,    "-wide-volatile", LongVolatile, _WIDE)
OP_END

/* File: c/OP_IPUT_WIDE_VOLATILE.cpp */
HANDLE_IPUT_X(OP_IPUT_WIDE_VOLATILE,    "-wide-volatile", LongVolatile, _WIDE)
OP_END

/* File: c/OP_SGET_WIDE_VOLATILE.cpp */
HANDLE_SGET_X(OP_SGET_WIDE_VOLATILE,    "-wide-volatile", LongVolatile, _WIDE)
OP_END

/* File: c/OP_SPUT_WIDE_VOLATILE.cpp */
HANDLE_SPUT_X(OP_SPUT_WIDE_VOLATILE,    "-wide-volatile", LongVolatile, _WIDE)
OP_END
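
/*
 * Illustrative sketch (not generated code): what one of the instantiations
 * above means.  HANDLE_IGET_X(OP_IGET_WIDE_VOLATILE, ...) token-pastes its
 * arguments into the macro body, so (assuming the usual Dalvik field
 * accessor naming) the field load and register store become roughly the
 * line below, once "ifield" has been resolved.
 */
#if 0
SET_REGISTER_WIDE(vdst, dvmGetFieldLongVolatile(obj, ifield->byteOffset));
#endif
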
/* File: c/OP_EXECUTE_INLINE_RANGE.cpp */
HANDLE_OPCODE(OP_EXECUTE_INLINE_RANGE /*{vCCCC..v(CCCC+AA-1)}, inline@BBBB*/)
    {
        u4 arg0, arg1, arg2, arg3;
        arg0 = arg1 = arg2 = arg3 = 0;      /* placate gcc */

        EXPORT_PC();

        vsrc1 = INST_AA(inst);      /* #of args */
        ref = FETCH(1);             /* inline call "ref" */
        vdst = FETCH(2);            /* range base */
        ILOGV("|execute-inline-range args=%d @%d {regs=v%d-v%d}",
            vsrc1, ref, vdst, vdst+vsrc1-1);

        assert((vdst >> 16) == 0);  // 16-bit type -or- high 16 bits clear
        assert(vsrc1 <= 4);

        switch (vsrc1) {
        case 4:
            arg3 = GET_REGISTER(vdst+3);
            /* fall through */
        case 3:
            arg2 = GET_REGISTER(vdst+2);
            /* fall through */
        case 2:
            arg1 = GET_REGISTER(vdst+1);
            /* fall through */
        case 1:
            arg0 = GET_REGISTER(vdst+0);
            /* fall through */
        default:        // case 0
            ;
        }

        if (self->interpBreak.ctl.subMode & kSubModeDebugProfile) {
            if (!dvmPerformInlineOp4Dbg(arg0, arg1, arg2, arg3, &retval, ref))
                GOTO_exceptionThrown();
        } else {
            if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref))
                GOTO_exceptionThrown();
        }
    }
    FINISH(3);
OP_END

/* File: c/OP_INVOKE_OBJECT_INIT_RANGE.cpp */
HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/)
    {
        Object* obj;

        vsrc1 = FETCH(2);           /* reg number of "this" pointer */
        obj = GET_REGISTER_AS_OBJECT(vsrc1);

        if (!checkForNullExportPC(obj, fp, pc))
            GOTO_exceptionThrown();

        /*
         * The object should be marked "finalizable" when Object.<init>
         * completes normally.  We're going to assume it does complete
         * (by virtue of being nothing but a return-void) and set it now.
         */
        if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) {
            EXPORT_PC();
            dvmSetFinalizable(obj);
            if (dvmGetException(self))
                GOTO_exceptionThrown();
        }

        if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) {
            /* behave like OP_INVOKE_DIRECT_RANGE */
            GOTO_invoke(invokeDirect, true);
        }
        FINISH(3);
    }
OP_END

/* File: c/OP_RETURN_VOID_BARRIER.cpp */
HANDLE_OPCODE(OP_RETURN_VOID_BARRIER /**/)
    ILOGV("|return-void");
#ifndef NDEBUG
    retval.j = 0xababababULL;   /* placate valgrind */
#endif
    ANDROID_MEMBAR_STORE();
    GOTO_returnFromMethod();
OP_END

/* File: c/gotoTargets.cpp */
/*
 * C footer.  This has some common code shared by the various targets.
 */

/*
 * Everything from here on is a "goto target".  In the basic interpreter
 * we jump into these targets and then jump directly to the handler for
 * the next instruction.  Here, these are subroutines that return to the
 * caller.
 */

GOTO_TARGET(filledNewArray, bool methodCallRange, bool)
    {
        ClassObject* arrayClass;
        ArrayObject* newArray;
        u4* contents;
        char typeCh;
        int i;
        u4 arg5;

        EXPORT_PC();

        ref = FETCH(1);             /* class ref */
        vdst = FETCH(2);            /* first 4 regs -or- range base */

        if (methodCallRange) {
            vsrc1 = INST_AA(inst);  /* #of elements */
            arg5 = -1;              /* silence compiler warning */
            ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
                vsrc1, ref, vdst, vdst+vsrc1-1);
        } else {
            arg5 = INST_A(inst);
            vsrc1 = INST_B(inst);   /* #of elements */
            ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
                vsrc1, ref, vdst, arg5);
        }

        /*
         * Resolve the array class.
         */
        arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
        if (arrayClass == NULL) {
            arrayClass = dvmResolveClass(curMethod->clazz, ref, false);
            if (arrayClass == NULL)
                GOTO_exceptionThrown();
        }
        /*
        if (!dvmIsArrayClass(arrayClass)) {
            dvmThrowRuntimeException(
                "filled-new-array needs array class");
            GOTO_exceptionThrown();
        }
        */
        /* verifier guarantees this is an array class */
        assert(dvmIsArrayClass(arrayClass));
        assert(dvmIsClassInitialized(arrayClass));

        /*
         * Create an array of the specified type.
         */
        LOGVV("+++ filled-new-array type is '%s'", arrayClass->descriptor);
        typeCh = arrayClass->descriptor[1];
        if (typeCh == 'D' || typeCh == 'J') {
            /* category 2 primitives not allowed */
            dvmThrowRuntimeException("bad filled array req");
            GOTO_exceptionThrown();
        } else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') {
            /* TODO: requires multiple "fill in" loops with different widths */
            ALOGE("non-int primitives not implemented");
            dvmThrowInternalError(
                "filled-new-array not implemented for anything but 'int'");
            GOTO_exceptionThrown();
        }

        newArray = dvmAllocArrayByClass(arrayClass, vsrc1, ALLOC_DONT_TRACK);
        if (newArray == NULL)
            GOTO_exceptionThrown();

        /*
         * Fill in the elements.  It's legal for vsrc1 to be zero.
         */
        contents = (u4*)(void*)newArray->contents;
        if (methodCallRange) {
            for (i = 0; i < vsrc1; i++)
                contents[i] = GET_REGISTER(vdst+i);
        } else {
            assert(vsrc1 <= 5);
            if (vsrc1 == 5) {
                contents[4] = GET_REGISTER(arg5);
                vsrc1--;
            }
            for (i = 0; i < vsrc1; i++) {
                contents[i] = GET_REGISTER(vdst & 0x0f);
                vdst >>= 4;
            }
        }
        if (typeCh == 'L' || typeCh == '[') {
            dvmWriteBarrierArray(newArray, 0, newArray->length);
        }

        retval.l = (Object*)newArray;
    }
    FINISH(3);
GOTO_TARGET_END


GOTO_TARGET(invokeVirtual, bool methodCallRange, bool)
    {
        Method* baseMethod;
        Object* thisPtr;

        EXPORT_PC();

        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
        ref = FETCH(1);             /* method ref */
        vdst = FETCH(2);            /* 4 regs -or- first reg */

        /*
         * The object against which we are executing a method is always
         * in the first argument.
         */
        if (methodCallRange) {
            assert(vsrc1 > 0);
            ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
                vsrc1, ref, vdst, vdst+vsrc1-1);
            thisPtr = (Object*) GET_REGISTER(vdst);
        } else {
            assert((vsrc1>>4) > 0);
            ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
        }

        if (!checkForNull(thisPtr))
            GOTO_exceptionThrown();

        /*
         * Resolve the method.  This is the correct method for the static
         * type of the object.  We also verify access permissions here.
         */
        baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
        if (baseMethod == NULL) {
            baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
            if (baseMethod == NULL) {
                ILOGV("+ unknown method or access denied");
                GOTO_exceptionThrown();
            }
        }

        /*
         * Combine the object we found with the vtable offset in the
         * method.
         */
        assert(baseMethod->methodIndex < thisPtr->clazz->vtableCount);
        methodToCall = thisPtr->clazz->vtable[baseMethod->methodIndex];

#if defined(WITH_JIT) && defined(MTERP_STUB)
        self->methodToCall = methodToCall;
        self->callsiteClass = thisPtr->clazz;
#endif

#if 0
        if (dvmIsAbstractMethod(methodToCall)) {
            /*
             * This can happen if you create two classes, Base and Sub, where
             * Sub is a sub-class of Base.  Declare a protected abstract
             * method foo() in Base, and invoke foo() from a method in Base.
             * Base is an "abstract base class" and is never instantiated
             * directly.  Now, override foo() in Sub, and use Sub.
             * This works fine unless Sub stops providing an implementation of
             * the method.
             */
            dvmThrowAbstractMethodError("abstract method not implemented");
            GOTO_exceptionThrown();
        }
#else
        assert(!dvmIsAbstractMethod(methodToCall) ||
            methodToCall->nativeFunc != NULL);
#endif

        LOGVV("+++ base=%s.%s virtual[%d]=%s.%s",
            baseMethod->clazz->descriptor, baseMethod->name,
            (u4) baseMethod->methodIndex,
            methodToCall->clazz->descriptor, methodToCall->name);
        assert(methodToCall != NULL);

#if 0
        if (vsrc1 != methodToCall->insSize) {
            ALOGW("WRONG METHOD: base=%s.%s virtual[%d]=%s.%s",
                baseMethod->clazz->descriptor, baseMethod->name,
                (u4) baseMethod->methodIndex,
                methodToCall->clazz->descriptor, methodToCall->name);
            //dvmDumpClass(baseMethod->clazz);
            //dvmDumpClass(methodToCall->clazz);
            dvmDumpAllClasses(0);
        }
#endif

        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
    }
GOTO_TARGET_END

GOTO_TARGET(invokeSuper, bool methodCallRange)
    {
        Method* baseMethod;
        u2 thisReg;

        EXPORT_PC();

        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
        ref = FETCH(1);             /* method ref */
        vdst = FETCH(2);            /* 4 regs -or- first reg */

        if (methodCallRange) {
            ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
                vsrc1, ref, vdst, vdst+vsrc1-1);
            thisReg = vdst;
        } else {
            ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
            thisReg = vdst & 0x0f;
        }

        /* impossible in well-formed code, but we must check nevertheless */
        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
            GOTO_exceptionThrown();

        /*
         * Resolve the method.  This is the correct method for the static
         * type of the object.  We also verify access permissions here.
         * The first arg to dvmResolveMethod() is just the referring class
         * (used for class loaders and such), so we don't want to pass
         * the superclass into the resolution call.
         */
        baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
        if (baseMethod == NULL) {
            baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
            if (baseMethod == NULL) {
                ILOGV("+ unknown method or access denied");
                GOTO_exceptionThrown();
            }
        }

        /*
         * Combine the object we found with the vtable offset in the
         * method's class.
         *
         * We're using the current method's class' superclass, not the
         * superclass of "this".  This is because we might be executing
         * in a method inherited from a superclass, and we want to run
         * in that class' superclass.
         */
        if (baseMethod->methodIndex >= curMethod->clazz->super->vtableCount) {
            /*
             * Method does not exist in the superclass.  Could happen if
             * superclass gets updated.
             */
            dvmThrowNoSuchMethodError(baseMethod->name);
            GOTO_exceptionThrown();
        }
        methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex];

#if 0
        if (dvmIsAbstractMethod(methodToCall)) {
            dvmThrowAbstractMethodError("abstract method not implemented");
            GOTO_exceptionThrown();
        }
#else
        assert(!dvmIsAbstractMethod(methodToCall) ||
            methodToCall->nativeFunc != NULL);
#endif
        LOGVV("+++ base=%s.%s super-virtual=%s.%s",
            baseMethod->clazz->descriptor, baseMethod->name,
            methodToCall->clazz->descriptor, methodToCall->name);
        assert(methodToCall != NULL);

        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
    }
GOTO_TARGET_END

GOTO_TARGET(invokeInterface, bool methodCallRange)
    {
        Object* thisPtr;
        ClassObject* thisClass;

        EXPORT_PC();

        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
        ref = FETCH(1);             /* method ref */
        vdst = FETCH(2);            /* 4 regs -or- first reg */

        /*
         * The object against which we are executing a method is always
         * in the first argument.
         */
        if (methodCallRange) {
            assert(vsrc1 > 0);
            ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
                vsrc1, ref, vdst, vdst+vsrc1-1);
            thisPtr = (Object*) GET_REGISTER(vdst);
        } else {
            assert((vsrc1>>4) > 0);
            ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
        }

        if (!checkForNull(thisPtr))
            GOTO_exceptionThrown();

        thisClass = thisPtr->clazz;

        /*
         * Given a class and a method index, find the Method* with the
         * actual code we want to execute.
         */
        methodToCall = dvmFindInterfaceMethodInCache(thisClass, ref, curMethod,
                            methodClassDex);
#if defined(WITH_JIT) && defined(MTERP_STUB)
        self->callsiteClass = thisClass;
        self->methodToCall = methodToCall;
#endif
        if (methodToCall == NULL) {
            assert(dvmCheckException(self));
            GOTO_exceptionThrown();
        }

        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
    }
GOTO_TARGET_END

GOTO_TARGET(invokeDirect, bool methodCallRange)
    {
        u2 thisReg;

        EXPORT_PC();

        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
        ref = FETCH(1);             /* method ref */
        vdst = FETCH(2);            /* 4 regs -or- first reg */

        if (methodCallRange) {
            ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
                vsrc1, ref, vdst, vdst+vsrc1-1);
            thisReg = vdst;
        } else {
            ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
            thisReg = vdst & 0x0f;
        }

        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
            GOTO_exceptionThrown();

        methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
        if (methodToCall == NULL) {
            methodToCall = dvmResolveMethod(curMethod->clazz, ref,
                METHOD_DIRECT);
            if (methodToCall == NULL) {
                ILOGV("+ unknown direct method");   // should be impossible
                GOTO_exceptionThrown();
            }
        }
        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
    }
GOTO_TARGET_END

GOTO_TARGET(invokeStatic, bool methodCallRange)
    EXPORT_PC();

    vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
    ref = FETCH(1);             /* method ref */
    vdst = FETCH(2);            /* 4 regs -or- first reg */

    if (methodCallRange)
        ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
            vsrc1, ref, vdst, vdst+vsrc1-1);
    else
        ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
            vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);

    methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
    if (methodToCall == NULL) {
        methodToCall = dvmResolveMethod(curMethod->clazz, ref, METHOD_STATIC);
        if (methodToCall == NULL) {
            ILOGV("+ unknown method");
            GOTO_exceptionThrown();
        }

#if defined(WITH_JIT) && defined(MTERP_STUB)
        /*
         * The JIT needs dvmDexGetResolvedMethod() to return non-null.
         * Include the check if this code is being used as a stub
         * called from the assembly interpreter.
         */
        if ((self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) &&
            (dvmDexGetResolvedMethod(methodClassDex, ref) == NULL)) {
            /* Class initialization is still ongoing */
            dvmJitEndTraceSelect(self,pc);
        }
#endif
    }
    GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
GOTO_TARGET_END
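
/*
 * Illustrative note (not generated code): how the invoke operands above are
 * decoded for the non-range form.  vsrc1 packs the argument count in its
 * high nibble and the fifth register (if any) in its low nibble, while the
 * word fetched into vdst packs up to four registers, four bits each, lowest
 * nibble first.  The concrete values here are made up for the example.
 */
#if 0
    /* hypothetical invoke-virtual {v2, v3, v4}: count=3, no 5th arg */
    vsrc1 = 0x30;                   /* vsrc1 >> 4 == 3 arguments */
    vdst  = 0x0432;                 /* low nibbles: v2, v3, v4 */
    assert((vdst & 0x0f) == 2);     /* "this" comes from the first register */
#endif
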
GOTO_TARGET(invokeVirtualQuick, bool methodCallRange)
    {
        Object* thisPtr;

        EXPORT_PC();

        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
        ref = FETCH(1);             /* vtable index */
        vdst = FETCH(2);            /* 4 regs -or- first reg */

        /*
         * The object against which we are executing a method is always
         * in the first argument.
         */
        if (methodCallRange) {
            assert(vsrc1 > 0);
            ILOGV("|invoke-virtual-quick-range args=%d @0x%04x {regs=v%d-v%d}",
                vsrc1, ref, vdst, vdst+vsrc1-1);
            thisPtr = (Object*) GET_REGISTER(vdst);
        } else {
            assert((vsrc1>>4) > 0);
            ILOGV("|invoke-virtual-quick args=%d @0x%04x {regs=0x%04x %x}",
                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
        }

        if (!checkForNull(thisPtr))
            GOTO_exceptionThrown();


        /*
         * Combine the object we found with the vtable offset in the
         * method.
         */
        assert(ref < (unsigned int) thisPtr->clazz->vtableCount);
        methodToCall = thisPtr->clazz->vtable[ref];
#if defined(WITH_JIT) && defined(MTERP_STUB)
        self->callsiteClass = thisPtr->clazz;
        self->methodToCall = methodToCall;
#endif

#if 0
        if (dvmIsAbstractMethod(methodToCall)) {
            dvmThrowAbstractMethodError("abstract method not implemented");
            GOTO_exceptionThrown();
        }
#else
        assert(!dvmIsAbstractMethod(methodToCall) ||
            methodToCall->nativeFunc != NULL);
#endif

        LOGVV("+++ virtual[%d]=%s.%s",
            ref, methodToCall->clazz->descriptor, methodToCall->name);
        assert(methodToCall != NULL);

        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
    }
GOTO_TARGET_END

GOTO_TARGET(invokeSuperQuick, bool methodCallRange)
    {
        u2 thisReg;

        EXPORT_PC();

        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
        ref = FETCH(1);             /* vtable index */
        vdst = FETCH(2);            /* 4 regs -or- first reg */

        if (methodCallRange) {
            ILOGV("|invoke-super-quick-range args=%d @0x%04x {regs=v%d-v%d}",
                vsrc1, ref, vdst, vdst+vsrc1-1);
            thisReg = vdst;
        } else {
            ILOGV("|invoke-super-quick args=%d @0x%04x {regs=0x%04x %x}",
                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
            thisReg = vdst & 0x0f;
        }
        /* impossible in well-formed code, but we must check nevertheless */
        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
            GOTO_exceptionThrown();

#if 0   /* impossible in optimized + verified code */
        if (ref >= curMethod->clazz->super->vtableCount) {
            dvmThrowNoSuchMethodError(NULL);
            GOTO_exceptionThrown();
        }
#else
        assert(ref < (unsigned int) curMethod->clazz->super->vtableCount);
#endif

        /*
         * Combine the object we found with the vtable offset in the
         * method's class.
         *
         * We're using the current method's class' superclass, not the
         * superclass of "this".  This is because we might be executing
         * in a method inherited from a superclass, and we want to run
         * in the method's class' superclass.
         */
        methodToCall = curMethod->clazz->super->vtable[ref];

#if 0
        if (dvmIsAbstractMethod(methodToCall)) {
            dvmThrowAbstractMethodError("abstract method not implemented");
            GOTO_exceptionThrown();
        }
#else
        assert(!dvmIsAbstractMethod(methodToCall) ||
            methodToCall->nativeFunc != NULL);
#endif
        LOGVV("+++ super-virtual[%d]=%s.%s",
            ref, methodToCall->clazz->descriptor, methodToCall->name);
        assert(methodToCall != NULL);
        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
    }
GOTO_TARGET_END

 */
GOTO_TARGET(returnFromMethod)
{
    StackSaveArea* saveArea;

    /*
     * We must do this BEFORE we pop the previous stack frame off, so
     * that the GC can see the return value (if any) in the local vars.
     *
     * Since this is now an interpreter switch point, we must do it before
     * we do anything at all.
     */
    PERIODIC_CHECKS(0);

    ILOGV("> retval=0x%llx (leaving %s.%s %s)",
        retval.j, curMethod->clazz->descriptor, curMethod->name,
        curMethod->shorty);
    //DUMP_REGS(curMethod, fp);

    saveArea = SAVEAREA_FROM_FP(fp);

#ifdef EASY_GDB
    debugSaveArea = saveArea;
#endif

    /* back up to previous frame and see if we hit a break */
    fp = (u4*)saveArea->prevFrame;
    assert(fp != NULL);

    /* Handle any special subMode requirements */
    if (self->interpBreak.ctl.subMode != 0) {
        PC_FP_TO_SELF();
        dvmReportReturn(self);
    }

    if (dvmIsBreakFrame(fp)) {
        /* bail without popping the method frame from stack */
        LOGVV("+++ returned into break frame");
        GOTO_bail();
    }

    /* update thread FP, and reset local variables */
    self->interpSave.curFrame = fp;
    curMethod = SAVEAREA_FROM_FP(fp)->method;
    self->interpSave.method = curMethod;
    //methodClass = curMethod->clazz;
    methodClassDex = curMethod->clazz->pDvmDex;
    pc = saveArea->savedPc;
    ILOGD("> (return to %s.%s %s)", curMethod->clazz->descriptor,
        curMethod->name, curMethod->shorty);

    /* use FINISH on the caller's invoke instruction */
    //u2 invokeInstr = INST_INST(FETCH(0));
    if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
        invokeInstr <= OP_INVOKE_INTERFACE*/)
    {
        FINISH(3);
    } else {
        //ALOGE("Unknown invoke instr %02x at %d",
        //    invokeInstr, (int) (pc - curMethod->insns));
        assert(false);
    }
}
GOTO_TARGET_END


/*
 * Jump here when the code throws an exception.
 *
 * By the time we get here, the Throwable has been created and the stack
 * trace has been saved off.
 */
GOTO_TARGET(exceptionThrown)
{
    Object* exception;
    int catchRelPc;

    PERIODIC_CHECKS(0);

    /*
     * We save off the exception and clear the exception status.  While
     * processing the exception we might need to load some Throwable
     * classes, and we don't want class loader exceptions to get
     * confused with this one.
     */
    assert(dvmCheckException(self));
    exception = dvmGetException(self);
    dvmAddTrackedAlloc(exception, self);
    dvmClearException(self);

    ALOGV("Handling exception %s at %s:%d",
        exception->clazz->descriptor, curMethod->name,
        dvmLineNumFromPC(curMethod, pc - curMethod->insns));

    /*
     * Report the exception throw to any "subMode" watchers.
     *
     * TODO: if the exception was thrown by interpreted code, control
     * fell through native, and then back to us, we will report the
     * exception at the point of the throw and again here.  We can avoid
     * this by not reporting exceptions when we jump here directly from
     * the native call code above, but then we won't report exceptions
     * that were thrown *from* the JNI code (as opposed to *through* it).
     *
     * The correct solution is probably to ignore from-native exceptions
     * here, and have the JNI exception code do the reporting to the
     * debugger.
     */
    if (self->interpBreak.ctl.subMode != 0) {
        PC_FP_TO_SELF();
        dvmReportExceptionThrow(self, exception);
    }

    /*
     * We need to unroll to the catch block or the nearest "break"
     * frame.
     *
     * A break frame could indicate that we have reached an intermediate
     * native call, or have gone off the top of the stack and the thread
     * needs to exit.  Either way, we return from here, leaving the
     * exception raised.
     *
     * If we do find a catch block, we want to transfer execution to
     * that point.
     *
     * Note this can cause an exception while resolving classes in
     * the "catch" blocks.
     */
    catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
                exception, false, (void**)(void*)&fp);

    /*
     * Restore the stack bounds after an overflow.  This isn't going to
     * be correct in all circumstances, e.g. if JNI code devours the
     * exception this won't happen until some other exception gets
     * thrown.  If the code keeps pushing the stack bounds we'll end
     * up aborting the VM.
     *
     * Note we want to do this *after* the call to dvmFindCatchBlock,
     * because that may need extra stack space to resolve exception
     * classes (e.g. through a class loader).
     *
     * It's possible for the stack overflow handling to cause an
     * exception (specifically, class resolution in a "catch" block
     * during the call above), so we could see the thread's overflow
     * flag raised but actually be running in a "nested" interpreter
     * frame.  We don't allow doubled-up StackOverflowErrors, so
     * we can check for this by just looking at the exception type
     * in the cleanup function.  Also, we won't unroll past the SOE
     * point because the more-recent exception will hit a break frame
     * as it unrolls to here.
     */
    if (self->stackOverflowed)
        dvmCleanupStackOverflow(self, exception);

    if (catchRelPc < 0) {
        /* falling through to JNI code or off the bottom of the stack */
#if DVM_SHOW_EXCEPTION >= 2
        ALOGD("Exception %s from %s:%d not caught locally",
            exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
            dvmLineNumFromPC(curMethod, pc - curMethod->insns));
#endif
        dvmSetException(self, exception);
        dvmReleaseTrackedAlloc(exception, self);
        GOTO_bail();
    }

#if DVM_SHOW_EXCEPTION >= 3
    {
        const Method* catchMethod = SAVEAREA_FROM_FP(fp)->method;
        ALOGD("Exception %s thrown from %s:%d to %s:%d",
            exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
            dvmLineNumFromPC(curMethod, pc - curMethod->insns),
            dvmGetMethodSourceFile(catchMethod),
            dvmLineNumFromPC(catchMethod, catchRelPc));
    }
#endif

    /*
     * Adjust local variables to match self->interpSave.curFrame and the
     * updated PC.
     */
    //fp = (u4*) self->interpSave.curFrame;
    curMethod = SAVEAREA_FROM_FP(fp)->method;
    self->interpSave.method = curMethod;
    //methodClass = curMethod->clazz;
    methodClassDex = curMethod->clazz->pDvmDex;
    pc = curMethod->insns + catchRelPc;
    ILOGV("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
        curMethod->name, curMethod->shorty);
    DUMP_REGS(curMethod, fp, false);            // show all regs

    /*
     * Restore the exception if the handler wants it.
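     *
     * (For illustration: a catch handler emitted by dx typically begins
     * with "move-exception vAA" followed by the handler body, and the
     * INST_INST(FETCH(0)) peek below is examining exactly that first
     * instruction at the catch address just installed in "pc".)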
     *
     * The Dalvik spec mandates that, if an exception handler wants to
     * do something with the exception, the first instruction executed
     * must be "move-exception".  We can pass the exception along
     * through the thread struct, and let the move-exception instruction
     * clear it for us.
     *
     * If the handler doesn't call move-exception, we don't want to
     * finish here with an exception still pending.
     */
    if (INST_INST(FETCH(0)) == OP_MOVE_EXCEPTION)
        dvmSetException(self, exception);

    dvmReleaseTrackedAlloc(exception, self);
    FINISH(0);
}
GOTO_TARGET_END


/*
 * General handling for invoke-{virtual,super,direct,static,interface},
 * including "quick" variants.
 *
 * Set "methodToCall" to the Method we're calling, and set "methodCallRange"
 * depending on whether this is a "/range" instruction.
 *
 * For a range call:
 *  "vsrc1" holds the argument count (8 bits)
 *  "vdst" holds the first argument in the range
 * For a non-range call:
 *  "vsrc1" holds the argument count (4 bits) and the 5th argument index
 *  "vdst" holds four 4-bit register indices
 *
 * The caller must EXPORT_PC before jumping here, because any method
 * call can throw a stack overflow exception.
 */
GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall,
    u2 count, u2 regs)
{
    STUB_HACK(vsrc1 = count; vdst = regs; methodToCall = _methodToCall;);

    //printf("range=%d call=%p count=%d regs=0x%04x\n",
    //    methodCallRange, methodToCall, count, regs);
    //printf(" --> %s.%s %s\n", methodToCall->clazz->descriptor,
    //    methodToCall->name, methodToCall->shorty);

    u4* outs;
    int i;

    /*
     * Copy args.  This may corrupt vsrc1/vdst.
     */
    if (methodCallRange) {
        // could use memcpy or a "Duff's device"; most functions have
        // so few args it won't matter much
        assert(vsrc1 <= curMethod->outsSize);
        assert(vsrc1 == methodToCall->insSize);
        outs = OUTS_FROM_FP(fp, vsrc1);
        for (i = 0; i < vsrc1; i++)
            outs[i] = GET_REGISTER(vdst+i);
    } else {
        u4 count = vsrc1 >> 4;

        assert(count <= curMethod->outsSize);
        assert(count == methodToCall->insSize);
        assert(count <= 5);

        outs = OUTS_FROM_FP(fp, count);
#if 0
        if (count == 5) {
            outs[4] = GET_REGISTER(vsrc1 & 0x0f);
            count--;
        }
        for (i = 0; i < (int) count; i++) {
            outs[i] = GET_REGISTER(vdst & 0x0f);
            vdst >>= 4;
        }
#else
        // This version executes fewer instructions but is larger
        // overall.  Seems to be a teensy bit faster.
        // Each case deliberately falls through to the one below it,
        // copying the remaining lower-numbered arguments.
        assert((vdst >> 16) == 0);  // 16 bits -or- high 16 bits clear
        switch (count) {
        case 5:
            outs[4] = GET_REGISTER(vsrc1 & 0x0f);
            /* fall through */
        case 4:
            outs[3] = GET_REGISTER(vdst >> 12);
            /* fall through */
        case 3:
            outs[2] = GET_REGISTER((vdst & 0x0f00) >> 8);
            /* fall through */
        case 2:
            outs[1] = GET_REGISTER((vdst & 0x00f0) >> 4);
            /* fall through */
        case 1:
            outs[0] = GET_REGISTER(vdst & 0x0f);
            /* fall through */
        default:
            ;
        }
#endif
    }
}

/*
 * (This was originally a "goto" target; I've kept it separate from the
 * stuff above in case we want to refactor things again.)
 *
 * At this point, we have the arguments stored in the "outs" area of
 * the current method's stack frame, and the method to call in
 * "methodToCall".  Push a new stack frame.
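 *
 * (Sketch of the resulting layout, inferred from the SAVEAREA_FROM_FP and
 * OUTS_FROM_FP macros used here; the interpreter stack grows toward lower
 * addresses:
 *
 *     higher addresses:  caller registers        fp[0..registersSize-1]
 *                        caller StackSaveArea    SAVEAREA_FROM_FP(fp)
 *                        caller outs == callee ins
 *                        callee locals           newFp[0..]
 *     lower addresses:   callee StackSaveArea    SAVEAREA_FROM_FP(newFp)
 *
 * Because the asserts above require the outs count to equal the callee's
 * insSize, the args copied into "outs" become the callee's highest-numbered
 * registers without any further copying.)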
 */
{
    StackSaveArea* newSaveArea;
    u4* newFp;

    ILOGV("> %s%s.%s %s",
        dvmIsNativeMethod(methodToCall) ? "(NATIVE) " : "",
        methodToCall->clazz->descriptor, methodToCall->name,
        methodToCall->shorty);

    newFp = (u4*) SAVEAREA_FROM_FP(fp) - methodToCall->registersSize;
    newSaveArea = SAVEAREA_FROM_FP(newFp);

    /* verify that we have enough space */
    if (true) {
        u1* bottom;
        bottom = (u1*) newSaveArea - methodToCall->outsSize * sizeof(u4);
        if (bottom < self->interpStackEnd) {
            /* stack overflow */
            ALOGV("Stack overflow on method call (start=%p end=%p newBot=%p(%d) size=%d '%s')",
                self->interpStackStart, self->interpStackEnd, bottom,
                (u1*) fp - bottom, self->interpStackSize,
                methodToCall->name);
            dvmHandleStackOverflow(self, methodToCall);
            assert(dvmCheckException(self));
            GOTO_exceptionThrown();
        }
        //ALOGD("+++ fp=%p newFp=%p newSave=%p bottom=%p",
        //    fp, newFp, newSaveArea, bottom);
    }

#ifdef LOG_INSTR
    if (methodToCall->registersSize > methodToCall->insSize) {
        /*
         * This makes valgrind quiet when we print registers that
         * haven't been initialized.  Turn it off when the debug
         * messages are disabled -- we want valgrind to report any
         * used-before-initialized issues.
         */
        memset(newFp, 0xcc,
            (methodToCall->registersSize - methodToCall->insSize) * 4);
    }
#endif

#ifdef EASY_GDB
    newSaveArea->prevSave = SAVEAREA_FROM_FP(fp);
#endif
    newSaveArea->prevFrame = fp;
    newSaveArea->savedPc = pc;
#if defined(WITH_JIT) && defined(MTERP_STUB)
    newSaveArea->returnAddr = 0;
#endif
    newSaveArea->method = methodToCall;

    if (self->interpBreak.ctl.subMode != 0) {
        /*
         * We mark ENTER here for both native and non-native
         * calls.  For native calls, we'll mark EXIT on return.
         * For non-native calls, EXIT is marked in the RETURN op.
         */
        PC_TO_SELF();
        dvmReportInvoke(self, methodToCall);
    }

    if (!dvmIsNativeMethod(methodToCall)) {
        /*
         * "Call" interpreted code.  Reposition the PC, update the
         * frame pointer and other local state, and continue.
         */
        curMethod = methodToCall;
        self->interpSave.method = curMethod;
        methodClassDex = curMethod->clazz->pDvmDex;
        pc = methodToCall->insns;
        fp = newFp;
        self->interpSave.curFrame = fp;
#ifdef EASY_GDB
        debugSaveArea = SAVEAREA_FROM_FP(newFp);
#endif
        self->debugIsMethodEntry = true;        // profiling, debugging
        ILOGD("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
            curMethod->name, curMethod->shorty);
        DUMP_REGS(curMethod, fp, true);         // show input args
        FINISH(0);                              // jump to method start
    } else {
        /* set this up for JNI locals, even if not a JNI native */
        newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.segmentState.all;

        self->interpSave.curFrame = newFp;

        DUMP_REGS(methodToCall, newFp, true);   // show input args

        if (self->interpBreak.ctl.subMode != 0) {
            dvmReportPreNativeInvoke(methodToCall, self, newSaveArea->prevFrame);
        }

        ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
            methodToCall->name, methodToCall->shorty);

        /*
         * Jump through native call bridge.  Because we leave no
         * space for locals on native calls, "newFp" points directly
         * to the method arguments.
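         *
         * (The bridge is assumed here to follow the usual Dalvik native
         * call shape, roughly void (*)(const u4* args, JValue* pResult,
         * const Method* method, Thread* self), which is what the four
         * arguments passed below line up with.)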
         */
        (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);

        if (self->interpBreak.ctl.subMode != 0) {
            dvmReportPostNativeInvoke(methodToCall, self, newSaveArea->prevFrame);
        }

        /* pop frame off */
        dvmPopJniLocals(self, newSaveArea);
        self->interpSave.curFrame = newSaveArea->prevFrame;
        fp = newSaveArea->prevFrame;

        /*
         * If the native code threw an exception, or interpreted code
         * invoked by the native call threw one and nobody has cleared
         * it, jump to our local exception handling.
         */
        if (dvmCheckException(self)) {
            ALOGV("Exception thrown by/below native code");
            GOTO_exceptionThrown();
        }

        ILOGD("> retval=0x%llx (leaving native)", retval.j);
        ILOGD("> (return from native %s.%s to %s.%s %s)",
            methodToCall->clazz->descriptor, methodToCall->name,
            curMethod->clazz->descriptor, curMethod->name,
            curMethod->shorty);

        //u2 invokeInstr = INST_INST(FETCH(0));
        if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
            invokeInstr <= OP_INVOKE_INTERFACE*/)
        {
            FINISH(3);
        } else {
            //ALOGE("Unknown invoke instr %02x at %d",
            //    invokeInstr, (int) (pc - curMethod->insns));
            assert(false);
        }
    }
}
assert(false);      // should not get here
GOTO_TARGET_END

/* File: cstubs/enddefs.cpp */

/* undefine "magic" name remapping */
#undef retval
#undef pc
#undef fp
#undef curMethod
#undef methodClassDex
#undef self
#undef debugTrackedRefStart