/*
 * This file was generated automatically by gen-mterp.py for 'x86-atom'.
 *
 * --> DO NOT EDIT <--
 */

/* File: c/header.cpp */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* common includes */
#include "Dalvik.h"
#include "interp/InterpDefs.h"
#include "mterp/Mterp.h"
#include <math.h>                   // needed for fmod, fmodf
#include "mterp/common/FindInterface.h"

/*
 * Configuration defines.  These affect the C implementations, i.e. the
 * portable interpreter(s) and C stubs.
 *
 * Some defines are controlled by the Makefile, e.g.:
 *   WITH_INSTR_CHECKS
 *   WITH_TRACKREF_CHECKS
 *   EASY_GDB
 *   NDEBUG
 */

#ifdef WITH_INSTR_CHECKS            /* instruction-level paranoia (slow!) */
# define CHECK_BRANCH_OFFSETS
# define CHECK_REGISTER_INDICES
#endif

/*
 * Some architectures require 64-bit alignment for access to 64-bit data
 * types.  We can't just use pointers to copy 64-bit values out of our
 * interpreted register set, because gcc may assume the pointer target is
 * aligned and generate invalid code.
 *
 * There are two common approaches:
 *  (1) Use a union that defines a 32-bit pair and a 64-bit value.
 *  (2) Call memcpy().
 *
 * Depending upon what compiler you're using and what options are specified,
 * one may be faster than the other.  For example, the compiler might
 * convert a memcpy() of 8 bytes into a series of instructions and omit
 * the call.  The union version could cause some strange side-effects,
 * e.g. for a while ARM gcc thought it needed separate storage for each
 * inlined instance, and generated instructions to zero out ~700 bytes of
 * stack space at the top of the interpreter.
 *
 * The default is to use memcpy().  The current gcc for ARM seems to do
 * better with the union.
 */
#if defined(__ARM_EABI__)
# define NO_UNALIGN_64__UNION
#endif


//#define LOG_INSTR                 /* verbose debugging */
/* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */

/*
 * Export another copy of the PC on every instruction; this is largely
 * redundant with EXPORT_PC and the debugger code.  This value can be
 * compared against what we have stored on the stack with EXPORT_PC to
 * help ensure that we aren't missing any export calls.
 */
#if WITH_EXTRA_GC_CHECKS > 1
# define EXPORT_EXTRA_PC() (self->currentPc2 = pc)
#else
# define EXPORT_EXTRA_PC()
#endif

/*
 * Adjust the program counter.  "_offset" is a signed int, in 16-bit units.
 *
 * Assumes the existence of "const u2* pc" and "const u2* curMethod->insns".
 *
 * We don't advance the program counter until we finish an instruction or
 * branch, because we don't want to have to unroll the PC if there's an
 * exception.
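 *
 * For example, ADJUST_PC(3) advances pc by three 16-bit code units (six
 * bytes), while a taken backward branch passes a negative offset and moves
 * pc back toward the start of the method's insns array.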
96 */ 97 #ifdef CHECK_BRANCH_OFFSETS 98 # define ADJUST_PC(_offset) do { \ 99 int myoff = _offset; /* deref only once */ \ 100 if (pc + myoff < curMethod->insns || \ 101 pc + myoff >= curMethod->insns + dvmGetMethodInsnsSize(curMethod)) \ 102 { \ 103 char* desc; \ 104 desc = dexProtoCopyMethodDescriptor(&curMethod->prototype); \ 105 LOGE("Invalid branch %d at 0x%04x in %s.%s %s", \ 106 myoff, (int) (pc - curMethod->insns), \ 107 curMethod->clazz->descriptor, curMethod->name, desc); \ 108 free(desc); \ 109 dvmAbort(); \ 110 } \ 111 pc += myoff; \ 112 EXPORT_EXTRA_PC(); \ 113 } while (false) 114 #else 115 # define ADJUST_PC(_offset) do { \ 116 pc += _offset; \ 117 EXPORT_EXTRA_PC(); \ 118 } while (false) 119 #endif 120 121 /* 122 * If enabled, log instructions as we execute them. 123 */ 124 #ifdef LOG_INSTR 125 # define ILOGD(...) ILOG(LOG_DEBUG, __VA_ARGS__) 126 # define ILOGV(...) ILOG(LOG_VERBOSE, __VA_ARGS__) 127 # define ILOG(_level, ...) do { \ 128 char debugStrBuf[128]; \ 129 snprintf(debugStrBuf, sizeof(debugStrBuf), __VA_ARGS__); \ 130 if (curMethod != NULL) \ 131 LOG(_level, LOG_TAG"i", "%-2d|%04x%s", \ 132 self->threadId, (int)(pc - curMethod->insns), debugStrBuf); \ 133 else \ 134 LOG(_level, LOG_TAG"i", "%-2d|####%s", \ 135 self->threadId, debugStrBuf); \ 136 } while(false) 137 void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly); 138 # define DUMP_REGS(_meth, _frame, _inOnly) dvmDumpRegs(_meth, _frame, _inOnly) 139 static const char kSpacing[] = " "; 140 #else 141 # define ILOGD(...) ((void)0) 142 # define ILOGV(...) ((void)0) 143 # define DUMP_REGS(_meth, _frame, _inOnly) ((void)0) 144 #endif 145 146 /* get a long from an array of u4 */ 147 static inline s8 getLongFromArray(const u4* ptr, int idx) 148 { 149 #if defined(NO_UNALIGN_64__UNION) 150 union { s8 ll; u4 parts[2]; } conv; 151 152 ptr += idx; 153 conv.parts[0] = ptr[0]; 154 conv.parts[1] = ptr[1]; 155 return conv.ll; 156 #else 157 s8 val; 158 memcpy(&val, &ptr[idx], 8); 159 return val; 160 #endif 161 } 162 163 /* store a long into an array of u4 */ 164 static inline void putLongToArray(u4* ptr, int idx, s8 val) 165 { 166 #if defined(NO_UNALIGN_64__UNION) 167 union { s8 ll; u4 parts[2]; } conv; 168 169 ptr += idx; 170 conv.ll = val; 171 ptr[0] = conv.parts[0]; 172 ptr[1] = conv.parts[1]; 173 #else 174 memcpy(&ptr[idx], &val, 8); 175 #endif 176 } 177 178 /* get a double from an array of u4 */ 179 static inline double getDoubleFromArray(const u4* ptr, int idx) 180 { 181 #if defined(NO_UNALIGN_64__UNION) 182 union { double d; u4 parts[2]; } conv; 183 184 ptr += idx; 185 conv.parts[0] = ptr[0]; 186 conv.parts[1] = ptr[1]; 187 return conv.d; 188 #else 189 double dval; 190 memcpy(&dval, &ptr[idx], 8); 191 return dval; 192 #endif 193 } 194 195 /* store a double into an array of u4 */ 196 static inline void putDoubleToArray(u4* ptr, int idx, double dval) 197 { 198 #if defined(NO_UNALIGN_64__UNION) 199 union { double d; u4 parts[2]; } conv; 200 201 ptr += idx; 202 conv.d = dval; 203 ptr[0] = conv.parts[0]; 204 ptr[1] = conv.parts[1]; 205 #else 206 memcpy(&ptr[idx], &dval, 8); 207 #endif 208 } 209 210 /* 211 * If enabled, validate the register number on every access. Otherwise, 212 * just do an array access. 213 * 214 * Assumes the existence of "u4* fp". 215 * 216 * "_idx" may be referenced more than once. 217 */ 218 #ifdef CHECK_REGISTER_INDICES 219 # define GET_REGISTER(_idx) \ 220 ( (_idx) < curMethod->registersSize ? 
\ 221 (fp[(_idx)]) : (assert(!"bad reg"),1969) ) 222 # define SET_REGISTER(_idx, _val) \ 223 ( (_idx) < curMethod->registersSize ? \ 224 (fp[(_idx)] = (u4)(_val)) : (assert(!"bad reg"),1969) ) 225 # define GET_REGISTER_AS_OBJECT(_idx) ((Object *)GET_REGISTER(_idx)) 226 # define SET_REGISTER_AS_OBJECT(_idx, _val) SET_REGISTER(_idx, (s4)_val) 227 # define GET_REGISTER_INT(_idx) ((s4) GET_REGISTER(_idx)) 228 # define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val) 229 # define GET_REGISTER_WIDE(_idx) \ 230 ( (_idx) < curMethod->registersSize-1 ? \ 231 getLongFromArray(fp, (_idx)) : (assert(!"bad reg"),1969) ) 232 # define SET_REGISTER_WIDE(_idx, _val) \ 233 ( (_idx) < curMethod->registersSize-1 ? \ 234 (void)putLongToArray(fp, (_idx), (_val)) : assert(!"bad reg") ) 235 # define GET_REGISTER_FLOAT(_idx) \ 236 ( (_idx) < curMethod->registersSize ? \ 237 (*((float*) &fp[(_idx)])) : (assert(!"bad reg"),1969.0f) ) 238 # define SET_REGISTER_FLOAT(_idx, _val) \ 239 ( (_idx) < curMethod->registersSize ? \ 240 (*((float*) &fp[(_idx)]) = (_val)) : (assert(!"bad reg"),1969.0f) ) 241 # define GET_REGISTER_DOUBLE(_idx) \ 242 ( (_idx) < curMethod->registersSize-1 ? \ 243 getDoubleFromArray(fp, (_idx)) : (assert(!"bad reg"),1969.0) ) 244 # define SET_REGISTER_DOUBLE(_idx, _val) \ 245 ( (_idx) < curMethod->registersSize-1 ? \ 246 (void)putDoubleToArray(fp, (_idx), (_val)) : assert(!"bad reg") ) 247 #else 248 # define GET_REGISTER(_idx) (fp[(_idx)]) 249 # define SET_REGISTER(_idx, _val) (fp[(_idx)] = (_val)) 250 # define GET_REGISTER_AS_OBJECT(_idx) ((Object*) fp[(_idx)]) 251 # define SET_REGISTER_AS_OBJECT(_idx, _val) (fp[(_idx)] = (u4)(_val)) 252 # define GET_REGISTER_INT(_idx) ((s4)GET_REGISTER(_idx)) 253 # define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val) 254 # define GET_REGISTER_WIDE(_idx) getLongFromArray(fp, (_idx)) 255 # define SET_REGISTER_WIDE(_idx, _val) putLongToArray(fp, (_idx), (_val)) 256 # define GET_REGISTER_FLOAT(_idx) (*((float*) &fp[(_idx)])) 257 # define SET_REGISTER_FLOAT(_idx, _val) (*((float*) &fp[(_idx)]) = (_val)) 258 # define GET_REGISTER_DOUBLE(_idx) getDoubleFromArray(fp, (_idx)) 259 # define SET_REGISTER_DOUBLE(_idx, _val) putDoubleToArray(fp, (_idx), (_val)) 260 #endif 261 262 /* 263 * Get 16 bits from the specified offset of the program counter. We always 264 * want to load 16 bits at a time from the instruction stream -- it's more 265 * efficient than 8 and won't have the alignment problems that 32 might. 266 * 267 * Assumes existence of "const u2* pc". 268 */ 269 #define FETCH(_offset) (pc[(_offset)]) 270 271 /* 272 * Extract instruction byte from 16-bit fetch (_inst is a u2). 273 */ 274 #define INST_INST(_inst) ((_inst) & 0xff) 275 276 /* 277 * Replace the opcode (used when handling breakpoints). _opcode is a u1. 278 */ 279 #define INST_REPLACE_OP(_inst, _opcode) (((_inst) & 0xff00) | _opcode) 280 281 /* 282 * Extract the "vA, vB" 4-bit registers from the instruction word (_inst is u2). 283 */ 284 #define INST_A(_inst) (((_inst) >> 8) & 0x0f) 285 #define INST_B(_inst) ((_inst) >> 12) 286 287 /* 288 * Get the 8-bit "vAA" 8-bit register index from the instruction word. 289 * (_inst is u2) 290 */ 291 #define INST_AA(_inst) ((_inst) >> 8) 292 293 /* 294 * The current PC must be available to Throwable constructors, e.g. 295 * those created by the various exception throw routines, so that the 296 * exception stack trace can be generated correctly. If we don't do this, 297 * the offset within the current method won't be shown correctly. 
See the notes in Exception.c.
 *
 * This is also used to determine the address for precise GC.
 *
 * Assumes existence of "u4* fp" and "const u2* pc".
 */
#define EXPORT_PC() (SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc)

/*
 * Check to see if "obj" is NULL.  If so, throw an exception.  Assumes the
 * pc has already been exported to the stack.
 *
 * Perform additional checks on debug builds.
 *
 * Use this to check for NULL when the instruction handler calls into
 * something that could throw an exception (so we have already called
 * EXPORT_PC at the top).
 */
static inline bool checkForNull(Object* obj)
{
    if (obj == NULL) {
        dvmThrowNullPointerException(NULL);
        return false;
    }
#ifdef WITH_EXTRA_OBJECT_VALIDATION
    if (!dvmIsHeapAddress(obj)) {
        LOGE("Invalid object %p", obj);
        dvmAbort();
    }
#endif
#ifndef NDEBUG
    if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
        /* probable heap corruption */
        LOGE("Invalid object class %p (in %p)", obj->clazz, obj);
        dvmAbort();
    }
#endif
    return true;
}

/*
 * Check to see if "obj" is NULL.  If so, export the PC into the stack
 * frame and throw an exception.
 *
 * Perform additional checks on debug builds.
 *
 * Use this to check for NULL when the instruction handler doesn't do
 * anything else that can throw an exception.
 */
static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc)
{
    if (obj == NULL) {
        EXPORT_PC();
        dvmThrowNullPointerException(NULL);
        return false;
    }
#ifdef WITH_EXTRA_OBJECT_VALIDATION
    if (!dvmIsHeapAddress(obj)) {
        LOGE("Invalid object %p", obj);
        dvmAbort();
    }
#endif
#ifndef NDEBUG
    if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
        /* probable heap corruption */
        LOGE("Invalid object class %p (in %p)", obj->clazz, obj);
        dvmAbort();
    }
#endif
    return true;
}

/* File: cstubs/stubdefs.cpp */
/*
 * In the C mterp stubs, "goto" is a function call followed immediately
 * by a return.
 */

#define GOTO_TARGET_DECL(_target, ...) \
    extern "C" void dvmMterp_##_target(Thread* self, ## __VA_ARGS__);

/* (void)xxx to quiet unused variable compiler warnings. */
#define GOTO_TARGET(_target, ...) \
    void dvmMterp_##_target(Thread* self, ## __VA_ARGS__) { \
        u2 ref, vsrc1, vsrc2, vdst; \
        u2 inst = FETCH(0); \
        const Method* methodToCall; \
        StackSaveArea* debugSaveArea; \
        (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst; \
        (void)methodToCall; (void)debugSaveArea;

#define GOTO_TARGET_END }

/*
 * Redefine what used to be local variable accesses into Thread struct
 * references.  (These are undefined down in "footer.cpp".)
 */
#define retval                  self->interpSave.retval
#define pc                      self->interpSave.pc
#define fp                      self->interpSave.curFrame
#define curMethod               self->interpSave.method
#define methodClassDex          self->interpSave.methodClassDex
#define debugTrackedRefStart    self->interpSave.debugTrackedRefStart

/* ugh */
#define STUB_HACK(x) x
#if defined(WITH_JIT)
#define JIT_STUB_HACK(x) x
#else
#define JIT_STUB_HACK(x)
#endif

/*
 * InterpSave's pc and fp must be valid when breaking out to a
 * "Reportxxx" routine.  Because the portable interpreter uses local
 * variables for these, we must flush prior.
Stubs, however, use 414 * the interpSave vars directly, so this is a nop for stubs. 415 */ 416 #define PC_FP_TO_SELF() 417 #define PC_TO_SELF() 418 419 /* 420 * Opcode handler framing macros. Here, each opcode is a separate function 421 * that takes a "self" argument and returns void. We can't declare 422 * these "static" because they may be called from an assembly stub. 423 * (void)xxx to quiet unused variable compiler warnings. 424 */ 425 #define HANDLE_OPCODE(_op) \ 426 extern "C" void dvmMterp_##_op(Thread* self); \ 427 void dvmMterp_##_op(Thread* self) { \ 428 u4 ref; \ 429 u2 vsrc1, vsrc2, vdst; \ 430 u2 inst = FETCH(0); \ 431 (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst; 432 433 #define OP_END } 434 435 /* 436 * Like the "portable" FINISH, but don't reload "inst", and return to caller 437 * when done. Further, debugger/profiler checks are handled 438 * before handler execution in mterp, so we don't do them here either. 439 */ 440 #if defined(WITH_JIT) 441 #define FINISH(_offset) { \ 442 ADJUST_PC(_offset); \ 443 if (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) { \ 444 dvmCheckJit(pc, self); \ 445 } \ 446 return; \ 447 } 448 #else 449 #define FINISH(_offset) { \ 450 ADJUST_PC(_offset); \ 451 return; \ 452 } 453 #endif 454 455 456 /* 457 * The "goto label" statements turn into function calls followed by 458 * return statements. Some of the functions take arguments, which in the 459 * portable interpreter are handled by assigning values to globals. 460 */ 461 462 #define GOTO_exceptionThrown() \ 463 do { \ 464 dvmMterp_exceptionThrown(self); \ 465 return; \ 466 } while(false) 467 468 #define GOTO_returnFromMethod() \ 469 do { \ 470 dvmMterp_returnFromMethod(self); \ 471 return; \ 472 } while(false) 473 474 #define GOTO_invoke(_target, _methodCallRange, _jumboFormat) \ 475 do { \ 476 dvmMterp_##_target(self, _methodCallRange, _jumboFormat); \ 477 return; \ 478 } while(false) 479 480 #define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst) \ 481 do { \ 482 dvmMterp_invokeMethod(self, _methodCallRange, _methodToCall, \ 483 _vsrc1, _vdst); \ 484 return; \ 485 } while(false) 486 487 /* 488 * As a special case, "goto bail" turns into a longjmp. 489 */ 490 #define GOTO_bail() \ 491 dvmMterpStdBail(self, false); 492 493 /* 494 * Periodically check for thread suspension. 495 * 496 * While we're at it, see if a debugger has attached or the profiler has 497 * started. 
498 */ 499 #define PERIODIC_CHECKS(_pcadj) { \ 500 if (dvmCheckSuspendQuick(self)) { \ 501 EXPORT_PC(); /* need for precise GC */ \ 502 dvmCheckSuspendPending(self); \ 503 } \ 504 } 505 506 /* File: c/opcommon.cpp */ 507 /* forward declarations of goto targets */ 508 GOTO_TARGET_DECL(filledNewArray, bool methodCallRange, bool jumboFormat); 509 GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange, bool jumboFormat); 510 GOTO_TARGET_DECL(invokeSuper, bool methodCallRange, bool jumboFormat); 511 GOTO_TARGET_DECL(invokeInterface, bool methodCallRange, bool jumboFormat); 512 GOTO_TARGET_DECL(invokeDirect, bool methodCallRange, bool jumboFormat); 513 GOTO_TARGET_DECL(invokeStatic, bool methodCallRange, bool jumboFormat); 514 GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange, bool jumboFormat); 515 GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange, bool jumboFormat); 516 GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall, 517 u2 count, u2 regs); 518 GOTO_TARGET_DECL(returnFromMethod); 519 GOTO_TARGET_DECL(exceptionThrown); 520 521 /* 522 * =========================================================================== 523 * 524 * What follows are opcode definitions shared between multiple opcodes with 525 * minor substitutions handled by the C pre-processor. These should probably 526 * use the mterp substitution mechanism instead, with the code here moved 527 * into common fragment files (like the asm "binop.S"), although it's hard 528 * to give up the C preprocessor in favor of the much simpler text subst. 529 * 530 * =========================================================================== 531 */ 532 533 #define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype) \ 534 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 535 vdst = INST_A(inst); \ 536 vsrc1 = INST_B(inst); \ 537 ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \ 538 SET_REGISTER##_totype(vdst, \ 539 GET_REGISTER##_fromtype(vsrc1)); \ 540 FINISH(1); 541 542 #define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype, \ 543 _tovtype, _tortype) \ 544 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 545 { \ 546 /* spec defines specific handling for +/- inf and NaN values */ \ 547 _fromvtype val; \ 548 _tovtype intMin, intMax, result; \ 549 vdst = INST_A(inst); \ 550 vsrc1 = INST_B(inst); \ 551 ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \ 552 val = GET_REGISTER##_fromrtype(vsrc1); \ 553 intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 -1); \ 554 intMax = ~intMin; \ 555 result = (_tovtype) val; \ 556 if (val >= intMax) /* +inf */ \ 557 result = intMax; \ 558 else if (val <= intMin) /* -inf */ \ 559 result = intMin; \ 560 else if (val != val) /* NaN */ \ 561 result = 0; \ 562 else \ 563 result = (_tovtype) val; \ 564 SET_REGISTER##_tortype(vdst, result); \ 565 } \ 566 FINISH(1); 567 568 #define HANDLE_INT_TO_SMALL(_opcode, _opname, _type) \ 569 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 570 vdst = INST_A(inst); \ 571 vsrc1 = INST_B(inst); \ 572 ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1); \ 573 SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1)); \ 574 FINISH(1); 575 576 /* NOTE: the comparison result is always a signed 4-byte integer */ 577 #define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal) \ 578 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 579 { \ 580 int result; \ 581 u2 regs; \ 582 _varType val1, val2; \ 583 vdst = INST_AA(inst); \ 584 regs = FETCH(1); \ 585 vsrc1 = regs & 0xff; \ 586 vsrc2 = regs >> 8; \ 587 ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \ 588 val1 = 
GET_REGISTER##_type(vsrc1); \ 589 val2 = GET_REGISTER##_type(vsrc2); \ 590 if (val1 == val2) \ 591 result = 0; \ 592 else if (val1 < val2) \ 593 result = -1; \ 594 else if (val1 > val2) \ 595 result = 1; \ 596 else \ 597 result = (_nanVal); \ 598 ILOGV("+ result=%d", result); \ 599 SET_REGISTER(vdst, result); \ 600 } \ 601 FINISH(2); 602 603 #define HANDLE_OP_IF_XX(_opcode, _opname, _cmp) \ 604 HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/) \ 605 vsrc1 = INST_A(inst); \ 606 vsrc2 = INST_B(inst); \ 607 if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) { \ 608 int branchOffset = (s2)FETCH(1); /* sign-extended */ \ 609 ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2, \ 610 branchOffset); \ 611 ILOGV("> branch taken"); \ 612 if (branchOffset < 0) \ 613 PERIODIC_CHECKS(branchOffset); \ 614 FINISH(branchOffset); \ 615 } else { \ 616 ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2); \ 617 FINISH(2); \ 618 } 619 620 #define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp) \ 621 HANDLE_OPCODE(_opcode /*vAA, +BBBB*/) \ 622 vsrc1 = INST_AA(inst); \ 623 if ((s4) GET_REGISTER(vsrc1) _cmp 0) { \ 624 int branchOffset = (s2)FETCH(1); /* sign-extended */ \ 625 ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset); \ 626 ILOGV("> branch taken"); \ 627 if (branchOffset < 0) \ 628 PERIODIC_CHECKS(branchOffset); \ 629 FINISH(branchOffset); \ 630 } else { \ 631 ILOGV("|if-%s v%d,-", (_opname), vsrc1); \ 632 FINISH(2); \ 633 } 634 635 #define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type) \ 636 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 637 vdst = INST_A(inst); \ 638 vsrc1 = INST_B(inst); \ 639 ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \ 640 SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx); \ 641 FINISH(1); 642 643 #define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv) \ 644 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 645 { \ 646 u2 srcRegs; \ 647 vdst = INST_AA(inst); \ 648 srcRegs = FETCH(1); \ 649 vsrc1 = srcRegs & 0xff; \ 650 vsrc2 = srcRegs >> 8; \ 651 ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \ 652 if (_chkdiv != 0) { \ 653 s4 firstVal, secondVal, result; \ 654 firstVal = GET_REGISTER(vsrc1); \ 655 secondVal = GET_REGISTER(vsrc2); \ 656 if (secondVal == 0) { \ 657 EXPORT_PC(); \ 658 dvmThrowArithmeticException("divide by zero"); \ 659 GOTO_exceptionThrown(); \ 660 } \ 661 if ((u4)firstVal == 0x80000000 && secondVal == -1) { \ 662 if (_chkdiv == 1) \ 663 result = firstVal; /* division */ \ 664 else \ 665 result = 0; /* remainder */ \ 666 } else { \ 667 result = firstVal _op secondVal; \ 668 } \ 669 SET_REGISTER(vdst, result); \ 670 } else { \ 671 /* non-div/rem case */ \ 672 SET_REGISTER(vdst, \ 673 (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2)); \ 674 } \ 675 } \ 676 FINISH(2); 677 678 #define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op) \ 679 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 680 { \ 681 u2 srcRegs; \ 682 vdst = INST_AA(inst); \ 683 srcRegs = FETCH(1); \ 684 vsrc1 = srcRegs & 0xff; \ 685 vsrc2 = srcRegs >> 8; \ 686 ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \ 687 SET_REGISTER(vdst, \ 688 _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f)); \ 689 } \ 690 FINISH(2); 691 692 #define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _op, _chkdiv) \ 693 HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/) \ 694 vdst = INST_A(inst); \ 695 vsrc1 = INST_B(inst); \ 696 vsrc2 = FETCH(1); \ 697 ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x", \ 698 (_opname), vdst, vsrc1, vsrc2); \ 699 if (_chkdiv != 0) { \ 700 s4 firstVal, result; \ 701 firstVal = GET_REGISTER(vsrc1); \ 702 if 
((s2) vsrc2 == 0) { \ 703 EXPORT_PC(); \ 704 dvmThrowArithmeticException("divide by zero"); \ 705 GOTO_exceptionThrown(); \ 706 } \ 707 if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) { \ 708 /* won't generate /lit16 instr for this; check anyway */ \ 709 if (_chkdiv == 1) \ 710 result = firstVal; /* division */ \ 711 else \ 712 result = 0; /* remainder */ \ 713 } else { \ 714 result = firstVal _op (s2) vsrc2; \ 715 } \ 716 SET_REGISTER(vdst, result); \ 717 } else { \ 718 /* non-div/rem case */ \ 719 SET_REGISTER(vdst, GET_REGISTER(vsrc1) _op (s2) vsrc2); \ 720 } \ 721 FINISH(2); 722 723 #define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv) \ 724 HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \ 725 { \ 726 u2 litInfo; \ 727 vdst = INST_AA(inst); \ 728 litInfo = FETCH(1); \ 729 vsrc1 = litInfo & 0xff; \ 730 vsrc2 = litInfo >> 8; /* constant */ \ 731 ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \ 732 (_opname), vdst, vsrc1, vsrc2); \ 733 if (_chkdiv != 0) { \ 734 s4 firstVal, result; \ 735 firstVal = GET_REGISTER(vsrc1); \ 736 if ((s1) vsrc2 == 0) { \ 737 EXPORT_PC(); \ 738 dvmThrowArithmeticException("divide by zero"); \ 739 GOTO_exceptionThrown(); \ 740 } \ 741 if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) { \ 742 if (_chkdiv == 1) \ 743 result = firstVal; /* division */ \ 744 else \ 745 result = 0; /* remainder */ \ 746 } else { \ 747 result = firstVal _op ((s1) vsrc2); \ 748 } \ 749 SET_REGISTER(vdst, result); \ 750 } else { \ 751 SET_REGISTER(vdst, \ 752 (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2); \ 753 } \ 754 } \ 755 FINISH(2); 756 757 #define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op) \ 758 HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \ 759 { \ 760 u2 litInfo; \ 761 vdst = INST_AA(inst); \ 762 litInfo = FETCH(1); \ 763 vsrc1 = litInfo & 0xff; \ 764 vsrc2 = litInfo >> 8; /* constant */ \ 765 ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \ 766 (_opname), vdst, vsrc1, vsrc2); \ 767 SET_REGISTER(vdst, \ 768 _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f)); \ 769 } \ 770 FINISH(2); 771 772 #define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv) \ 773 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 774 vdst = INST_A(inst); \ 775 vsrc1 = INST_B(inst); \ 776 ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \ 777 if (_chkdiv != 0) { \ 778 s4 firstVal, secondVal, result; \ 779 firstVal = GET_REGISTER(vdst); \ 780 secondVal = GET_REGISTER(vsrc1); \ 781 if (secondVal == 0) { \ 782 EXPORT_PC(); \ 783 dvmThrowArithmeticException("divide by zero"); \ 784 GOTO_exceptionThrown(); \ 785 } \ 786 if ((u4)firstVal == 0x80000000 && secondVal == -1) { \ 787 if (_chkdiv == 1) \ 788 result = firstVal; /* division */ \ 789 else \ 790 result = 0; /* remainder */ \ 791 } else { \ 792 result = firstVal _op secondVal; \ 793 } \ 794 SET_REGISTER(vdst, result); \ 795 } else { \ 796 SET_REGISTER(vdst, \ 797 (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1)); \ 798 } \ 799 FINISH(1); 800 801 #define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op) \ 802 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 803 vdst = INST_A(inst); \ 804 vsrc1 = INST_B(inst); \ 805 ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \ 806 SET_REGISTER(vdst, \ 807 _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f)); \ 808 FINISH(1); 809 810 #define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv) \ 811 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 812 { \ 813 u2 srcRegs; \ 814 vdst = INST_AA(inst); \ 815 srcRegs = FETCH(1); \ 816 vsrc1 = srcRegs & 0xff; \ 817 vsrc2 = srcRegs >> 8; \ 818 ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, 
vsrc1, vsrc2); \ 819 if (_chkdiv != 0) { \ 820 s8 firstVal, secondVal, result; \ 821 firstVal = GET_REGISTER_WIDE(vsrc1); \ 822 secondVal = GET_REGISTER_WIDE(vsrc2); \ 823 if (secondVal == 0LL) { \ 824 EXPORT_PC(); \ 825 dvmThrowArithmeticException("divide by zero"); \ 826 GOTO_exceptionThrown(); \ 827 } \ 828 if ((u8)firstVal == 0x8000000000000000ULL && \ 829 secondVal == -1LL) \ 830 { \ 831 if (_chkdiv == 1) \ 832 result = firstVal; /* division */ \ 833 else \ 834 result = 0; /* remainder */ \ 835 } else { \ 836 result = firstVal _op secondVal; \ 837 } \ 838 SET_REGISTER_WIDE(vdst, result); \ 839 } else { \ 840 SET_REGISTER_WIDE(vdst, \ 841 (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \ 842 } \ 843 } \ 844 FINISH(2); 845 846 #define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op) \ 847 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 848 { \ 849 u2 srcRegs; \ 850 vdst = INST_AA(inst); \ 851 srcRegs = FETCH(1); \ 852 vsrc1 = srcRegs & 0xff; \ 853 vsrc2 = srcRegs >> 8; \ 854 ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \ 855 SET_REGISTER_WIDE(vdst, \ 856 _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \ 857 } \ 858 FINISH(2); 859 860 #define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv) \ 861 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 862 vdst = INST_A(inst); \ 863 vsrc1 = INST_B(inst); \ 864 ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \ 865 if (_chkdiv != 0) { \ 866 s8 firstVal, secondVal, result; \ 867 firstVal = GET_REGISTER_WIDE(vdst); \ 868 secondVal = GET_REGISTER_WIDE(vsrc1); \ 869 if (secondVal == 0LL) { \ 870 EXPORT_PC(); \ 871 dvmThrowArithmeticException("divide by zero"); \ 872 GOTO_exceptionThrown(); \ 873 } \ 874 if ((u8)firstVal == 0x8000000000000000ULL && \ 875 secondVal == -1LL) \ 876 { \ 877 if (_chkdiv == 1) \ 878 result = firstVal; /* division */ \ 879 else \ 880 result = 0; /* remainder */ \ 881 } else { \ 882 result = firstVal _op secondVal; \ 883 } \ 884 SET_REGISTER_WIDE(vdst, result); \ 885 } else { \ 886 SET_REGISTER_WIDE(vdst, \ 887 (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1));\ 888 } \ 889 FINISH(1); 890 891 #define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op) \ 892 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 893 vdst = INST_A(inst); \ 894 vsrc1 = INST_B(inst); \ 895 ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \ 896 SET_REGISTER_WIDE(vdst, \ 897 _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \ 898 FINISH(1); 899 900 #define HANDLE_OP_X_FLOAT(_opcode, _opname, _op) \ 901 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 902 { \ 903 u2 srcRegs; \ 904 vdst = INST_AA(inst); \ 905 srcRegs = FETCH(1); \ 906 vsrc1 = srcRegs & 0xff; \ 907 vsrc2 = srcRegs >> 8; \ 908 ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \ 909 SET_REGISTER_FLOAT(vdst, \ 910 GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2)); \ 911 } \ 912 FINISH(2); 913 914 #define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op) \ 915 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 916 { \ 917 u2 srcRegs; \ 918 vdst = INST_AA(inst); \ 919 srcRegs = FETCH(1); \ 920 vsrc1 = srcRegs & 0xff; \ 921 vsrc2 = srcRegs >> 8; \ 922 ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \ 923 SET_REGISTER_DOUBLE(vdst, \ 924 GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2)); \ 925 } \ 926 FINISH(2); 927 928 #define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op) \ 929 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 930 vdst = INST_A(inst); \ 931 vsrc1 = INST_B(inst); \ 932 ILOGV("|%s-float-2addr 
v%d,v%d", (_opname), vdst, vsrc1); \ 933 SET_REGISTER_FLOAT(vdst, \ 934 GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1)); \ 935 FINISH(1); 936 937 #define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op) \ 938 HANDLE_OPCODE(_opcode /*vA, vB*/) \ 939 vdst = INST_A(inst); \ 940 vsrc1 = INST_B(inst); \ 941 ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1); \ 942 SET_REGISTER_DOUBLE(vdst, \ 943 GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1)); \ 944 FINISH(1); 945 946 #define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize) \ 947 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 948 { \ 949 ArrayObject* arrayObj; \ 950 u2 arrayInfo; \ 951 EXPORT_PC(); \ 952 vdst = INST_AA(inst); \ 953 arrayInfo = FETCH(1); \ 954 vsrc1 = arrayInfo & 0xff; /* array ptr */ \ 955 vsrc2 = arrayInfo >> 8; /* index */ \ 956 ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \ 957 arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \ 958 if (!checkForNull((Object*) arrayObj)) \ 959 GOTO_exceptionThrown(); \ 960 if (GET_REGISTER(vsrc2) >= arrayObj->length) { \ 961 dvmThrowArrayIndexOutOfBoundsException( \ 962 arrayObj->length, GET_REGISTER(vsrc2)); \ 963 GOTO_exceptionThrown(); \ 964 } \ 965 SET_REGISTER##_regsize(vdst, \ 966 ((_type*)(void*)arrayObj->contents)[GET_REGISTER(vsrc2)]); \ 967 ILOGV("+ AGET[%d]=%#x", GET_REGISTER(vsrc2), GET_REGISTER(vdst)); \ 968 } \ 969 FINISH(2); 970 971 #define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize) \ 972 HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \ 973 { \ 974 ArrayObject* arrayObj; \ 975 u2 arrayInfo; \ 976 EXPORT_PC(); \ 977 vdst = INST_AA(inst); /* AA: source value */ \ 978 arrayInfo = FETCH(1); \ 979 vsrc1 = arrayInfo & 0xff; /* BB: array ptr */ \ 980 vsrc2 = arrayInfo >> 8; /* CC: index */ \ 981 ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \ 982 arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \ 983 if (!checkForNull((Object*) arrayObj)) \ 984 GOTO_exceptionThrown(); \ 985 if (GET_REGISTER(vsrc2) >= arrayObj->length) { \ 986 dvmThrowArrayIndexOutOfBoundsException( \ 987 arrayObj->length, GET_REGISTER(vsrc2)); \ 988 GOTO_exceptionThrown(); \ 989 } \ 990 ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\ 991 ((_type*)(void*)arrayObj->contents)[GET_REGISTER(vsrc2)] = \ 992 GET_REGISTER##_regsize(vdst); \ 993 } \ 994 FINISH(2); 995 996 /* 997 * It's possible to get a bad value out of a field with sub-32-bit stores 998 * because the -quick versions always operate on 32 bits. Consider: 999 * short foo = -1 (sets a 32-bit register to 0xffffffff) 1000 * iput-quick foo (writes all 32 bits to the field) 1001 * short bar = 1 (sets a 32-bit register to 0x00000001) 1002 * iput-short (writes the low 16 bits to the field) 1003 * iget-quick foo (reads all 32 bits from the field, yielding 0xffff0001) 1004 * This can only happen when optimized and non-optimized code has interleaved 1005 * access to the same field. This is unlikely but possible. 1006 * 1007 * The easiest way to fix this is to always read/write 32 bits at a time. On 1008 * a device with a 16-bit data bus this is sub-optimal. (The alternative 1009 * approach is to have sub-int versions of iget-quick, but now we're wasting 1010 * Dalvik instruction space and making it less likely that handler code will 1011 * already be in the CPU i-cache.) 
1012 */ 1013 #define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize) \ 1014 HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \ 1015 { \ 1016 InstField* ifield; \ 1017 Object* obj; \ 1018 EXPORT_PC(); \ 1019 vdst = INST_A(inst); \ 1020 vsrc1 = INST_B(inst); /* object ptr */ \ 1021 ref = FETCH(1); /* field ref */ \ 1022 ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \ 1023 obj = (Object*) GET_REGISTER(vsrc1); \ 1024 if (!checkForNull(obj)) \ 1025 GOTO_exceptionThrown(); \ 1026 ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \ 1027 if (ifield == NULL) { \ 1028 ifield = dvmResolveInstField(curMethod->clazz, ref); \ 1029 if (ifield == NULL) \ 1030 GOTO_exceptionThrown(); \ 1031 } \ 1032 SET_REGISTER##_regsize(vdst, \ 1033 dvmGetField##_ftype(obj, ifield->byteOffset)); \ 1034 ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name, \ 1035 (u8) GET_REGISTER##_regsize(vdst)); \ 1036 } \ 1037 FINISH(2); 1038 1039 #define HANDLE_IGET_X_JUMBO(_opcode, _opname, _ftype, _regsize) \ 1040 HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/) \ 1041 { \ 1042 InstField* ifield; \ 1043 Object* obj; \ 1044 EXPORT_PC(); \ 1045 ref = FETCH(1) | (u4)FETCH(2) << 16; /* field ref */ \ 1046 vdst = FETCH(3); \ 1047 vsrc1 = FETCH(4); /* object ptr */ \ 1048 ILOGV("|iget%s/jumbo v%d,v%d,field@0x%08x", \ 1049 (_opname), vdst, vsrc1, ref); \ 1050 obj = (Object*) GET_REGISTER(vsrc1); \ 1051 if (!checkForNull(obj)) \ 1052 GOTO_exceptionThrown(); \ 1053 ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \ 1054 if (ifield == NULL) { \ 1055 ifield = dvmResolveInstField(curMethod->clazz, ref); \ 1056 if (ifield == NULL) \ 1057 GOTO_exceptionThrown(); \ 1058 } \ 1059 SET_REGISTER##_regsize(vdst, \ 1060 dvmGetField##_ftype(obj, ifield->byteOffset)); \ 1061 ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name, \ 1062 (u8) GET_REGISTER##_regsize(vdst)); \ 1063 } \ 1064 FINISH(5); 1065 1066 #define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize) \ 1067 HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \ 1068 { \ 1069 Object* obj; \ 1070 vdst = INST_A(inst); \ 1071 vsrc1 = INST_B(inst); /* object ptr */ \ 1072 ref = FETCH(1); /* field offset */ \ 1073 ILOGV("|iget%s-quick v%d,v%d,field@+%u", \ 1074 (_opname), vdst, vsrc1, ref); \ 1075 obj = (Object*) GET_REGISTER(vsrc1); \ 1076 if (!checkForNullExportPC(obj, fp, pc)) \ 1077 GOTO_exceptionThrown(); \ 1078 SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref)); \ 1079 ILOGV("+ IGETQ %d=0x%08llx", ref, \ 1080 (u8) GET_REGISTER##_regsize(vdst)); \ 1081 } \ 1082 FINISH(2); 1083 1084 #define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize) \ 1085 HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \ 1086 { \ 1087 InstField* ifield; \ 1088 Object* obj; \ 1089 EXPORT_PC(); \ 1090 vdst = INST_A(inst); \ 1091 vsrc1 = INST_B(inst); /* object ptr */ \ 1092 ref = FETCH(1); /* field ref */ \ 1093 ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \ 1094 obj = (Object*) GET_REGISTER(vsrc1); \ 1095 if (!checkForNull(obj)) \ 1096 GOTO_exceptionThrown(); \ 1097 ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \ 1098 if (ifield == NULL) { \ 1099 ifield = dvmResolveInstField(curMethod->clazz, ref); \ 1100 if (ifield == NULL) \ 1101 GOTO_exceptionThrown(); \ 1102 } \ 1103 dvmSetField##_ftype(obj, ifield->byteOffset, \ 1104 GET_REGISTER##_regsize(vdst)); \ 1105 ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name, \ 1106 (u8) GET_REGISTER##_regsize(vdst)); \ 1107 } \ 1108 FINISH(2); 1109 1110 #define 
HANDLE_IPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize) \
    HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/) \
    { \
        InstField* ifield; \
        Object* obj; \
        EXPORT_PC(); \
        ref = FETCH(1) | (u4)FETCH(2) << 16;   /* field ref */ \
        vdst = FETCH(3); \
        vsrc1 = FETCH(4);                      /* object ptr */ \
        ILOGV("|iput%s/jumbo v%d,v%d,field@0x%08x", \
            (_opname), vdst, vsrc1, ref); \
        obj = (Object*) GET_REGISTER(vsrc1); \
        if (!checkForNull(obj)) \
            GOTO_exceptionThrown(); \
        ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
        if (ifield == NULL) { \
            ifield = dvmResolveInstField(curMethod->clazz, ref); \
            if (ifield == NULL) \
                GOTO_exceptionThrown(); \
        } \
        dvmSetField##_ftype(obj, ifield->byteOffset, \
            GET_REGISTER##_regsize(vdst)); \
        ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name, \
            (u8) GET_REGISTER##_regsize(vdst)); \
    } \
    FINISH(5);

#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize) \
    HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
    { \
        Object* obj; \
        vdst = INST_A(inst); \
        vsrc1 = INST_B(inst);                  /* object ptr */ \
        ref = FETCH(1);                        /* field offset */ \
        ILOGV("|iput%s-quick v%d,v%d,field@0x%04x", \
            (_opname), vdst, vsrc1, ref); \
        obj = (Object*) GET_REGISTER(vsrc1); \
        if (!checkForNullExportPC(obj, fp, pc)) \
            GOTO_exceptionThrown(); \
        dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst)); \
        ILOGV("+ IPUTQ %d=0x%08llx", ref, \
            (u8) GET_REGISTER##_regsize(vdst)); \
    } \
    FINISH(2);

/*
 * The JIT needs dvmDexGetResolvedField() to return non-null.
 * Because the portable interpreter is not involved with the JIT
 * and trace building, we only need the extra check here when this
 * code is massaged into a stub called from an assembly interpreter.
 * This is controlled by the JIT_STUB_HACK macro.
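 * (JIT_STUB_HACK(x) expands to x only when WITH_JIT is defined -- see
 * cstubs/stubdefs.cpp above -- so the dvmJitEndTraceSelect() calls in the
 * SGET/SPUT handlers below compile away when the JIT is not built in.)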
1161 */ 1162 1163 #define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize) \ 1164 HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \ 1165 { \ 1166 StaticField* sfield; \ 1167 vdst = INST_AA(inst); \ 1168 ref = FETCH(1); /* field ref */ \ 1169 ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref); \ 1170 sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \ 1171 if (sfield == NULL) { \ 1172 EXPORT_PC(); \ 1173 sfield = dvmResolveStaticField(curMethod->clazz, ref); \ 1174 if (sfield == NULL) \ 1175 GOTO_exceptionThrown(); \ 1176 if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \ 1177 JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \ 1178 } \ 1179 } \ 1180 SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \ 1181 ILOGV("+ SGET '%s'=0x%08llx", \ 1182 sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \ 1183 } \ 1184 FINISH(2); 1185 1186 #define HANDLE_SGET_X_JUMBO(_opcode, _opname, _ftype, _regsize) \ 1187 HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/) \ 1188 { \ 1189 StaticField* sfield; \ 1190 ref = FETCH(1) | (u4)FETCH(2) << 16; /* field ref */ \ 1191 vdst = FETCH(3); \ 1192 ILOGV("|sget%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref); \ 1193 sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \ 1194 if (sfield == NULL) { \ 1195 EXPORT_PC(); \ 1196 sfield = dvmResolveStaticField(curMethod->clazz, ref); \ 1197 if (sfield == NULL) \ 1198 GOTO_exceptionThrown(); \ 1199 if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \ 1200 JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \ 1201 } \ 1202 } \ 1203 SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \ 1204 ILOGV("+ SGET '%s'=0x%08llx", \ 1205 sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \ 1206 } \ 1207 FINISH(4); 1208 1209 #define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize) \ 1210 HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \ 1211 { \ 1212 StaticField* sfield; \ 1213 vdst = INST_AA(inst); \ 1214 ref = FETCH(1); /* field ref */ \ 1215 ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref); \ 1216 sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \ 1217 if (sfield == NULL) { \ 1218 EXPORT_PC(); \ 1219 sfield = dvmResolveStaticField(curMethod->clazz, ref); \ 1220 if (sfield == NULL) \ 1221 GOTO_exceptionThrown(); \ 1222 if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \ 1223 JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \ 1224 } \ 1225 } \ 1226 dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \ 1227 ILOGV("+ SPUT '%s'=0x%08llx", \ 1228 sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \ 1229 } \ 1230 FINISH(2); 1231 1232 #define HANDLE_SPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize) \ 1233 HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/) \ 1234 { \ 1235 StaticField* sfield; \ 1236 ref = FETCH(1) | (u4)FETCH(2) << 16; /* field ref */ \ 1237 vdst = FETCH(3); \ 1238 ILOGV("|sput%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref); \ 1239 sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \ 1240 if (sfield == NULL) { \ 1241 EXPORT_PC(); \ 1242 sfield = dvmResolveStaticField(curMethod->clazz, ref); \ 1243 if (sfield == NULL) \ 1244 GOTO_exceptionThrown(); \ 1245 if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \ 1246 JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \ 1247 } \ 1248 } \ 1249 dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \ 1250 ILOGV("+ SPUT '%s'=0x%08llx", \ 1251 sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \ 1252 } \ 1253 FINISH(4); 1254 1255 
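/*
 * For reference, a hand-expanded sketch (not generated output) of what one
 * of the instantiations below becomes after the preprocessor applies
 * HANDLE_IGET_X and the stub framing macros from cstubs/stubdefs.cpp.
 * ILOGV() calls are omitted and the layout is approximate; the accessor
 * name dvmGetFieldIntVolatile is simply what the dvmGetField##_ftype token
 * pasting produces for the IntVolatile case.  Kept under #if 0 so it is
 * never compiled.
 */
#if 0
extern "C" void dvmMterp_OP_IGET_VOLATILE(Thread* self);
void dvmMterp_OP_IGET_VOLATILE(Thread* self)
{
    u4 ref;
    u2 vsrc1, vsrc2, vdst;
    u2 inst = FETCH(0);                 /* pc and fp resolve via interpSave */
    (void)vsrc2;                        /* unused in this handler */

    InstField* ifield;
    Object* obj;
    EXPORT_PC();
    vdst = INST_A(inst);
    vsrc1 = INST_B(inst);               /* object ptr */
    ref = FETCH(1);                     /* field ref */
    obj = (Object*) GET_REGISTER(vsrc1);
    if (!checkForNull(obj))
        GOTO_exceptionThrown();         /* calls dvmMterp_exceptionThrown, then returns */
    ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref);
    if (ifield == NULL) {
        ifield = dvmResolveInstField(curMethod->clazz, ref);
        if (ifield == NULL)
            GOTO_exceptionThrown();
    }
    SET_REGISTER(vdst, dvmGetFieldIntVolatile(obj, ifield->byteOffset));
    FINISH(2);                          /* ADJUST_PC(2), then return to the stub */
}
#endif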
/* File: c/OP_IGET_VOLATILE.cpp */ 1256 HANDLE_IGET_X(OP_IGET_VOLATILE, "-volatile", IntVolatile, ) 1257 OP_END 1258 1259 /* File: c/OP_IPUT_VOLATILE.cpp */ 1260 HANDLE_IPUT_X(OP_IPUT_VOLATILE, "-volatile", IntVolatile, ) 1261 OP_END 1262 1263 /* File: c/OP_SGET_VOLATILE.cpp */ 1264 HANDLE_SGET_X(OP_SGET_VOLATILE, "-volatile", IntVolatile, ) 1265 OP_END 1266 1267 /* File: c/OP_SPUT_VOLATILE.cpp */ 1268 HANDLE_SPUT_X(OP_SPUT_VOLATILE, "-volatile", IntVolatile, ) 1269 OP_END 1270 1271 /* File: c/OP_IGET_OBJECT_VOLATILE.cpp */ 1272 HANDLE_IGET_X(OP_IGET_OBJECT_VOLATILE, "-object-volatile", ObjectVolatile, _AS_OBJECT) 1273 OP_END 1274 1275 /* File: c/OP_IGET_WIDE_VOLATILE.cpp */ 1276 HANDLE_IGET_X(OP_IGET_WIDE_VOLATILE, "-wide-volatile", LongVolatile, _WIDE) 1277 OP_END 1278 1279 /* File: c/OP_IPUT_WIDE_VOLATILE.cpp */ 1280 HANDLE_IPUT_X(OP_IPUT_WIDE_VOLATILE, "-wide-volatile", LongVolatile, _WIDE) 1281 OP_END 1282 1283 /* File: c/OP_SGET_WIDE_VOLATILE.cpp */ 1284 HANDLE_SGET_X(OP_SGET_WIDE_VOLATILE, "-wide-volatile", LongVolatile, _WIDE) 1285 OP_END 1286 1287 /* File: c/OP_SPUT_WIDE_VOLATILE.cpp */ 1288 HANDLE_SPUT_X(OP_SPUT_WIDE_VOLATILE, "-wide-volatile", LongVolatile, _WIDE) 1289 OP_END 1290 1291 /* File: c/OP_BREAKPOINT.cpp */ 1292 HANDLE_OPCODE(OP_BREAKPOINT) 1293 { 1294 /* 1295 * Restart this instruction with the original opcode. We do 1296 * this by simply jumping to the handler. 1297 * 1298 * It's probably not necessary to update "inst", but we do it 1299 * for the sake of anything that needs to do disambiguation in a 1300 * common handler with INST_INST. 1301 * 1302 * The breakpoint itself is handled over in updateDebugger(), 1303 * because we need to detect other events (method entry, single 1304 * step) and report them in the same event packet, and we're not 1305 * yet handling those through breakpoint instructions. By the 1306 * time we get here, the breakpoint has already been handled and 1307 * the thread resumed. 
1308 */ 1309 u1 originalOpcode = dvmGetOriginalOpcode(pc); 1310 LOGV("+++ break 0x%02x (0x%04x -> 0x%04x)", originalOpcode, inst, 1311 INST_REPLACE_OP(inst, originalOpcode)); 1312 inst = INST_REPLACE_OP(inst, originalOpcode); 1313 FINISH_BKPT(originalOpcode); 1314 } 1315 OP_END 1316 1317 /* File: c/OP_EXECUTE_INLINE_RANGE.cpp */ 1318 HANDLE_OPCODE(OP_EXECUTE_INLINE_RANGE /*{vCCCC..v(CCCC+AA-1)}, inline@BBBB*/) 1319 { 1320 u4 arg0, arg1, arg2, arg3; 1321 arg0 = arg1 = arg2 = arg3 = 0; /* placate gcc */ 1322 1323 EXPORT_PC(); 1324 1325 vsrc1 = INST_AA(inst); /* #of args */ 1326 ref = FETCH(1); /* inline call "ref" */ 1327 vdst = FETCH(2); /* range base */ 1328 ILOGV("|execute-inline-range args=%d @%d {regs=v%d-v%d}", 1329 vsrc1, ref, vdst, vdst+vsrc1-1); 1330 1331 assert((vdst >> 16) == 0); // 16-bit type -or- high 16 bits clear 1332 assert(vsrc1 <= 4); 1333 1334 switch (vsrc1) { 1335 case 4: 1336 arg3 = GET_REGISTER(vdst+3); 1337 /* fall through */ 1338 case 3: 1339 arg2 = GET_REGISTER(vdst+2); 1340 /* fall through */ 1341 case 2: 1342 arg1 = GET_REGISTER(vdst+1); 1343 /* fall through */ 1344 case 1: 1345 arg0 = GET_REGISTER(vdst+0); 1346 /* fall through */ 1347 default: // case 0 1348 ; 1349 } 1350 1351 if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) { 1352 if (!dvmPerformInlineOp4Dbg(arg0, arg1, arg2, arg3, &retval, ref)) 1353 GOTO_exceptionThrown(); 1354 } else { 1355 if (!dvmPerformInlineOp4Std(arg0, arg1, arg2, arg3, &retval, ref)) 1356 GOTO_exceptionThrown(); 1357 } 1358 } 1359 FINISH(3); 1360 OP_END 1361 1362 /* File: c/OP_INVOKE_OBJECT_INIT_RANGE.cpp */ 1363 HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_RANGE /*{vCCCC..v(CCCC+AA-1)}, meth@BBBB*/) 1364 { 1365 Object* obj; 1366 1367 vsrc1 = FETCH(2); /* reg number of "this" pointer */ 1368 obj = GET_REGISTER_AS_OBJECT(vsrc1); 1369 1370 if (!checkForNullExportPC(obj, fp, pc)) 1371 GOTO_exceptionThrown(); 1372 1373 /* 1374 * The object should be marked "finalizable" when Object.<init> 1375 * completes normally. We're going to assume it does complete 1376 * (by virtue of being nothing but a return-void) and set it now. 
1377 */ 1378 if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) { 1379 EXPORT_PC(); 1380 dvmSetFinalizable(obj); 1381 if (dvmGetException(self)) 1382 GOTO_exceptionThrown(); 1383 } 1384 1385 if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) { 1386 /* behave like OP_INVOKE_DIRECT_RANGE */ 1387 GOTO_invoke(invokeDirect, true, false); 1388 } 1389 FINISH(3); 1390 } 1391 OP_END 1392 1393 /* File: c/OP_RETURN_VOID_BARRIER.cpp */ 1394 HANDLE_OPCODE(OP_RETURN_VOID_BARRIER /**/) 1395 ILOGV("|return-void"); 1396 #ifndef NDEBUG 1397 retval.j = 0xababababULL; /* placate valgrind */ 1398 #endif 1399 ANDROID_MEMBAR_STORE(); 1400 GOTO_returnFromMethod(); 1401 OP_END 1402 1403 /* File: c/OP_IPUT_OBJECT_VOLATILE.cpp */ 1404 HANDLE_IPUT_X(OP_IPUT_OBJECT_VOLATILE, "-object-volatile", ObjectVolatile, _AS_OBJECT) 1405 OP_END 1406 1407 /* File: c/OP_SGET_OBJECT_VOLATILE.cpp */ 1408 HANDLE_SGET_X(OP_SGET_OBJECT_VOLATILE, "-object-volatile", ObjectVolatile, _AS_OBJECT) 1409 OP_END 1410 1411 /* File: c/OP_SPUT_OBJECT_VOLATILE.cpp */ 1412 HANDLE_SPUT_X(OP_SPUT_OBJECT_VOLATILE, "-object-volatile", ObjectVolatile, _AS_OBJECT) 1413 OP_END 1414 1415 /* File: c/OP_INVOKE_OBJECT_INIT_JUMBO.cpp */ 1416 HANDLE_OPCODE(OP_INVOKE_OBJECT_INIT_JUMBO /*{vCCCC..vNNNN}, meth@AAAAAAAA*/) 1417 { 1418 Object* obj; 1419 1420 vsrc1 = FETCH(4); /* reg number of "this" pointer */ 1421 obj = GET_REGISTER_AS_OBJECT(vsrc1); 1422 1423 if (!checkForNullExportPC(obj, fp, pc)) 1424 GOTO_exceptionThrown(); 1425 1426 /* 1427 * The object should be marked "finalizable" when Object.<init> 1428 * completes normally. We're going to assume it does complete 1429 * (by virtue of being nothing but a return-void) and set it now. 1430 */ 1431 if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISFINALIZABLE)) { 1432 EXPORT_PC(); 1433 dvmSetFinalizable(obj); 1434 if (dvmGetException(self)) 1435 GOTO_exceptionThrown(); 1436 } 1437 1438 if (self->interpBreak.ctl.subMode & kSubModeDebuggerActive) { 1439 /* behave like OP_INVOKE_DIRECT_RANGE */ 1440 GOTO_invoke(invokeDirect, true, true); 1441 } 1442 FINISH(5); 1443 } 1444 OP_END 1445 1446 /* File: c/OP_IGET_VOLATILE_JUMBO.cpp */ 1447 HANDLE_IGET_X_JUMBO(OP_IGET_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, ) 1448 OP_END 1449 1450 /* File: c/OP_IGET_WIDE_VOLATILE_JUMBO.cpp */ 1451 HANDLE_IGET_X_JUMBO(OP_IGET_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE) 1452 OP_END 1453 1454 /* File: c/OP_IGET_OBJECT_VOLATILE_JUMBO.cpp */ 1455 HANDLE_IGET_X_JUMBO(OP_IGET_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT) 1456 OP_END 1457 1458 /* File: c/OP_IPUT_VOLATILE_JUMBO.cpp */ 1459 HANDLE_IPUT_X_JUMBO(OP_IPUT_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, ) 1460 OP_END 1461 1462 /* File: c/OP_IPUT_WIDE_VOLATILE_JUMBO.cpp */ 1463 HANDLE_IPUT_X_JUMBO(OP_IPUT_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE) 1464 OP_END 1465 1466 /* File: c/OP_IPUT_OBJECT_VOLATILE_JUMBO.cpp */ 1467 HANDLE_IPUT_X_JUMBO(OP_IPUT_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT) 1468 OP_END 1469 1470 /* File: c/OP_SGET_VOLATILE_JUMBO.cpp */ 1471 HANDLE_SGET_X_JUMBO(OP_SGET_VOLATILE_JUMBO, "-volatile/jumbo", IntVolatile, ) 1472 OP_END 1473 1474 /* File: c/OP_SGET_WIDE_VOLATILE_JUMBO.cpp */ 1475 HANDLE_SGET_X_JUMBO(OP_SGET_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE) 1476 OP_END 1477 1478 /* File: c/OP_SGET_OBJECT_VOLATILE_JUMBO.cpp */ 1479 HANDLE_SGET_X_JUMBO(OP_SGET_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, 
_AS_OBJECT) 1480 OP_END 1481 1482 /* File: c/OP_SPUT_VOLATILE_JUMBO.cpp */ 1483 HANDLE_SPUT_X_JUMBO(OP_SPUT_VOLATILE_JUMBO, "-volatile", IntVolatile, ) 1484 OP_END 1485 1486 /* File: c/OP_SPUT_WIDE_VOLATILE_JUMBO.cpp */ 1487 HANDLE_SPUT_X_JUMBO(OP_SPUT_WIDE_VOLATILE_JUMBO, "-wide-volatile/jumbo", LongVolatile, _WIDE) 1488 OP_END 1489 1490 /* File: c/OP_SPUT_OBJECT_VOLATILE_JUMBO.cpp */ 1491 HANDLE_SPUT_X_JUMBO(OP_SPUT_OBJECT_VOLATILE_JUMBO, "-object-volatile/jumbo", ObjectVolatile, _AS_OBJECT) 1492 OP_END 1493 1494 /* File: c/gotoTargets.cpp */ 1495 /* 1496 * C footer. This has some common code shared by the various targets. 1497 */ 1498 1499 /* 1500 * Everything from here on is a "goto target". In the basic interpreter 1501 * we jump into these targets and then jump directly to the handler for 1502 * next instruction. Here, these are subroutines that return to the caller. 1503 */ 1504 1505 GOTO_TARGET(filledNewArray, bool methodCallRange, bool jumboFormat) 1506 { 1507 ClassObject* arrayClass; 1508 ArrayObject* newArray; 1509 u4* contents; 1510 char typeCh; 1511 int i; 1512 u4 arg5; 1513 1514 EXPORT_PC(); 1515 1516 if (jumboFormat) { 1517 ref = FETCH(1) | (u4)FETCH(2) << 16; /* class ref */ 1518 vsrc1 = FETCH(3); /* #of elements */ 1519 vdst = FETCH(4); /* range base */ 1520 arg5 = -1; /* silence compiler warning */ 1521 ILOGV("|filled-new-array/jumbo args=%d @0x%08x {regs=v%d-v%d}", 1522 vsrc1, ref, vdst, vdst+vsrc1-1); 1523 } else { 1524 ref = FETCH(1); /* class ref */ 1525 vdst = FETCH(2); /* first 4 regs -or- range base */ 1526 1527 if (methodCallRange) { 1528 vsrc1 = INST_AA(inst); /* #of elements */ 1529 arg5 = -1; /* silence compiler warning */ 1530 ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}", 1531 vsrc1, ref, vdst, vdst+vsrc1-1); 1532 } else { 1533 arg5 = INST_A(inst); 1534 vsrc1 = INST_B(inst); /* #of elements */ 1535 ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}", 1536 vsrc1, ref, vdst, arg5); 1537 } 1538 } 1539 1540 /* 1541 * Resolve the array class. 1542 */ 1543 arrayClass = dvmDexGetResolvedClass(methodClassDex, ref); 1544 if (arrayClass == NULL) { 1545 arrayClass = dvmResolveClass(curMethod->clazz, ref, false); 1546 if (arrayClass == NULL) 1547 GOTO_exceptionThrown(); 1548 } 1549 /* 1550 if (!dvmIsArrayClass(arrayClass)) { 1551 dvmThrowRuntimeException( 1552 "filled-new-array needs array class"); 1553 GOTO_exceptionThrown(); 1554 } 1555 */ 1556 /* verifier guarantees this is an array class */ 1557 assert(dvmIsArrayClass(arrayClass)); 1558 assert(dvmIsClassInitialized(arrayClass)); 1559 1560 /* 1561 * Create an array of the specified type. 1562 */ 1563 LOGVV("+++ filled-new-array type is '%s'", arrayClass->descriptor); 1564 typeCh = arrayClass->descriptor[1]; 1565 if (typeCh == 'D' || typeCh == 'J') { 1566 /* category 2 primitives not allowed */ 1567 dvmThrowRuntimeException("bad filled array req"); 1568 GOTO_exceptionThrown(); 1569 } else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') { 1570 /* TODO: requires multiple "fill in" loops with different widths */ 1571 LOGE("non-int primitives not implemented"); 1572 dvmThrowInternalError( 1573 "filled-new-array not implemented for anything but 'int'"); 1574 GOTO_exceptionThrown(); 1575 } 1576 1577 newArray = dvmAllocArrayByClass(arrayClass, vsrc1, ALLOC_DONT_TRACK); 1578 if (newArray == NULL) 1579 GOTO_exceptionThrown(); 1580 1581 /* 1582 * Fill in the elements. It's legal for vsrc1 to be zero. 
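 * (With vsrc1 == 0 the fill loops below execute no iterations, and retval.l
 * ends up pointing at an empty array of the requested type.)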
1583 */ 1584 contents = (u4*)(void*)newArray->contents; 1585 if (methodCallRange) { 1586 for (i = 0; i < vsrc1; i++) 1587 contents[i] = GET_REGISTER(vdst+i); 1588 } else { 1589 assert(vsrc1 <= 5); 1590 if (vsrc1 == 5) { 1591 contents[4] = GET_REGISTER(arg5); 1592 vsrc1--; 1593 } 1594 for (i = 0; i < vsrc1; i++) { 1595 contents[i] = GET_REGISTER(vdst & 0x0f); 1596 vdst >>= 4; 1597 } 1598 } 1599 if (typeCh == 'L' || typeCh == '[') { 1600 dvmWriteBarrierArray(newArray, 0, newArray->length); 1601 } 1602 1603 retval.l = (Object*)newArray; 1604 } 1605 if (jumboFormat) { 1606 FINISH(5); 1607 } else { 1608 FINISH(3); 1609 } 1610 GOTO_TARGET_END 1611 1612 1613 GOTO_TARGET(invokeVirtual, bool methodCallRange, bool jumboFormat) 1614 { 1615 Method* baseMethod; 1616 Object* thisPtr; 1617 1618 EXPORT_PC(); 1619 1620 if (jumboFormat) { 1621 ref = FETCH(1) | (u4)FETCH(2) << 16; /* method ref */ 1622 vsrc1 = FETCH(3); /* count */ 1623 vdst = FETCH(4); /* first reg */ 1624 ADJUST_PC(2); /* advance pc partially to make returns easier */ 1625 ILOGV("|invoke-virtual/jumbo args=%d @0x%08x {regs=v%d-v%d}", 1626 vsrc1, ref, vdst, vdst+vsrc1-1); 1627 thisPtr = (Object*) GET_REGISTER(vdst); 1628 } else { 1629 vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */ 1630 ref = FETCH(1); /* method ref */ 1631 vdst = FETCH(2); /* 4 regs -or- first reg */ 1632 1633 /* 1634 * The object against which we are executing a method is always 1635 * in the first argument. 1636 */ 1637 if (methodCallRange) { 1638 assert(vsrc1 > 0); 1639 ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}", 1640 vsrc1, ref, vdst, vdst+vsrc1-1); 1641 thisPtr = (Object*) GET_REGISTER(vdst); 1642 } else { 1643 assert((vsrc1>>4) > 0); 1644 ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}", 1645 vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f); 1646 thisPtr = (Object*) GET_REGISTER(vdst & 0x0f); 1647 } 1648 } 1649 1650 if (!checkForNull(thisPtr)) 1651 GOTO_exceptionThrown(); 1652 1653 /* 1654 * Resolve the method. This is the correct method for the static 1655 * type of the object. We also verify access permissions here. 1656 */ 1657 baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref); 1658 if (baseMethod == NULL) { 1659 baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL); 1660 if (baseMethod == NULL) { 1661 ILOGV("+ unknown method or access denied"); 1662 GOTO_exceptionThrown(); 1663 } 1664 } 1665 1666 /* 1667 * Combine the object we found with the vtable offset in the 1668 * method. 1669 */ 1670 assert(baseMethod->methodIndex < thisPtr->clazz->vtableCount); 1671 methodToCall = thisPtr->clazz->vtable[baseMethod->methodIndex]; 1672 1673 #if defined(WITH_JIT) && defined(MTERP_STUB) 1674 self->methodToCall = methodToCall; 1675 self->callsiteClass = thisPtr->clazz; 1676 #endif 1677 1678 #if 0 1679 if (dvmIsAbstractMethod(methodToCall)) { 1680 /* 1681 * This can happen if you create two classes, Base and Sub, where 1682 * Sub is a sub-class of Base. Declare a protected abstract 1683 * method foo() in Base, and invoke foo() from a method in Base. 1684 * Base is an "abstract base class" and is never instantiated 1685 * directly. Now, Override foo() in Sub, and use Sub. This 1686 * Works fine unless Sub stops providing an implementation of 1687 * the method. 
             */
            dvmThrowAbstractMethodError("abstract method not implemented");
            GOTO_exceptionThrown();
        }
#else
        assert(!dvmIsAbstractMethod(methodToCall) ||
            methodToCall->nativeFunc != NULL);
#endif

        LOGVV("+++ base=%s.%s virtual[%d]=%s.%s",
            baseMethod->clazz->descriptor, baseMethod->name,
            (u4) baseMethod->methodIndex,
            methodToCall->clazz->descriptor, methodToCall->name);
        assert(methodToCall != NULL);

#if 0
        if (vsrc1 != methodToCall->insSize) {
            LOGW("WRONG METHOD: base=%s.%s virtual[%d]=%s.%s",
                baseMethod->clazz->descriptor, baseMethod->name,
                (u4) baseMethod->methodIndex,
                methodToCall->clazz->descriptor, methodToCall->name);
            //dvmDumpClass(baseMethod->clazz);
            //dvmDumpClass(methodToCall->clazz);
            dvmDumpAllClasses(0);
        }
#endif

        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
    }
GOTO_TARGET_END

GOTO_TARGET(invokeSuper, bool methodCallRange, bool jumboFormat)
    {
        Method* baseMethod;
        u2 thisReg;

        EXPORT_PC();

        if (jumboFormat) {
            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
            vsrc1 = FETCH(3);                     /* count */
            vdst = FETCH(4);                      /* first reg */
            ADJUST_PC(2);     /* advance pc partially to make returns easier */
            ILOGV("|invoke-super/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                vsrc1, ref, vdst, vdst+vsrc1-1);
            thisReg = vdst;
        } else {
            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
            ref = FETCH(1);             /* method ref */
            vdst = FETCH(2);            /* 4 regs -or- first reg */

            if (methodCallRange) {
                ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
                    vsrc1, ref, vdst, vdst+vsrc1-1);
                thisReg = vdst;
            } else {
                ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
                thisReg = vdst & 0x0f;
            }
        }

        /* impossible in well-formed code, but we must check nevertheless */
        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
            GOTO_exceptionThrown();

        /*
         * Resolve the method.  This is the correct method for the static
         * type of the object.  We also verify access permissions here.
         * The first arg to dvmResolveMethod() is just the referring class
         * (used for class loaders and such), so we don't want to pass
         * the superclass into the resolution call.
         */
        baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
        if (baseMethod == NULL) {
            baseMethod = dvmResolveMethod(curMethod->clazz, ref, METHOD_VIRTUAL);
            if (baseMethod == NULL) {
                ILOGV("+ unknown method or access denied");
                GOTO_exceptionThrown();
            }
        }

        /*
         * Combine the object we found with the vtable offset in the
         * method's class.
         *
         * We're using the current method's class' superclass, not the
         * superclass of "this".  This is because we might be executing
         * in a method inherited from a superclass, and we want to run
         * in that class' superclass.
         */
        if (baseMethod->methodIndex >= curMethod->clazz->super->vtableCount) {
            /*
             * Method does not exist in the superclass.  Could happen if
             * superclass gets updated.
             */
            dvmThrowNoSuchMethodError(baseMethod->name);
            GOTO_exceptionThrown();
        }
        methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex];

#if 0
        if (dvmIsAbstractMethod(methodToCall)) {
            dvmThrowAbstractMethodError("abstract method not implemented");
            GOTO_exceptionThrown();
        }
#else
        assert(!dvmIsAbstractMethod(methodToCall) ||
            methodToCall->nativeFunc != NULL);
#endif
        LOGVV("+++ base=%s.%s super-virtual=%s.%s",
            baseMethod->clazz->descriptor, baseMethod->name,
            methodToCall->clazz->descriptor, methodToCall->name);
        assert(methodToCall != NULL);

        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
    }
GOTO_TARGET_END

GOTO_TARGET(invokeInterface, bool methodCallRange, bool jumboFormat)
    {
        Object* thisPtr;
        ClassObject* thisClass;

        EXPORT_PC();

        if (jumboFormat) {
            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
            vsrc1 = FETCH(3);                     /* count */
            vdst = FETCH(4);                      /* first reg */
            ADJUST_PC(2);     /* advance pc partially to make returns easier */
            ILOGV("|invoke-interface/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                vsrc1, ref, vdst, vdst+vsrc1-1);
            thisPtr = (Object*) GET_REGISTER(vdst);
        } else {
            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
            ref = FETCH(1);             /* method ref */
            vdst = FETCH(2);            /* 4 regs -or- first reg */

            /*
             * The object against which we are executing a method is always
             * in the first argument.
             */
            if (methodCallRange) {
                assert(vsrc1 > 0);
                ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
                    vsrc1, ref, vdst, vdst+vsrc1-1);
                thisPtr = (Object*) GET_REGISTER(vdst);
            } else {
                assert((vsrc1>>4) > 0);
                ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
                thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
            }
        }

        if (!checkForNull(thisPtr))
            GOTO_exceptionThrown();

        thisClass = thisPtr->clazz;

        /*
         * Given a class and a method index, find the Method* with the
         * actual code we want to execute.
         */
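        /*
         * For example: for an invoke-interface of List.size() where "this"
         * happens to be an ArrayList, the lookup below maps the interface
         * method reference onto the concrete ArrayList.size() implementation
         * (the "InCache" variant caches the resolution so later calls with
         * the same receiver class stay cheap).
         */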
        methodToCall = dvmFindInterfaceMethodInCache(thisClass, ref, curMethod,
                        methodClassDex);
#if defined(WITH_JIT) && defined(MTERP_STUB)
        self->callsiteClass = thisClass;
        self->methodToCall = methodToCall;
#endif
        if (methodToCall == NULL) {
            assert(dvmCheckException(self));
            GOTO_exceptionThrown();
        }

        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
    }
GOTO_TARGET_END

GOTO_TARGET(invokeDirect, bool methodCallRange, bool jumboFormat)
    {
        u2 thisReg;

        EXPORT_PC();

        if (jumboFormat) {
            ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
            vsrc1 = FETCH(3);                     /* count */
            vdst = FETCH(4);                      /* first reg */
            ADJUST_PC(2);     /* advance pc partially to make returns easier */
            ILOGV("|invoke-direct/jumbo args=%d @0x%08x {regs=v%d-v%d}",
                vsrc1, ref, vdst, vdst+vsrc1-1);
            thisReg = vdst;
        } else {
            vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
            ref = FETCH(1);             /* method ref */
            vdst = FETCH(2);            /* 4 regs -or- first reg */

            if (methodCallRange) {
                ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
                    vsrc1, ref, vdst, vdst+vsrc1-1);
                thisReg = vdst;
            } else {
                ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
                    vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
                thisReg = vdst & 0x0f;
            }
        }

        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
            GOTO_exceptionThrown();

        methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
        if (methodToCall == NULL) {
            methodToCall = dvmResolveMethod(curMethod->clazz, ref,
                            METHOD_DIRECT);
            if (methodToCall == NULL) {
                ILOGV("+ unknown direct method");   // should be impossible
                GOTO_exceptionThrown();
            }
        }
        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
    }
GOTO_TARGET_END

GOTO_TARGET(invokeStatic, bool methodCallRange, bool jumboFormat)
    EXPORT_PC();

    if (jumboFormat) {
        ref = FETCH(1) | (u4)FETCH(2) << 16;  /* method ref */
        vsrc1 = FETCH(3);                     /* count */
        vdst = FETCH(4);                      /* first reg */
        ADJUST_PC(2);     /* advance pc partially to make returns easier */
        ILOGV("|invoke-static/jumbo args=%d @0x%08x {regs=v%d-v%d}",
            vsrc1, ref, vdst, vdst+vsrc1-1);
    } else {
        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
        ref = FETCH(1);             /* method ref */
        vdst = FETCH(2);            /* 4 regs -or- first reg */

        if (methodCallRange)
            ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
                vsrc1, ref, vdst, vdst+vsrc1-1);
        else
            ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
    }

    methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
    if (methodToCall == NULL) {
        methodToCall = dvmResolveMethod(curMethod->clazz, ref, METHOD_STATIC);
        if (methodToCall == NULL) {
            ILOGV("+ unknown method");
            GOTO_exceptionThrown();
        }

#if defined(WITH_JIT) && defined(MTERP_STUB)
        /*
         * The JIT needs dvmDexGetResolvedMethod() to return non-null.
         * Include the check if this code is being used as a stub
         * called from the assembly interpreter.
         */
        if ((self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) &&
            (dvmDexGetResolvedMethod(methodClassDex, ref) == NULL)) {
            /* Class initialization is still ongoing */
            dvmJitEndTraceSelect(self,pc);
        }
#endif
    }
    GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
GOTO_TARGET_END

GOTO_TARGET(invokeVirtualQuick, bool methodCallRange, bool jumboFormat)
    {
        Object* thisPtr;

        EXPORT_PC();

        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
        ref = FETCH(1);             /* vtable index */
        vdst = FETCH(2);            /* 4 regs -or- first reg */

        /*
         * The object against which we are executing a method is always
         * in the first argument.
         */
        if (methodCallRange) {
            assert(vsrc1 > 0);
            ILOGV("|invoke-virtual-quick-range args=%d @0x%04x {regs=v%d-v%d}",
                vsrc1, ref, vdst, vdst+vsrc1-1);
            thisPtr = (Object*) GET_REGISTER(vdst);
        } else {
            assert((vsrc1>>4) > 0);
            ILOGV("|invoke-virtual-quick args=%d @0x%04x {regs=0x%04x %x}",
                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
            thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
        }

        if (!checkForNull(thisPtr))
            GOTO_exceptionThrown();

        /*
         * Combine the object we found with the vtable offset in the
         * method.
         */
        assert(ref < (unsigned int) thisPtr->clazz->vtableCount);
        methodToCall = thisPtr->clazz->vtable[ref];
#if defined(WITH_JIT) && defined(MTERP_STUB)
        self->callsiteClass = thisPtr->clazz;
        self->methodToCall = methodToCall;
#endif

#if 0
        if (dvmIsAbstractMethod(methodToCall)) {
            dvmThrowAbstractMethodError("abstract method not implemented");
            GOTO_exceptionThrown();
        }
#else
        assert(!dvmIsAbstractMethod(methodToCall) ||
            methodToCall->nativeFunc != NULL);
#endif

        LOGVV("+++ virtual[%d]=%s.%s",
            ref, methodToCall->clazz->descriptor, methodToCall->name);
        assert(methodToCall != NULL);

        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
    }
GOTO_TARGET_END

GOTO_TARGET(invokeSuperQuick, bool methodCallRange, bool jumboFormat)
    {
        u2 thisReg;

        EXPORT_PC();

        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
        ref = FETCH(1);             /* vtable index */
        vdst = FETCH(2);            /* 4 regs -or- first reg */

        if (methodCallRange) {
            ILOGV("|invoke-super-quick-range args=%d @0x%04x {regs=v%d-v%d}",
                vsrc1, ref, vdst, vdst+vsrc1-1);
            thisReg = vdst;
        } else {
            ILOGV("|invoke-super-quick args=%d @0x%04x {regs=0x%04x %x}",
                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
            thisReg = vdst & 0x0f;
        }
        /* impossible in well-formed code, but we must check nevertheless */
        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
            GOTO_exceptionThrown();

#if 0   /* impossible in optimized + verified code */
        if (ref >= curMethod->clazz->super->vtableCount) {
            dvmThrowNoSuchMethodError(NULL);
            GOTO_exceptionThrown();
        }
#else
        assert(ref < (unsigned int) curMethod->clazz->super->vtableCount);
#endif

        /*
         * Combine the object we found with the vtable offset in the
         * method's class.
         *
         * We're using the current method's class' superclass, not the
         * superclass of "this".  This is because we might be executing
         * in a method inherited from a superclass, and we want to run
         * in the method's class' superclass.
         */
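        /*
         * For example: with classes A, B extends A (overriding foo()), and
         * C extends B, an invoke-super-quick of foo() compiled into one of
         * B's methods must resolve through B's superclass (A) below, even
         * when "this" is actually a C; resolving through the superclass of
         * thisPtr->clazz (which would be B) would re-select B.foo() and
         * could recurse forever.
         */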
        methodToCall = curMethod->clazz->super->vtable[ref];

#if 0
        if (dvmIsAbstractMethod(methodToCall)) {
            dvmThrowAbstractMethodError("abstract method not implemented");
            GOTO_exceptionThrown();
        }
#else
        assert(!dvmIsAbstractMethod(methodToCall) ||
            methodToCall->nativeFunc != NULL);
#endif
        LOGVV("+++ super-virtual[%d]=%s.%s",
            ref, methodToCall->clazz->descriptor, methodToCall->name);
        assert(methodToCall != NULL);
        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
    }
GOTO_TARGET_END


/*
 * General handling for return-void, return, and return-wide.  Put the
 * return value in "retval" before jumping here.
 */
GOTO_TARGET(returnFromMethod)
    {
        StackSaveArea* saveArea;

        /*
         * We must do this BEFORE we pop the previous stack frame off, so
         * that the GC can see the return value (if any) in the local vars.
         *
         * Since this is now an interpreter switch point, we must do it before
         * we do anything at all.
         */
        PERIODIC_CHECKS(0);

        ILOGV("> retval=0x%llx (leaving %s.%s %s)",
            retval.j, curMethod->clazz->descriptor, curMethod->name,
            curMethod->shorty);
        //DUMP_REGS(curMethod, fp);

        saveArea = SAVEAREA_FROM_FP(fp);

#ifdef EASY_GDB
        debugSaveArea = saveArea;
#endif

        /* back up to previous frame and see if we hit a break */
        fp = (u4*)saveArea->prevFrame;
        assert(fp != NULL);

        /* Handle any special subMode requirements */
        if (self->interpBreak.ctl.subMode != 0) {
            PC_FP_TO_SELF();
            dvmReportReturn(self);
        }

        if (dvmIsBreakFrame(fp)) {
            /* bail without popping the method frame from stack */
            LOGVV("+++ returned into break frame");
            GOTO_bail();
        }

        /* update thread FP, and reset local variables */
        self->interpSave.curFrame = fp;
        curMethod = SAVEAREA_FROM_FP(fp)->method;
        self->interpSave.method = curMethod;
        //methodClass = curMethod->clazz;
        methodClassDex = curMethod->clazz->pDvmDex;
        pc = saveArea->savedPc;
        ILOGD("> (return to %s.%s %s)", curMethod->clazz->descriptor,
            curMethod->name, curMethod->shorty);

        /* use FINISH on the caller's invoke instruction */
        //u2 invokeInstr = INST_INST(FETCH(0));
        if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
            invokeInstr <= OP_INVOKE_INTERFACE*/)
        {
            FINISH(3);
        } else {
            //LOGE("Unknown invoke instr %02x at %d",
            //    invokeInstr, (int) (pc - curMethod->insns));
            assert(false);
        }
    }
GOTO_TARGET_END


/*
 * Jump here when the code throws an exception.
 *
 * By the time we get here, the Throwable has been created and the stack
 * trace has been saved off.
 */
GOTO_TARGET(exceptionThrown)
    {
        Object* exception;
        int catchRelPc;

        PERIODIC_CHECKS(0);

        /*
         * We save off the exception and clear the exception status.  While
         * processing the exception we might need to load some Throwable
         * classes, and we don't want class loader exceptions to get
         * confused with this one.
         */
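        /*
         * Note that the Throwable stays registered as a tracked allocation
         * while it is detached from the thread, so a GC triggered during
         * catch-block resolution below cannot reclaim it; the tracking is
         * released again once unrolling is complete and the exception has
         * been handed back via dvmSetException() where appropriate.
         */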
        assert(dvmCheckException(self));
        exception = dvmGetException(self);
        dvmAddTrackedAlloc(exception, self);
        dvmClearException(self);

        LOGV("Handling exception %s at %s:%d",
            exception->clazz->descriptor, curMethod->name,
            dvmLineNumFromPC(curMethod, pc - curMethod->insns));

        /*
         * Report the exception throw to any "subMode" watchers.
         *
         * TODO: if the exception was thrown by interpreted code, control
         * fell through native, and then back to us, we will report the
         * exception at the point of the throw and again here.  We can avoid
         * this by not reporting exceptions when we jump here directly from
         * the native call code above, but then we won't report exceptions
         * that were thrown *from* the JNI code (as opposed to *through* it).
         *
         * The correct solution is probably to ignore from-native exceptions
         * here, and have the JNI exception code do the reporting to the
         * debugger.
         */
        if (self->interpBreak.ctl.subMode != 0) {
            PC_FP_TO_SELF();
            dvmReportExceptionThrow(self, exception);
        }

        /*
         * We need to unroll to the catch block or the nearest "break"
         * frame.
         *
         * A break frame could indicate that we have reached an intermediate
         * native call, or have gone off the top of the stack and the thread
         * needs to exit.  Either way, we return from here, leaving the
         * exception raised.
         *
         * If we do find a catch block, we want to transfer execution to
         * that point.
         *
         * Note this can cause an exception while resolving classes in
         * the "catch" blocks.
         */
        catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
                        exception, false, (void**)(void*)&fp);

        /*
         * Restore the stack bounds after an overflow.  This isn't going to
         * be correct in all circumstances, e.g. if JNI code devours the
         * exception this won't happen until some other exception gets
         * thrown.  If the code keeps pushing the stack bounds we'll end
         * up aborting the VM.
         *
         * Note we want to do this *after* the call to dvmFindCatchBlock,
         * because that may need extra stack space to resolve exception
         * classes (e.g. through a class loader).
         *
         * It's possible for the stack overflow handling to cause an
         * exception (specifically, class resolution in a "catch" block
         * during the call above), so we could see the thread's overflow
         * flag raised but actually be running in a "nested" interpreter
         * frame.  We don't allow doubled-up StackOverflowErrors, so
         * we can check for this by just looking at the exception type
         * in the cleanup function.  Also, we won't unroll past the SOE
         * point because the more-recent exception will hit a break frame
         * as it unrolls to here.
         */
        if (self->stackOverflowed)
            dvmCleanupStackOverflow(self, exception);

        if (catchRelPc < 0) {
            /* falling through to JNI code or off the bottom of the stack */
#if DVM_SHOW_EXCEPTION >= 2
            LOGD("Exception %s from %s:%d not caught locally",
                exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
                dvmLineNumFromPC(curMethod, pc - curMethod->insns));
#endif
            dvmSetException(self, exception);
            dvmReleaseTrackedAlloc(exception, self);
            GOTO_bail();
        }

#if DVM_SHOW_EXCEPTION >= 3
        {
            const Method* catchMethod = SAVEAREA_FROM_FP(fp)->method;
            LOGD("Exception %s thrown from %s:%d to %s:%d",
                exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
                dvmLineNumFromPC(curMethod, pc - curMethod->insns),
                dvmGetMethodSourceFile(catchMethod),
                dvmLineNumFromPC(catchMethod, catchRelPc));
        }
#endif

        /*
         * Adjust local variables to match self->interpSave.curFrame and the
         * updated PC.
         */
        //fp = (u4*) self->interpSave.curFrame;
        curMethod = SAVEAREA_FROM_FP(fp)->method;
        self->interpSave.method = curMethod;
        //methodClass = curMethod->clazz;
        methodClassDex = curMethod->clazz->pDvmDex;
        pc = curMethod->insns + catchRelPc;
        ILOGV("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
            curMethod->name, curMethod->shorty);
        DUMP_REGS(curMethod, fp, false);            // show all regs

        /*
         * Restore the exception if the handler wants it.
         *
         * The Dalvik spec mandates that, if an exception handler wants to
         * do something with the exception, the first instruction executed
         * must be "move-exception".  We can pass the exception along
         * through the thread struct, and let the move-exception instruction
         * clear it for us.
         *
         * If the handler doesn't call move-exception, we don't want to
         * finish here with an exception still pending.
         */
        if (INST_INST(FETCH(0)) == OP_MOVE_EXCEPTION)
            dvmSetException(self, exception);

        dvmReleaseTrackedAlloc(exception, self);
        FINISH(0);
    }
GOTO_TARGET_END


/*
 * General handling for invoke-{virtual,super,direct,static,interface},
 * including "quick" variants.
 *
 * Set "methodToCall" to the Method we're calling, and "methodCallRange"
 * depending on whether this is a "/range" instruction.
 *
 * For a range call:
 *  "vsrc1" holds the argument count (8 bits)
 *  "vdst" holds the first argument in the range
 * For a non-range call:
 *  "vsrc1" holds the argument count (4 bits) and the 5th argument index
 *  "vdst" holds four 4-bit register indices
 *
 * The caller must EXPORT_PC before jumping here, because any method
 * call can throw a stack overflow exception.
 */
GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall,
    u2 count, u2 regs)
    {
        STUB_HACK(vsrc1 = count; vdst = regs; methodToCall = _methodToCall;);

        //printf("range=%d call=%p count=%d regs=0x%04x\n",
        //    methodCallRange, methodToCall, count, regs);
        //printf(" --> %s.%s %s\n", methodToCall->clazz->descriptor,
        //    methodToCall->name, methodToCall->shorty);

        u4* outs;
        int i;

        /*
         * Copy args.  This may corrupt vsrc1/vdst.
         */
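        /*
         * For example: a non-range invoke with argument registers v4, v0, v1
         * (in that order) arrives here with vsrc1 = 0x30 (count 3 in the
         * high nibble, unused fifth-arg index in the low nibble) and
         * vdst = 0x0104 (first argument in the lowest nibble); the copy
         * below produces outs[0]=v4, outs[1]=v0, outs[2]=v1.
         */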
        if (methodCallRange) {
            // could use memcpy or a "Duff's device"; most functions have
            // so few args it won't matter much
            assert(vsrc1 <= curMethod->outsSize);
            assert(vsrc1 == methodToCall->insSize);
            outs = OUTS_FROM_FP(fp, vsrc1);
            for (i = 0; i < vsrc1; i++)
                outs[i] = GET_REGISTER(vdst+i);
        } else {
            u4 count = vsrc1 >> 4;

            assert(count <= curMethod->outsSize);
            assert(count == methodToCall->insSize);
            assert(count <= 5);

            outs = OUTS_FROM_FP(fp, count);
#if 0
            if (count == 5) {
                outs[4] = GET_REGISTER(vsrc1 & 0x0f);
                count--;
            }
            for (i = 0; i < (int) count; i++) {
                outs[i] = GET_REGISTER(vdst & 0x0f);
                vdst >>= 4;
            }
#else
            // This version executes fewer instructions but is larger
            // overall.  Seems to be a teensy bit faster.
            assert((vdst >> 16) == 0);  // 16 bits -or- high 16 bits clear
            switch (count) {
            case 5:
                outs[4] = GET_REGISTER(vsrc1 & 0x0f);
            case 4:
                outs[3] = GET_REGISTER(vdst >> 12);
            case 3:
                outs[2] = GET_REGISTER((vdst & 0x0f00) >> 8);
            case 2:
                outs[1] = GET_REGISTER((vdst & 0x00f0) >> 4);
            case 1:
                outs[0] = GET_REGISTER(vdst & 0x0f);
            default:
                ;
            }
#endif
        }
    }

    /*
     * (This was originally a "goto" target; I've kept it separate from the
     * stuff above in case we want to refactor things again.)
     *
     * At this point, we have the arguments stored in the "outs" area of
     * the current method's stack frame, and the method to call in
     * "methodToCall".  Push a new stack frame.
     */
    {
        StackSaveArea* newSaveArea;
        u4* newFp;

        ILOGV("> %s%s.%s %s",
            dvmIsNativeMethod(methodToCall) ? "(NATIVE) " : "",
            methodToCall->clazz->descriptor, methodToCall->name,
            methodToCall->shorty);

        newFp = (u4*) SAVEAREA_FROM_FP(fp) - methodToCall->registersSize;
        newSaveArea = SAVEAREA_FROM_FP(newFp);

        /* verify that we have enough space */
        if (true) {
            u1* bottom;
            bottom = (u1*) newSaveArea - methodToCall->outsSize * sizeof(u4);
            if (bottom < self->interpStackEnd) {
                /* stack overflow */
                LOGV("Stack overflow on method call (start=%p end=%p newBot=%p(%d) size=%d '%s')",
                    self->interpStackStart, self->interpStackEnd, bottom,
                    (u1*) fp - bottom, self->interpStackSize,
                    methodToCall->name);
                dvmHandleStackOverflow(self, methodToCall);
                assert(dvmCheckException(self));
                GOTO_exceptionThrown();
            }
            //LOGD("+++ fp=%p newFp=%p newSave=%p bottom=%p",
            //    fp, newFp, newSaveArea, bottom);
        }

#ifdef LOG_INSTR
        if (methodToCall->registersSize > methodToCall->insSize) {
            /*
             * This makes valgrind quiet when we print registers that
             * haven't been initialized.  Turn it off when the debug
             * messages are disabled -- we want valgrind to report any
             * used-before-initialized issues.
             */
            memset(newFp, 0xcc,
                (methodToCall->registersSize - methodToCall->insSize) * 4);
        }
#endif

#ifdef EASY_GDB
        newSaveArea->prevSave = SAVEAREA_FROM_FP(fp);
#endif
        newSaveArea->prevFrame = fp;
        newSaveArea->savedPc = pc;
#if defined(WITH_JIT) && defined(MTERP_STUB)
        newSaveArea->returnAddr = 0;
#endif
        newSaveArea->method = methodToCall;

        if (self->interpBreak.ctl.subMode != 0) {
            /*
             * We mark ENTER here for both native and non-native
             * calls.
             * For native calls, we'll mark EXIT on return.
             * For non-native calls, EXIT is marked in the RETURN op.
             */
            PC_TO_SELF();
            dvmReportInvoke(self, methodToCall);
        }

        if (!dvmIsNativeMethod(methodToCall)) {
            /*
             * "Call" interpreted code.  Reposition the PC, update the
             * frame pointer and other local state, and continue.
             */
            curMethod = methodToCall;
            self->interpSave.method = curMethod;
            methodClassDex = curMethod->clazz->pDvmDex;
            pc = methodToCall->insns;
            self->interpSave.curFrame = fp = newFp;
#ifdef EASY_GDB
            debugSaveArea = SAVEAREA_FROM_FP(newFp);
#endif
            self->debugIsMethodEntry = true;        // profiling, debugging
            ILOGD("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
                curMethod->name, curMethod->shorty);
            DUMP_REGS(curMethod, fp, true);         // show input args
            FINISH(0);                              // jump to method start
        } else {
            /* set this up for JNI locals, even if not a JNI native */
            newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.segmentState.all;

            self->interpSave.curFrame = newFp;

            DUMP_REGS(methodToCall, newFp, true);   // show input args

            if (self->interpBreak.ctl.subMode != 0) {
                dvmReportPreNativeInvoke(methodToCall, self, fp);
            }

            ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
                methodToCall->name, methodToCall->shorty);

            /*
             * Jump through native call bridge.  Because we leave no
             * space for locals on native calls, "newFp" points directly
             * to the method arguments.
             */
            (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);

            if (self->interpBreak.ctl.subMode != 0) {
                dvmReportPostNativeInvoke(methodToCall, self, fp);
            }

            /* pop frame off */
            dvmPopJniLocals(self, newSaveArea);
            self->interpSave.curFrame = fp;

            /*
             * If the native code threw an exception, or interpreted code
             * invoked by the native call threw one and nobody has cleared
             * it, jump to our local exception handling.
             */
            if (dvmCheckException(self)) {
                LOGV("Exception thrown by/below native code");
                GOTO_exceptionThrown();
            }

            ILOGD("> retval=0x%llx (leaving native)", retval.j);
            ILOGD("> (return from native %s.%s to %s.%s %s)",
                methodToCall->clazz->descriptor, methodToCall->name,
                curMethod->clazz->descriptor, curMethod->name,
                curMethod->shorty);

            //u2 invokeInstr = INST_INST(FETCH(0));
            if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
                invokeInstr <= OP_INVOKE_INTERFACE*/)
            {
                FINISH(3);
            } else {
                //LOGE("Unknown invoke instr %02x at %d",
                //    invokeInstr, (int) (pc - curMethod->insns));
                assert(false);
            }
        }
    }
    assert(false);      // should not get here
GOTO_TARGET_END

/* File: cstubs/enddefs.cpp */

/* undefine "magic" name remapping */
#undef retval
#undef pc
#undef fp
#undef curMethod
#undef methodClassDex
#undef self
#undef debugTrackedRefStart
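/*
 * (In the C-stub build the names undefined above are macros, presumably
 * mapping to the corresponding fields of the current Thread's interpreter
 * save area -- e.g. "pc" to self->interpSave.pc -- so that the same handler
 * bodies can be compiled either as stubs or as part of the portable
 * interpreter; the actual definitions are emitted earlier in this file by
 * the cstubs definitions section.)
 */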