      1 /*
      2  * Copyright (C) 2008 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 #ifdef WITH_JIT
     17 
     18 /*
     19  * Target independent portion of Android's Jit
     20  */
     21 
     22 #include "Dalvik.h"
     23 #include "Jit.h"
     24 
     25 
     26 #include "dexdump/OpCodeNames.h"
     27 #include <unistd.h>
     28 #include <pthread.h>
     29 #include <sys/time.h>
     30 #include <signal.h>
     31 #include "compiler/Compiler.h"
     32 #include "compiler/CompilerUtility.h"
     33 #include "compiler/CompilerIR.h"
     34 #include <errno.h>
     35 
     36 #if defined(WITH_SELF_VERIFICATION)
     37 /* Allocate space for per-thread ShadowSpace data structures */
     38 void* dvmSelfVerificationShadowSpaceAlloc(Thread* self)
     39 {
     40     self->shadowSpace = (ShadowSpace*) calloc(1, sizeof(ShadowSpace));
     41     if (self->shadowSpace == NULL)
     42         return NULL;
     43 
     44     self->shadowSpace->registerSpaceSize = REG_SPACE;
     45     self->shadowSpace->registerSpace =
     46         (int*) calloc(self->shadowSpace->registerSpaceSize, sizeof(int));
     47 
     48     return self->shadowSpace->registerSpace;
     49 }
     50 
     51 /* Free per-thread ShadowSpace data structures */
     52 void dvmSelfVerificationShadowSpaceFree(Thread* self)
     53 {
     54     free(self->shadowSpace->registerSpace);
     55     free(self->shadowSpace);
     56 }
     57 
     58 /*
     59  * Save out PC, FP, InterpState, and registers to shadow space.
     60  * Return a pointer to the shadow space for JIT to use.
     61  */
     62 void* dvmSelfVerificationSaveState(const u2* pc, const void* fp,
     63                                    InterpState* interpState, int targetTrace)
     64 {
     65     Thread *self = dvmThreadSelf();
     66     ShadowSpace *shadowSpace = self->shadowSpace;
     67     unsigned preBytes = interpState->method->outsSize*4 + sizeof(StackSaveArea);
     68     unsigned postBytes = interpState->method->registersSize*4;
     69 
     70     //LOGD("### selfVerificationSaveState(%d) pc: 0x%x fp: 0x%x",
     71     //    self->threadId, (int)pc, (int)fp);
     72 
     73     if (shadowSpace->selfVerificationState != kSVSIdle) {
     74         LOGD("~~~ Save: INCORRECT PREVIOUS STATE(%d): %d",
     75             self->threadId, shadowSpace->selfVerificationState);
     76         LOGD("********** SHADOW STATE DUMP **********");
     77         LOGD("PC: 0x%x FP: 0x%x", (int)pc, (int)fp);
     78     }
     79     shadowSpace->selfVerificationState = kSVSStart;
     80 
     81     if (interpState->entryPoint == kInterpEntryResume) {
     82         interpState->entryPoint = kInterpEntryInstr;
     83 #if 0
     84         /* Tracking the success rate of resume after single-stepping */
     85         if (interpState->jitResumeDPC == pc) {
     86             LOGD("SV single step resumed at %p", pc);
     87         }
     88         else {
     89             LOGD("real %p DPC %p NPC %p", pc, interpState->jitResumeDPC,
     90                  interpState->jitResumeNPC);
     91         }
     92 #endif
     93     }
     94 
     95     // Dynamically grow shadow register space if necessary
     96     if (preBytes + postBytes > shadowSpace->registerSpaceSize * sizeof(u4)) {
     97         free(shadowSpace->registerSpace);
     98         shadowSpace->registerSpaceSize = (preBytes + postBytes) / sizeof(u4);
     99         shadowSpace->registerSpace =
    100             (int*) calloc(shadowSpace->registerSpaceSize, sizeof(u4));
    101     }
    102 
    103     // Remember original state
    104     shadowSpace->startPC = pc;
    105     shadowSpace->fp = fp;
    106     shadowSpace->glue = interpState;
    107     /*
    108      * Store the original method here in case the trace ends with a
    109      * return/invoke, which would change the current method.
    110      */
    111     shadowSpace->method = interpState->method;
    112     shadowSpace->shadowFP = shadowSpace->registerSpace +
    113                             shadowSpace->registerSpaceSize - postBytes/4;
    114 
    115     // Create a copy of the InterpState
    116     memcpy(&(shadowSpace->interpState), interpState, sizeof(InterpState));
    117     shadowSpace->interpState.fp = shadowSpace->shadowFP;
    118     shadowSpace->interpState.interpStackEnd = (u1*)shadowSpace->registerSpace;
    119 
    120     // Create a copy of the stack
    121     memcpy(((char*)shadowSpace->shadowFP)-preBytes, ((char*)fp)-preBytes,
    122         preBytes+postBytes);
    123 
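    /*
     * Layout of the shadow copy built above: preBytes covers the outs
     * area and StackSaveArea below the frame pointer, and postBytes
     * covers the method's registers above it.
     *
     *   registerSpace                  registerSpace + registerSpaceSize
     *   |........ free ........|<- preBytes ->|<---- postBytes ---->|
     *                                         ^
     *                                      shadowFP
     */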
    124     // Set up the shadowed heap space
    125     shadowSpace->heapSpaceTail = shadowSpace->heapSpace;
    126 
    127     // Reset trace length
    128     shadowSpace->traceLength = 0;
    129 
    130     return shadowSpace;
    131 }
    132 
    133 /*
    134  * Save ending PC, FP and compiled code exit point to shadow space.
    135  * Return a pointer to the shadow space for JIT to restore state.
    136  */
    137 void* dvmSelfVerificationRestoreState(const u2* pc, const void* fp,
    138                                       SelfVerificationState exitPoint)
    139 {
    140     Thread *self = dvmThreadSelf();
    141     ShadowSpace *shadowSpace = self->shadowSpace;
    142     // Official InterpState structure
    143     InterpState *realGlue = shadowSpace->glue;
    144     shadowSpace->endPC = pc;
    145     shadowSpace->endShadowFP = fp;
    146 
    147     //LOGD("### selfVerificationRestoreState(%d) pc: 0x%x fp: 0x%x endPC: 0x%x",
    148     //    self->threadId, (int)shadowSpace->startPC, (int)shadowSpace->fp,
    149     //    (int)pc);
    150 
    151     if (shadowSpace->selfVerificationState != kSVSStart) {
    152         LOGD("~~~ Restore: INCORRECT PREVIOUS STATE(%d): %d",
    153             self->threadId, shadowSpace->selfVerificationState);
    154         LOGD("********** SHADOW STATE DUMP **********");
    155         LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
    156             (int)shadowSpace->endPC);
    157         LOGD("Interp FP: 0x%x", (int)shadowSpace->fp);
    158         LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
    159             (int)shadowSpace->endShadowFP);
    160     }
    161 
    162     // Move the resume [ND]PC from the shadow space to the real space so that
    163     // the debug interpreter can return to the translation
    164     if (exitPoint == kSVSSingleStep) {
    165         realGlue->jitResumeNPC = shadowSpace->interpState.jitResumeNPC;
    166         realGlue->jitResumeDPC = shadowSpace->interpState.jitResumeDPC;
    167     } else {
    168         realGlue->jitResumeNPC = NULL;
    169         realGlue->jitResumeDPC = NULL;
    170     }
    171 
    172     // Special case when punting after a single instruction
    173     if (exitPoint == kSVSPunt && pc == shadowSpace->startPC) {
    174         shadowSpace->selfVerificationState = kSVSIdle;
    175     } else {
    176         shadowSpace->selfVerificationState = exitPoint;
    177     }
    178 
    179     return shadowSpace;
    180 }
    181 
    182 /* Print contents of virtual registers */
    183 static void selfVerificationPrintRegisters(int* addr, int* addrRef,
    184                                            int numWords)
    185 {
    186     int i;
    187     for (i = 0; i < numWords; i++) {
    188         LOGD("(v%d) 0x%8x%s", i, addr[i], addr[i] != addrRef[i] ? " X" : "");
    189     }
    190 }
    191 
    192 /* Print values maintained in shadowSpace */
    193 static void selfVerificationDumpState(const u2* pc, Thread* self)
    194 {
    195     ShadowSpace* shadowSpace = self->shadowSpace;
    196     StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    197     int frameBytes = (int) shadowSpace->registerSpace +
    198                      shadowSpace->registerSpaceSize*4 -
    199                      (int) shadowSpace->shadowFP;
    200     int localRegs = 0;
    201     int frameBytes2 = 0;
    202     if (self->curFrame < shadowSpace->fp) {
    203         localRegs = (stackSave->method->registersSize -
    204                      stackSave->method->insSize)*4;
    205         frameBytes2 = (int) shadowSpace->fp - (int) self->curFrame - localRegs;
    206     }
    207     LOGD("********** SHADOW STATE DUMP **********");
    208     LOGD("CurrentPC: 0x%x, Offset: 0x%04x", (int)pc,
    209         (int)(pc - stackSave->method->insns));
    210     LOGD("Class: %s", shadowSpace->method->clazz->descriptor);
    211     LOGD("Method: %s", shadowSpace->method->name);
    212     LOGD("Dalvik PC: 0x%x endPC: 0x%x", (int)shadowSpace->startPC,
    213         (int)shadowSpace->endPC);
    214     LOGD("Interp FP: 0x%x endFP: 0x%x", (int)shadowSpace->fp,
    215         (int)self->curFrame);
    216     LOGD("Shadow FP: 0x%x endFP: 0x%x", (int)shadowSpace->shadowFP,
    217         (int)shadowSpace->endShadowFP);
    218     LOGD("Frame1 Bytes: %d Frame2 Local: %d Bytes: %d", frameBytes,
    219         localRegs, frameBytes2);
    220     LOGD("Trace length: %d State: %d", shadowSpace->traceLength,
    221         shadowSpace->selfVerificationState);
    222 }
    223 
    224 /* Print decoded instructions in the current trace */
    225 static void selfVerificationDumpTrace(const u2* pc, Thread* self)
    226 {
    227     ShadowSpace* shadowSpace = self->shadowSpace;
    228     StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    229     int i, addr, offset;
    230     DecodedInstruction *decInsn;
    231 
    232     LOGD("********** SHADOW TRACE DUMP **********");
    233     for (i = 0; i < shadowSpace->traceLength; i++) {
    234         addr = shadowSpace->trace[i].addr;
    235         offset =  (int)((u2*)addr - stackSave->method->insns);
    236         decInsn = &(shadowSpace->trace[i].decInsn);
    237         /* Instruction not fully decoded here - some register values may be garbage */
    238         LOGD("0x%x: (0x%04x) %s", addr, offset, getOpcodeName(decInsn->opCode));
    239     }
    240 }
    241 
    242 /* Code is forced into this spin loop when a divergence is detected */
    243 static void selfVerificationSpinLoop(ShadowSpace *shadowSpace)
    244 {
    245     const u2 *startPC = shadowSpace->startPC;
    246     JitTraceDescription* desc = dvmCopyTraceDescriptor(startPC, NULL);
    247     if (desc) {
    248         dvmCompilerWorkEnqueue(startPC, kWorkOrderTraceDebug, desc);
    249         /*
    250          * This function effectively terminates the VM right here, so not
    251          * freeing the desc pointer when the enqueuing fails is acceptable.
    252          */
    253     }
    254     gDvmJit.selfVerificationSpin = true;
    255     while(gDvmJit.selfVerificationSpin) sleep(10);
    256 }
    257 
    258 /* Manage self verification while in the debug interpreter */
    259 static bool selfVerificationDebugInterp(const u2* pc, Thread* self,
    260                                         InterpState *interpState)
    261 {
    262     ShadowSpace *shadowSpace = self->shadowSpace;
    263     SelfVerificationState state = shadowSpace->selfVerificationState;
    264 
    265     DecodedInstruction decInsn;
    266     dexDecodeInstruction(gDvm.instrFormat, pc, &decInsn);
    267 
    268     //LOGD("### DbgIntp(%d): PC: 0x%x endPC: 0x%x state: %d len: %d %s",
    269     //    self->threadId, (int)pc, (int)shadowSpace->endPC, state,
    270     //    shadowSpace->traceLength, getOpcodeName(decInsn.opCode));
    271 
    272     if (state == kSVSIdle || state == kSVSStart) {
    273         LOGD("~~~ DbgIntrp: INCORRECT PREVIOUS STATE(%d): %d",
    274             self->threadId, state);
    275         selfVerificationDumpState(pc, self);
    276         selfVerificationDumpTrace(pc, self);
    277     }
    278 
    279     /*
    280      * Skip endPC once when the trace has a backward branch. If the SV state is
    281      * single step, keep it that way.
    282      */
    283     if ((state == kSVSBackwardBranch && pc == shadowSpace->endPC) ||
    284         (state != kSVSBackwardBranch && state != kSVSSingleStep)) {
    285         shadowSpace->selfVerificationState = kSVSDebugInterp;
    286     }
    287 
    288     /* Check that the current pc is the end of the trace */
    289     if ((state == kSVSDebugInterp || state == kSVSSingleStep) &&
    290         pc == shadowSpace->endPC) {
    291 
    292         shadowSpace->selfVerificationState = kSVSIdle;
    293 
    294         /* Check register space */
    295         int frameBytes = (int) shadowSpace->registerSpace +
    296                          shadowSpace->registerSpaceSize*4 -
    297                          (int) shadowSpace->shadowFP;
    298         if (memcmp(shadowSpace->fp, shadowSpace->shadowFP, frameBytes)) {
    299             LOGD("~~~ DbgIntp(%d): REGISTERS DIVERGENCE!", self->threadId);
    300             selfVerificationDumpState(pc, self);
    301             selfVerificationDumpTrace(pc, self);
    302             LOGD("*** Interp Registers: addr: 0x%x bytes: %d",
    303                 (int)shadowSpace->fp, frameBytes);
    304             selfVerificationPrintRegisters((int*)shadowSpace->fp,
    305                                            (int*)shadowSpace->shadowFP,
    306                                            frameBytes/4);
    307             LOGD("*** Shadow Registers: addr: 0x%x bytes: %d",
    308                 (int)shadowSpace->shadowFP, frameBytes);
    309             selfVerificationPrintRegisters((int*)shadowSpace->shadowFP,
    310                                            (int*)shadowSpace->fp,
    311                                            frameBytes/4);
    312             selfVerificationSpinLoop(shadowSpace);
    313         }
    314         /* Check new frame if it exists (invokes only) */
    315         if (self->curFrame < shadowSpace->fp) {
    316             StackSaveArea* stackSave = SAVEAREA_FROM_FP(self->curFrame);
    317             int localRegs = (stackSave->method->registersSize -
    318                              stackSave->method->insSize)*4;
    319             int frameBytes2 = (int) shadowSpace->fp -
    320                               (int) self->curFrame - localRegs;
    321             if (memcmp(((char*)self->curFrame)+localRegs,
    322                 ((char*)shadowSpace->endShadowFP)+localRegs, frameBytes2)) {
    323                 LOGD("~~~ DbgIntp(%d): REGISTERS (FRAME2) DIVERGENCE!",
    324                     self->threadId);
    325                 selfVerificationDumpState(pc, self);
    326                 selfVerificationDumpTrace(pc, self);
    327                 LOGD("*** Interp Registers: addr: 0x%x l: %d bytes: %d",
    328                     (int)self->curFrame, localRegs, frameBytes2);
    329                 selfVerificationPrintRegisters((int*)self->curFrame,
    330                                                (int*)shadowSpace->endShadowFP,
    331                                                (frameBytes2+localRegs)/4);
    332                 LOGD("*** Shadow Registers: addr: 0x%x l: %d bytes: %d",
    333                     (int)shadowSpace->endShadowFP, localRegs, frameBytes2);
    334                 selfVerificationPrintRegisters((int*)shadowSpace->endShadowFP,
    335                                                (int*)self->curFrame,
    336                                                (frameBytes2+localRegs)/4);
    337                 selfVerificationSpinLoop(shadowSpace);
    338             }
    339         }
    340 
    341         /* Check memory space */
    342         bool memDiff = false;
    343         ShadowHeap* heapSpacePtr;
    344         for (heapSpacePtr = shadowSpace->heapSpace;
    345              heapSpacePtr != shadowSpace->heapSpaceTail; heapSpacePtr++) {
    346             int memData = *((unsigned int*) heapSpacePtr->addr);
    347             if (heapSpacePtr->data != memData) {
    348                 LOGD("~~~ DbgIntp(%d): MEMORY DIVERGENCE!", self->threadId);
    349                 LOGD("Addr: 0x%x Intrp Data: 0x%x Jit Data: 0x%x",
    350                     heapSpacePtr->addr, memData, heapSpacePtr->data);
    351                 selfVerificationDumpState(pc, self);
    352                 selfVerificationDumpTrace(pc, self);
    353                 memDiff = true;
    354             }
    355         }
    356         if (memDiff) selfVerificationSpinLoop(shadowSpace);
    357 
    358         /*
    359          * Switch to JIT single step mode to stay in the debug interpreter for
    360          * one more instruction
    361          */
    362         if (state == kSVSSingleStep) {
    363             interpState->jitState = kJitSingleStepEnd;
    364         }
    365         return true;
    366 
    367     /* If the end has not been reached, check that max length is not exceeded */
    368     } else if (shadowSpace->traceLength >= JIT_MAX_TRACE_LEN) {
    369         LOGD("~~~ DbgIntp(%d): CONTROL DIVERGENCE!", self->threadId);
    370         LOGD("startPC: 0x%x endPC: 0x%x currPC: 0x%x",
    371             (int)shadowSpace->startPC, (int)shadowSpace->endPC, (int)pc);
    372         selfVerificationDumpState(pc, self);
    373         selfVerificationDumpTrace(pc, self);
    374         selfVerificationSpinLoop(shadowSpace);
    375 
    376         return true;
    377     }
    378     /* Log the instruction address and decoded instruction for debug */
    379     shadowSpace->trace[shadowSpace->traceLength].addr = (int)pc;
    380     shadowSpace->trace[shadowSpace->traceLength].decInsn = decInsn;
    381     shadowSpace->traceLength++;
    382 
    383     return false;
    384 }
    385 #endif
    386 
    387 /*
    388  * If one of our fixed tables or the translation buffer fills up,
    389  * call this routine to avoid wasting cycles on future translation requests.
    390  */
    391 void dvmJitStopTranslationRequests()
    392 {
    393     /*
    394      * Note 1: This won't necessarily stop all translation requests, and
    395      * operates on a delayed mechanism.  Running threads look to the copy
    396      * of this value in their private InterpState structures and won't see
    397      * this change until it is refreshed (which happens on interpreter
    398      * entry).
    399      * Note 2: This intentionally leaks the table.  Because this is a
    400      * permanent off switch for JIT profiling, it is a one-time leak of 1K
    401      * bytes, and no further attempt will be made to re-allocate it.  We
    402      * can't free it because some thread may still be holding a reference.
    403      */
    404     gDvmJit.pProfTable = NULL;
    405 }
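
/*
 * A minimal sketch of the delayed off-switch described in Note 1 above,
 * using hypothetical names for illustration: each thread refreshes a
 * private copy of the profile-table pointer on interpreter entry, so
 * clearing the global pointer only takes effect at the next refresh.
 */
#if 0
static unsigned char *gExampleProfTable;        /* global on/off switch */

static void exampleInterpEntry(unsigned char **pPrivProfTable)
{
    *pPrivProfTable = gExampleProfTable; /* NULL here disables profiling */
}

static void exampleStopTranslationRequests(void)
{
    gExampleProfTable = NULL;  /* running threads see it on next entry */
}
#endif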
    406 
    407 #if defined(JIT_STATS)
    408 /* Convenience function to increment counter from assembly code */
    409 void dvmBumpNoChain(int from)
    410 {
    411     gDvmJit.noChainExit[from]++;
    412 }
    413 
    414 /* Convenience function to increment counter from assembly code */
    415 void dvmBumpNormal()
    416 {
    417     gDvmJit.normalExit++;
    418 }
    419 
    420 /* Convenience function to increment counter from assembly code */
    421 void dvmBumpPunt(int from)
    422 {
    423     gDvmJit.puntExit++;
    424 }
    425 #endif
    426 
    427 /* Dumps debugging & tuning stats to the log */
    428 void dvmJitStats()
    429 {
    430     int i;
    431     int hit;
    432     int not_hit;
    433     int chains;
    434     int stubs;
    435     if (gDvmJit.pJitEntryTable) {
    436         for (i=0, stubs=chains=hit=not_hit=0;
    437              i < (int) gDvmJit.jitTableSize;
    438              i++) {
    439             if (gDvmJit.pJitEntryTable[i].dPC != 0) {
    440                 hit++;
    441                 if (gDvmJit.pJitEntryTable[i].codeAddress ==
    442                       gDvmJit.interpretTemplate)
    443                     stubs++;
    444             } else
    445                 not_hit++;
    446             if (gDvmJit.pJitEntryTable[i].u.info.chain != gDvmJit.jitTableSize)
    447                 chains++;
    448         }
    449         LOGD("JIT: table size is %d, entries used is %d",
    450              gDvmJit.jitTableSize,  gDvmJit.jitTableEntriesUsed);
    451         LOGD("JIT: %d traces, %d slots, %d chains, %d thresh, %s",
    452              hit, not_hit + hit, chains, gDvmJit.threshold,
    453              gDvmJit.blockingMode ? "Blocking" : "Non-blocking");
    454 
    455 #if defined(JIT_STATS)
    456         LOGD("JIT: Lookups: %d hits, %d misses; %d normal, %d punt",
    457              gDvmJit.addrLookupsFound, gDvmJit.addrLookupsNotFound,
    458              gDvmJit.normalExit, gDvmJit.puntExit);
    459 
    460         LOGD("JIT: noChainExit: %d IC miss, %d interp callsite, "
    461              "%d switch overflow",
    462              gDvmJit.noChainExit[kInlineCacheMiss],
    463              gDvmJit.noChainExit[kCallsiteInterpreted],
    464              gDvmJit.noChainExit[kSwitchOverflow]);
    465 
    466         LOGD("JIT: ICPatch: %d fast, %d queued; %d dropped",
    467              gDvmJit.icPatchFast, gDvmJit.icPatchQueued,
    468              gDvmJit.icPatchDropped);
    469 
    470         LOGD("JIT: Invoke: %d mono, %d poly, %d native, %d return",
    471              gDvmJit.invokeMonomorphic, gDvmJit.invokePolymorphic,
    472              gDvmJit.invokeNative, gDvmJit.returnOp);
    473         LOGD("JIT: Total compilation time: %llu ms", gDvmJit.jitTime / 1000);
    474         LOGD("JIT: Avg unit compilation time: %llu us",
    475              gDvmJit.jitTime / gDvmJit.numCompilations);
    476 #endif
    477 
    478         LOGD("JIT: %d Translation chains, %d interp stubs",
    479              gDvmJit.translationChains, stubs);
    480         if (gDvmJit.profile) {
    481             dvmCompilerSortAndPrintTraceProfiles();
    482         }
    483     }
    484 }
    485 
    486 
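/*
 * Atomically set the trace-under-construction flag in a JitEntry.  The
 * flag shares a packed info word with other fields, so the update is done
 * with compare-and-swap to avoid clobbering concurrent updates to the
 * word's other fields.
 */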
    487 void setTraceConstruction(JitEntry *slot, bool value)
    488 {
    490     JitEntryInfoUnion oldValue;
    491     JitEntryInfoUnion newValue;
    492     do {
    493         oldValue = slot->u;
    494         newValue = oldValue;
    495         newValue.info.traceConstruction = value;
    496     } while (!ATOMIC_CMP_SWAP( &slot->u.infoWord,
    497              oldValue.infoWord, newValue.infoWord));
    498 }
    499 
    500 void resetTracehead(InterpState* interpState, JitEntry *slot)
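/*
 * Reset a trace head to its pre-translation state: point it back at the
 * interpret-only template and clear the under-construction flag.
 */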
    501 {
    502     slot->codeAddress = gDvmJit.interpretTemplate;
    503     setTraceConstruction(slot, false);
    504 }
    505 
    506 /* Clean up any pending trace builds */
    507 void dvmJitAbortTraceSelect(InterpState* interpState)
    508 {
    509     if (interpState->jitState == kJitTSelect)
    510         interpState->jitState = kJitDone;
    511 }
    512 
    513 /*
    514  * Find an entry in the JitTable, creating if necessary.
    515  * Returns NULL if the table is full.
    516  */
    517 static JitEntry *lookupAndAdd(const u2* dPC, bool callerLocked)
    518 {
    519     u4 chainEndMarker = gDvmJit.jitTableSize;
    520     u4 idx = dvmJitHash(dPC);
    521 
    522     /* Walk the bucket chain to find an exact match for our PC */
    523     while ((gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) &&
    524            (gDvmJit.pJitEntryTable[idx].dPC != dPC)) {
    525         idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
    526     }
    527 
    528     if (gDvmJit.pJitEntryTable[idx].dPC != dPC) {
    529         /*
    530          * No match.  Acquire jitTableLock and find the last
    531          * slot in the chain. Possibly continue the chain walk in case
    532          * some other thread allocated the slot we were looking
    533          * at previously (perhaps even the dPC we're trying to enter).
    534          */
    535         if (!callerLocked)
    536             dvmLockMutex(&gDvmJit.tableLock);
    537         /*
    538          * At this point, if .dPC is NULL, then the slot we're
    539          * looking at is the target slot from the primary hash
    540          * (the simple, and common case).  Otherwise we're going
    541          * to have to find a free slot and chain it.
    542          */
    543         MEM_BARRIER(); /* Make sure we reload [].dPC after lock */
    544         if (gDvmJit.pJitEntryTable[idx].dPC != NULL) {
    545             u4 prev;
    546             while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
    547                 if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
    548                     /* Another thread got there first for this dPC */
    549                     if (!callerLocked)
    550                         dvmUnlockMutex(&gDvmJit.tableLock);
    551                     return &gDvmJit.pJitEntryTable[idx];
    552                 }
    553                 idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
    554             }
    555             /* Here, idx should be pointing to the last cell of an
    556              * active chain whose last member contains a valid dPC */
    557             assert(gDvmJit.pJitEntryTable[idx].dPC != NULL);
    558             /* Linear walk to find a free cell and add it to the end */
    559             prev = idx;
    560             while (true) {
    561                 idx++;
    562                 if (idx == chainEndMarker)
    563                     idx = 0;  /* Wraparound */
    564                 if ((gDvmJit.pJitEntryTable[idx].dPC == NULL) ||
    565                     (idx == prev))
    566                     break;
    567             }
    568             if (idx != prev) {
    569                 JitEntryInfoUnion oldValue;
    570                 JitEntryInfoUnion newValue;
    571                 /*
    572                  * Although we hold the lock so that no one else will
    573                  * be trying to update a chain field, the other fields
    574                  * packed into the word may be in use by other threads.
    575                  */
    576                 do {
    577                     oldValue = gDvmJit.pJitEntryTable[prev].u;
    578                     newValue = oldValue;
    579                     newValue.info.chain = idx;
    580                 } while (!ATOMIC_CMP_SWAP(
    581                          &gDvmJit.pJitEntryTable[prev].u.infoWord,
    582                          oldValue.infoWord, newValue.infoWord));
    583             }
    584         }
    585         if (gDvmJit.pJitEntryTable[idx].dPC == NULL) {
    586             /*
    587              * Initialize codeAddress and allocate the slot.  Must
    588              * happen in this order (once dPC is set, the entry is live).
    589              */
    590             gDvmJit.pJitEntryTable[idx].dPC = dPC;
    591             gDvmJit.jitTableEntriesUsed++;
    592         } else {
    593             /* Table is full */
    594             idx = chainEndMarker;
    595         }
    596         if (!callerLocked)
    597             dvmUnlockMutex(&gDvmJit.tableLock);
    598     }
    599     return (idx == chainEndMarker) ? NULL : &gDvmJit.pJitEntryTable[idx];
    600 }
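
/*
 * A sketch of the lookup fast path over this table, with a hypothetical
 * function name: chains are embedded in the table itself - u.info.chain
 * holds the index of the next entry, and a chain value equal to
 * jitTableSize terminates the walk.
 */
#if 0
static JitEntry *exampleLookup(const u2 *dPC)
{
    u4 idx = dvmJitHash(dPC);
    while (gDvmJit.pJitEntryTable[idx].dPC != dPC) {
        if (gDvmJit.pJitEntryTable[idx].u.info.chain == gDvmJit.jitTableSize)
            return NULL;                      /* end of chain, no match */
        idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
    }
    return &gDvmJit.pJitEntryTable[idx];
}
#endif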
    601 
    602 /*
    603  * Adds to the current trace request one instruction at a time, just
    604  * before that instruction is interpreted.  This is the primary trace
    605  * selection function.  NOTE: return instructions are handled a little
    606  * differently.  In general, instructions are "proposed" to be added
    607  * to the current trace prior to interpretation.  If the interpreter
    608  * then successfully completes the instruction, it will be considered
    609  * part of the request.  This allows us to examine machine state prior
    610  * to interpretation, and also abort the trace request if the instruction
    611  * throws or does something unexpected.  However, return instructions
    612  * will cause an immediate end to the translation request - which will
    613  * be passed to the compiler before the return completes.  This is done
    614  * in response to special handling of returns by the interpreter (and
    615  * because returns cannot throw in a way that causes problems for the
    616  * translated code).
    617  */
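/*
 * A trace request is recorded as a list of "runs" (JitTraceRun): each run
 * covers a contiguous range of instructions, and a new run is opened
 * whenever the last PC is not contiguous with the current run (see the
 * currRunHead/currRunLen handling below).
 */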
    618 int dvmCheckJit(const u2* pc, Thread* self, InterpState* interpState)
    619 {
    620     int flags,i,len;
    621     int switchInterp = false;
    622     bool debugOrProfile = dvmDebuggerOrProfilerActive();
    623 
    624     /* Prepare to handle last PC and stage the current PC */
    625     const u2 *lastPC = interpState->lastPC;
    626     interpState->lastPC = pc;
    627 
    628     switch (interpState->jitState) {
    629         char* nopStr;
    630         int target;
    631         int offset;
    632         DecodedInstruction decInsn;
    633         case kJitTSelect:
    634             /* First instruction - just remember the PC and exit */
    635             if (lastPC == NULL) break;
    636             /* Grow the trace around the last PC if jitState is kJitTSelect */
    637             dexDecodeInstruction(gDvm.instrFormat, lastPC, &decInsn);
    638 
    639             /*
    640              * Treat {PACKED,SPARSE}_SWITCH as trace-ending instructions
    641              * due to the amount of space required to generate their
    642              * chaining cells.
    643              */
    644             if (interpState->totalTraceLen != 0 &&
    645                 (decInsn.opCode == OP_PACKED_SWITCH ||
    646                  decInsn.opCode == OP_SPARSE_SWITCH)) {
    647                 interpState->jitState = kJitTSelectEnd;
    648                 break;
    649             }
    650 
    651 
    652 #if defined(SHOW_TRACE)
    653             LOGD("TraceGen: adding %s",getOpcodeName(decInsn.opCode));
    654 #endif
    655             flags = dexGetInstrFlags(gDvm.instrFlags, decInsn.opCode);
    656             len = dexGetInstrOrTableWidthAbs(gDvm.instrWidth, lastPC);
    657             offset = lastPC - interpState->method->insns;
    658             assert((unsigned) offset <
    659                    dvmGetMethodInsnsSize(interpState->method));
    660             if (lastPC != interpState->currRunHead + interpState->currRunLen) {
    661                 int currTraceRun;
    662                 /* We need to start a new trace run */
    663                 currTraceRun = ++interpState->currTraceRun;
    664                 interpState->currRunLen = 0;
    665                 interpState->currRunHead = (u2*)lastPC;
    666                 interpState->trace[currTraceRun].frag.startOffset = offset;
    667                 interpState->trace[currTraceRun].frag.numInsts = 0;
    668                 interpState->trace[currTraceRun].frag.runEnd = false;
    669                 interpState->trace[currTraceRun].frag.hint = kJitHintNone;
    670             }
    671             interpState->trace[interpState->currTraceRun].frag.numInsts++;
    672             interpState->totalTraceLen++;
    673             interpState->currRunLen += len;
    674 
    675             /* Will probably never hit this with the current trace builder */
    676             if (interpState->currTraceRun == (MAX_JIT_RUN_LEN - 1)) {
    677                 interpState->jitState = kJitTSelectEnd;
    678             }
    679 
    680             if (  ((flags & kInstrUnconditional) == 0) &&
    681                   /* don't end trace on INVOKE_DIRECT_EMPTY  */
    682                   (decInsn.opCode != OP_INVOKE_DIRECT_EMPTY) &&
    683                   ((flags & (kInstrCanBranch |
    684                              kInstrCanSwitch |
    685                              kInstrCanReturn |
    686                              kInstrInvoke)) != 0)) {
    687                     interpState->jitState = kJitTSelectEnd;
    688 #if defined(SHOW_TRACE)
    689             LOGD("TraceGen: ending on %s, basic block end",
    690                  getOpcodeName(decInsn.opCode));
    691 #endif
    692             }
    693             /* Break on throw or self-loop */
    694             if ((decInsn.opCode == OP_THROW) || (lastPC == pc)){
    695                 interpState->jitState = kJitTSelectEnd;
    696             }
    697             if (interpState->totalTraceLen >= JIT_MAX_TRACE_LEN) {
    698                 interpState->jitState = kJitTSelectEnd;
    699             }
    700             /* Abandon the trace request if debugger/profiler is attached */
    701             if (debugOrProfile) {
    702                 interpState->jitState = kJitDone;
    703                 break;
    704             }
    705             if ((flags & kInstrCanReturn) != kInstrCanReturn) {
    706                 break;
    707             }
    708             /* NOTE: intentional fallthrough for returns */
    709         case kJitTSelectEnd:
    710             {
    711                 /* Bad (empty) trace - mark as untranslatable */
    712                 if (interpState->totalTraceLen == 0) {
    714                     interpState->jitState = kJitDone;
    715                     switchInterp = true;
    716                     break;
    717                 }
    718                 JitTraceDescription* desc =
    719                    (JitTraceDescription*)malloc(sizeof(JitTraceDescription) +
    720                      sizeof(JitTraceRun) * (interpState->currTraceRun+1));
    721                 if (desc == NULL) {
    722                     LOGE("Out of memory in trace selection");
    723                     dvmJitStopTranslationRequests();
    724                     interpState->jitState = kJitDone;
    725                     switchInterp = true;
    726                     break;
    727                 }
    728                 interpState->trace[interpState->currTraceRun].frag.runEnd =
    729                      true;
    730                 desc->method = interpState->method;
    731                 memcpy((char*)&(desc->trace[0]),
    732                     (char*)&(interpState->trace[0]),
    733                     sizeof(JitTraceRun) * (interpState->currTraceRun+1));
    734 #if defined(SHOW_TRACE)
    735                 LOGD("TraceGen:  trace done, adding to queue");
    736 #endif
    737                 if (dvmCompilerWorkEnqueue(
    738                        interpState->currTraceHead,kWorkOrderTrace,desc)) {
    739                     /* Work order successfully enqueued */
    740                     if (gDvmJit.blockingMode) {
    741                         dvmCompilerDrainQueue();
    742                     }
    743                 } else {
    744                     /*
    745                      * Make sure the descriptor for the abandoned work order is
    746                      * freed.
    747                      */
    748                     free(desc);
    749                 }
    750                 /*
    751                  * Reset "trace in progress" flag whether or not we
    752                  * successfully entered a work order.
    753                  */
    754                 JitEntry *jitEntry =
    755                     lookupAndAdd(interpState->currTraceHead, false);
    756                 if (jitEntry) {
    757                     setTraceConstruction(jitEntry, false);
    758                 }
    759                 interpState->jitState = kJitDone;
    760                 switchInterp = true;
    761             }
    762             break;
    763         case kJitSingleStep:
    764             interpState->jitState = kJitSingleStepEnd;
    765             break;
    766         case kJitSingleStepEnd:
    767             interpState->entryPoint = kInterpEntryResume;
    768             interpState->jitState = kJitDone;
    769             switchInterp = true;
    770             break;
    771         case kJitDone:
    772             switchInterp = true;
    773             break;
    774 #if defined(WITH_SELF_VERIFICATION)
    775         case kJitSelfVerification:
    776             if (selfVerificationDebugInterp(pc, self, interpState)) {
    777                 /*
    778                  * If the next state is not single-step end, we can switch
    779                  * interpreter now.
    780                  * the interpreter now.
    781                 if (interpState->jitState != kJitSingleStepEnd) {
    782                     interpState->jitState = kJitDone;
    783                     switchInterp = true;
    784                 }
    785             }
    786             break;
    787 #endif
    788         /*
    789          * If the debug interpreter was entered for non-JIT reasons, check if
    790          * the original reason still holds. If not, we have to force the
    791          * interpreter switch here and use dvmDebuggerOrProfilerActive instead
    792          * of dvmJitDebuggerOrProfilerActive since the latter will always
    793          * return true when the debugger/profiler is already detached and the
    794          * JIT profiling table is restored.
    795          */
    796         case kJitNot:
    797             switchInterp = !dvmDebuggerOrProfilerActive();
    798             break;
    799         default:
    800             LOGE("Unexpected JIT state: %d entry point: %d",
    801                  interpState->jitState, interpState->entryPoint);
    802             dvmAbort();
    803             break;
    804     }
    805     /*
    806      * Final check to see if we can really switch the interpreter. Make sure
    807      * the jitState is kJitDone or kJitNot when switchInterp is set to true.
    808      */
    809      assert(switchInterp == false || interpState->jitState == kJitDone ||
    810             interpState->jitState == kJitNot);
    811      return switchInterp && !debugOrProfile;
    812 }
    813 
    814 JitEntry *dvmFindJitEntry(const u2* pc)
    815 {
    816     int idx = dvmJitHash(pc);
    817 
    818     /* Expect a high hit rate on 1st shot */
    819     if (gDvmJit.pJitEntryTable[idx].dPC == pc)
    820         return &gDvmJit.pJitEntryTable[idx];
    821     else {
    822         int chainEndMarker = gDvmJit.jitTableSize;
    823         while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
    824             idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
    825             if (gDvmJit.pJitEntryTable[idx].dPC == pc)
    826                 return &gDvmJit.pJitEntryTable[idx];
    827         }
    828     }
    829     return NULL;
    830 }
    831 
    832 /*
    833  * If a translated code address exists for the Dalvik byte code
    834  * pointer, return it.  This routine needs to be fast.
    835  */
    836 void* dvmJitGetCodeAddr(const u2* dPC)
    837 {
    838     int idx = dvmJitHash(dPC);
    839     const u2* npc = gDvmJit.pJitEntryTable[idx].dPC;
    840     if (npc != NULL) {
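        /*
         * Translations are reported as absent while thread suspension is
         * pending, while the code cache is full, or once profiling has
         * been permanently disabled (pProfTable == NULL).
         */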
    841         bool hideTranslation = (gDvm.sumThreadSuspendCount != 0) ||
    842                                (gDvmJit.codeCacheFull == true) ||
    843                                (gDvmJit.pProfTable == NULL);
    844 
    845         if (npc == dPC) {
    846 #if defined(JIT_STATS)
    847             gDvmJit.addrLookupsFound++;
    848 #endif
    849             return hideTranslation ?
    850                 NULL : gDvmJit.pJitEntryTable[idx].codeAddress;
    851         } else {
    852             int chainEndMarker = gDvmJit.jitTableSize;
    853             while (gDvmJit.pJitEntryTable[idx].u.info.chain != chainEndMarker) {
    854                 idx = gDvmJit.pJitEntryTable[idx].u.info.chain;
    855                 if (gDvmJit.pJitEntryTable[idx].dPC == dPC) {
    856 #if defined(JIT_STATS)
    857                     gDvmJit.addrLookupsFound++;
    858 #endif
    859                     return hideTranslation ?
    860                         NULL : gDvmJit.pJitEntryTable[idx].codeAddress;
    861                 }
    862             }
    863         }
    864     }
    865 #if defined(JIT_STATS)
    866     gDvmJit.addrLookupsNotFound++;
    867 #endif
    868     return NULL;
    869 }
    870 
    871 /*
    872  * Register the translated code pointer into the JitTable.
    873  * NOTE: Once a codeAddress field transitions from initial state to
    874  * JIT'd code, it must not be altered without first halting all
    875  * threads.  This routine should only be called by the compiler
    876  * thread.
    877  */
    878 void dvmJitSetCodeAddr(const u2* dPC, void *nPC, JitInstructionSetType set) {
    879     JitEntryInfoUnion oldValue;
    880     JitEntryInfoUnion newValue;
    881     JitEntry *jitEntry = lookupAndAdd(dPC, false);
    882     assert(jitEntry);
    883     /* Note: order of update is important */
    884     do {
    885         oldValue = jitEntry->u;
    886         newValue = oldValue;
    887         newValue.info.instructionSet = set;
    888     } while (!ATOMIC_CMP_SWAP(
    889              &jitEntry->u.infoWord,
    890              oldValue.infoWord, newValue.infoWord));
    891     jitEntry->codeAddress = nPC;
    892 }
    893 
    894 /*
    895  * Determine if a valid trace-building request is active.  Return true
    896  * if we need to abort and switch back to the fast interpreter, false
    897  * otherwise.
    898  */
    899 bool dvmJitCheckTraceRequest(Thread* self, InterpState* interpState)
    900 {
    901     bool switchInterp = false;         /* Assume success */
    902     int i;
    903     intptr_t filterKey = ((intptr_t) interpState->pc) >>
    904                          JIT_TRACE_THRESH_FILTER_GRAN_LOG2;
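    /*
     * PCs within the same 2^JIT_TRACE_THRESH_FILTER_GRAN_LOG2-byte region
     * share a filter key, so one hot region consumes at most one slot in
     * the threshold filter checked below.
     */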
    905     bool debugOrProfile = dvmDebuggerOrProfilerActive();
    906 
    907     /* Check if the JIT request can be handled now */
    908     if (gDvmJit.pJitEntryTable != NULL && debugOrProfile == false) {
    909         /* Bypass the filter for hot trace requests or during stress mode */
    910         if (interpState->jitState == kJitTSelectRequest &&
    911             gDvmJit.threshold > 6) {
    912             /* Two-level filtering scheme */
    913             for (i=0; i< JIT_TRACE_THRESH_FILTER_SIZE; i++) {
    914                 if (filterKey == interpState->threshFilter[i]) {
    915                     break;
    916                 }
    917             }
    918             if (i == JIT_TRACE_THRESH_FILTER_SIZE) {
    919                 /*
    920                  * Use random replacement policy - otherwise we could miss a
    921                  * large loop that contains more traces than the size of our
    922                  * filter array.
    923                  */
    924                 i = rand() % JIT_TRACE_THRESH_FILTER_SIZE;
    925                 interpState->threshFilter[i] = filterKey;
    926                 interpState->jitState = kJitDone;
    927             }
    928         }
    929 
    930         /* If the compiler is backlogged, cancel any JIT actions */
    931         if (gDvmJit.compilerQueueLength >= gDvmJit.compilerHighWater) {
    932             interpState->jitState = kJitDone;
    933         }
    934 
    935         /*
    936          * Check for additional reasons that might force the trace select
    937          * request to be dropped
    938          */
    939         if (interpState->jitState == kJitTSelectRequest ||
    940             interpState->jitState == kJitTSelectRequestHot) {
    941             JitEntry *slot = lookupAndAdd(interpState->pc, false);
    942             if (slot == NULL) {
    943                 /*
    944                  * Table is full.  This should have been
    945                  * detected by the compiler thread and the table
    946                  * resized before we run into it here.  Assume bad things
    947                  * are afoot and disable profiling.
    948                  */
    949                 interpState->jitState = kJitDone;
    950                 LOGD("JIT: JitTable full, disabling profiling");
    951                 dvmJitStopTranslationRequests();
    952             } else if (slot->u.info.traceConstruction) {
    953                 /*
    954                  * Trace request already in progress, but most likely it
    955                  * aborted without cleaning up.  Assume the worst and
    956                  * mark trace head as untranslatable.  If we're wrong,
    957                  * the compiler thread will correct the entry when the
    958                  * translation is completed.  The downside here is that
    959                  * some existing translation may chain to the interpret-only
    960                  * template instead of the real translation during this
    961                  * window.  This is a performance, not a correctness, issue.
    962                  */
    963                 interpState->jitState = kJitDone;
    964                 resetTracehead(interpState, slot);
    965             } else if (slot->codeAddress) {
    966                  /* Nothing to do here - just return */
    967                 interpState->jitState = kJitDone;
    968             } else {
    969                 /*
    970                  * Mark request.  Note, we are not guaranteed exclusivity
    971                  * here.  A window exists for another thread to be
    972                  * attempting to build this same trace.  Rather than
    973                  * bear the cost of locking, we'll just allow that to
    974                  * happen.  The compiler thread, if it chooses, can
    975                  * discard redundant requests.
    976                  */
    977                 setTraceConstruction(slot, true);
    978             }
    979         }
    980 
    981         switch (interpState->jitState) {
    982             case kJitTSelectRequest:
    983             case kJitTSelectRequestHot:
    984                 interpState->jitState = kJitTSelect;
    985                 interpState->currTraceHead = interpState->pc;
    986                 interpState->currTraceRun = 0;
    987                 interpState->totalTraceLen = 0;
    988                 interpState->currRunHead = interpState->pc;
    989                 interpState->currRunLen = 0;
    990                 interpState->trace[0].frag.startOffset =
    991                      interpState->pc - interpState->method->insns;
    992                 interpState->trace[0].frag.numInsts = 0;
    993                 interpState->trace[0].frag.runEnd = false;
    994                 interpState->trace[0].frag.hint = kJitHintNone;
    995                 interpState->lastPC = 0;
    996                 break;
    997             /*
    998              * From the JIT's perspective there is no need to stay in the debug
    999              * interpreter unless debugger/profiler is attached.
   1000              */
   1001             case kJitDone:
   1002                 switchInterp = true;
   1003                 break;
   1004             default:
   1005                 LOGE("Unexpected JIT state: %d entry point: %d",
   1006                      interpState->jitState, interpState->entryPoint);
   1007                 dvmAbort();
   1008         }
   1009     } else {
   1010         /*
   1011          * Cannot build trace this time - ready to leave the dbg interpreter
   1012          */
   1013         interpState->jitState = kJitDone;
   1014         switchInterp = true;
   1015     }
   1016 
   1017     /*
   1018      * Final check to see if we can really switch the interpreter. Make sure
   1019      * the jitState is kJitDone when switchInterp is set to true.
   1020      */
   1021     assert(switchInterp == false || interpState->jitState == kJitDone);
   1022     return switchInterp && !debugOrProfile;
   1023 }
   1024 
   1025 /*
   1026  * Resizes the JitTable.  The new size must be a power of 2; returns true on failure.
   1027  * Stops all threads, and thus is a heavyweight operation. May only be called
   1028  * by the compiler thread.
   1029  */
   1030 bool dvmJitResizeJitTable( unsigned int size )
   1031 {
   1032     JitEntry *pNewTable;
   1033     JitEntry *pOldTable;
   1034     JitEntry tempEntry;
   1035     u4 newMask;
   1036     unsigned int oldSize;
   1037     unsigned int i;
   1038 
   1039     assert(gDvmJit.pJitEntryTable != NULL);
   1040     assert(size && !(size & (size - 1)));   /* Is power of 2? */
   1041 
   1042     LOGI("Jit: resizing JitTable from %d to %d", gDvmJit.jitTableSize, size);
   1043 
   1044     newMask = size - 1;
   1045 
   1046     if (size <= gDvmJit.jitTableSize) {
   1047         return true;
   1048     }
   1049 
   1050     /* Make sure requested size is compatible with chain field width */
   1051     tempEntry.u.info.chain = size;
   1052     if (tempEntry.u.info.chain != size) {
   1053         LOGD("Jit: JitTable request of %d too big", size);
   1054         return true;
   1055     }
   1056 
   1057     pNewTable = (JitEntry*)calloc(size, sizeof(*pNewTable));
   1058     if (pNewTable == NULL) {
   1059         return true;
   1060     }
   1061     for (i=0; i< size; i++) {
   1062         pNewTable[i].u.info.chain = size;  /* Initialize chain termination */
   1063     }
   1064 
   1065     /* Stop all other interpreting/jit'ng threads */
   1066     dvmSuspendAllThreads(SUSPEND_FOR_TBL_RESIZE);
   1067 
   1068     pOldTable = gDvmJit.pJitEntryTable;
   1069     oldSize = gDvmJit.jitTableSize;
   1070 
   1071     dvmLockMutex(&gDvmJit.tableLock);
   1072     gDvmJit.pJitEntryTable = pNewTable;
   1073     gDvmJit.jitTableSize = size;
   1074     gDvmJit.jitTableMask = size - 1;
   1075     gDvmJit.jitTableEntriesUsed = 0;
   1076 
   1077     for (i=0; i < oldSize; i++) {
   1078         if (pOldTable[i].dPC) {
   1079             JitEntry *p;
   1080             u2 chain;
   1081             p = lookupAndAdd(pOldTable[i].dPC, true /* holds tableLock*/ );
   1082             p->codeAddress = pOldTable[i].codeAddress;
   1083             /* We need to preserve the new chain field, but copy the rest */
   1084             chain = p->u.info.chain;
   1085             p->u = pOldTable[i].u;
   1086             p->u.info.chain = chain;
   1087         }
   1088     }
   1089     dvmUnlockMutex(&gDvmJit.tableLock);
   1090 
   1091     free(pOldTable);
   1092 
   1093     /* Restart the world */
   1094     dvmResumeAllThreads(SUSPEND_FOR_TBL_RESIZE);
   1095 
   1096     return false;
   1097 }
   1098 
   1099 /*
   1100  * Reset the JitTable to the initial clean state.
   1101  */
   1102 void dvmJitResetTable(void)
   1103 {
   1104     JitEntry *jitEntry = gDvmJit.pJitEntryTable;
   1105     unsigned int size = gDvmJit.jitTableSize;
   1106     unsigned int i;
   1107 
   1108     dvmLockMutex(&gDvmJit.tableLock);
   1109     memset((void *) jitEntry, 0, sizeof(JitEntry) * size);
   1110     for (i=0; i< size; i++) {
   1111         jitEntry[i].u.info.chain = size;  /* Initialize chain termination */
   1112     }
   1113     gDvmJit.jitTableEntriesUsed = 0;
   1114     dvmUnlockMutex(&gDvmJit.tableLock);
   1115 }
   1116 
   1117 /*
   1118  * Float/double-to-long conversion requires clamping to the min and max of the
   1119  * integer form.  If the target doesn't support this natively, use these helpers.
   1120  */
   1121 s8 dvmJitd2l(double d)
   1122 {
   1123     static const double kMaxLong = (double)(s8)0x7fffffffffffffffULL;
   1124     static const double kMinLong = (double)(s8)0x8000000000000000ULL;
   1125     if (d >= kMaxLong)
   1126         return (s8)0x7fffffffffffffffULL;
   1127     else if (d <= kMinLong)
   1128         return (s8)0x8000000000000000ULL;
   1129     else if (d != d) // NaN case
   1130         return 0;
   1131     else
   1132         return (s8)d;
   1133 }
   1134 
   1135 s8 dvmJitf2l(float f)
   1136 {
   1137     static const float kMaxLong = (float)(s8)0x7fffffffffffffffULL;
   1138     static const float kMinLong = (float)(s8)0x8000000000000000ULL;
   1139     if (f >= kMaxLong)
   1140         return (s8)0x7fffffffffffffffULL;
   1141     else if (f <= kMinLong)
   1142         return (s8)0x8000000000000000ULL;
   1143     else if (f != f) // NaN case
   1144         return 0;
   1145     else
   1146         return (s8)f;
   1147 }
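
/*
 * Expected behavior of the helpers above, as illustrative assertions:
 * NaN maps to 0 and out-of-range values clamp to the long extremes.
 */
#if 0
assert(dvmJitd2l(0.0 / 0.0) == 0);                      /* NaN */
assert(dvmJitd2l(1e300) == (s8)0x7fffffffffffffffULL);  /* clamps to max */
assert(dvmJitd2l(-1e300) == (s8)0x8000000000000000ULL); /* clamps to min */
assert(dvmJitf2l(-42.5f) == (s8)-42);                   /* truncates */
#endif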
   1148 
   1149 #endif /* WITH_JIT */
   1150