/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Fundamental synchronization mechanisms.
 *
 * The top part of the file has operations on "monitor" structs; the
 * next part has the native calls on objects.
 *
 * The current implementation uses "thin locking" to avoid allocating
 * an Object's full Monitor struct until absolutely necessary (i.e.,
 * during contention or a call to wait()).
 *
 * TODO: make improvements to thin locking
 * We may be able to improve performance and reduce memory requirements by:
 *  - reverting to a thin lock once the Monitor is no longer necessary
 *  - using a pool of monitor objects, with some sort of recycling scheme
 *
 * TODO: recycle native-level monitors when objects are garbage collected.
 */
#include "Dalvik.h"

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <pthread.h>
#include <time.h>
#include <sys/time.h>
#include <errno.h>

#define LOG_THIN    LOGV

#ifdef WITH_DEADLOCK_PREDICTION     /* fwd */
static const char* kStartBanner =
    "<-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#";
static const char* kEndBanner =
    "#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#->";

/*
 * Unsorted, expanding list of objects.
 *
 * This is very similar to PointerSet (which came into existence after this),
 * but these are unsorted, uniqueness is not enforced by the "add" function,
 * and the base object isn't allocated on the heap.
 */
typedef struct ExpandingObjectList {
    u2          alloc;
    u2          count;
    Object**    list;
} ExpandingObjectList;

/* fwd */
static void updateDeadlockPrediction(Thread* self, Object* obj);
static void removeCollectedObject(Object* obj);
static void expandObjClear(ExpandingObjectList* pList);
#endif

/*
 * Every Object has a monitor associated with it, but not every Object is
 * actually locked.  Even the ones that are locked do not need a
 * full-fledged monitor until a) there is actual contention or b) wait()
 * is called on the Object.
 *
 * For Dalvik, we have implemented a scheme similar to the one described
 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
 * (ACM 1998).  Things are even easier for us, though, because we have
 * a full 32 bits to work with.
 *
 * The two states of an Object's lock are referred to as "thin" and
 * "fat".  A lock may transition from the "thin" state to the "fat"
 * state and this transition is referred to as inflation.  Once a lock
 * has been inflated it remains in the "fat" state indefinitely.
 *
 * The lock value itself is stored in Object.lock.  The LSB of the
 * lock encodes its state.  When cleared, the lock is in the "thin"
 * state and its bits are formatted as follows:
 *
 *    [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
 *     lock count   thread id  hash state  0
 *
 * When set, the lock is in the "fat" state and its bits are formatted
 * as follows:
 *
 *    [31 ---- 3] [2 ---- 1] [0]
 *      pointer   hash state  1
 *
 * For an in-depth description of the mechanics of thin-vs-fat locking,
 * read the paper referred to above.
 */
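
/*
 * A worked example of the thin layout (illustrative, following the
 * diagram above): a lock held once by thread id 5 with hash state 0 is
 *
 *     (0 << 19) | (5 << 3) = 0x00000028
 *
 * where a count field of 0 means "locked once".  A second, recursive
 * acquisition adds 1 << 19, giving 0x00080028, and the LSB stays 0 to
 * mark the lock as thin.
 */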

/*
 * Monitors provide:
 *  - mutually exclusive access to resources
 *  - a way for multiple threads to wait for notification
 *
 * In effect, they fill the role of both mutexes and condition variables.
 *
 * Only one thread can own the monitor at any time.  There may be several
 * threads waiting on it (the wait call unlocks it).  One or more waiting
 * threads may be getting interrupted or notified at any given time.
 */
struct Monitor {
    Thread*     owner;          /* which thread currently owns the lock? */
    int         lockCount;      /* owner's recursive lock depth */
    Object*     obj;            /* what object are we part of [debug only] */

    Thread*     waitSet;        /* threads currently waiting on this monitor */

    pthread_mutex_t lock;

    Monitor*    next;

#ifdef WITH_DEADLOCK_PREDICTION
    /*
     * Objects that have been locked immediately after this one in the
     * past.  We use an expanding flat array, allocated on first use, to
     * minimize allocations.  Deletions from the list, expected to be
     * infrequent, are crunched down.
     */
    ExpandingObjectList historyChildren;

    /*
     * We also track parents.  This isn't strictly necessary, but it makes
     * the cleanup at GC time significantly faster.
     */
    ExpandingObjectList historyParents;

    /* used during cycle detection */
    bool        historyMark;

    /* stack trace, established the first time we locked the object */
    int         historyStackDepth;
    int*        historyRawStackTrace;
#endif
};


/*
 * Create and initialize a monitor.
 */
Monitor* dvmCreateMonitor(Object* obj)
{
    Monitor* mon;

    mon = (Monitor*) calloc(1, sizeof(Monitor));
    if (mon == NULL) {
        LOGE("Unable to allocate monitor\n");
        dvmAbort();
    }
    if (((u4)mon & 7) != 0) {
        LOGE("Misaligned monitor: %p\n", mon);
        dvmAbort();
    }
    mon->obj = obj;
    dvmInitMutex(&mon->lock);

    /* replace the head of the list with the new monitor */
    do {
        mon->next = gDvm.monitorList;
    } while (!ATOMIC_CMP_SWAP((int32_t*)(void*)&gDvm.monitorList,
                              (int32_t)mon->next, (int32_t)mon));

    return mon;
}

/*
 * Free the monitor list.  Only used when shutting the VM down.
 */
void dvmFreeMonitorList(void)
{
    Monitor* mon;
    Monitor* nextMon;

    mon = gDvm.monitorList;
    while (mon != NULL) {
        nextMon = mon->next;

#ifdef WITH_DEADLOCK_PREDICTION
        expandObjClear(&mon->historyChildren);
        expandObjClear(&mon->historyParents);
        free(mon->historyRawStackTrace);
#endif
        free(mon);
        mon = nextMon;
    }
}

/*
 * Log some info about our monitors.
 */
void dvmDumpMonitorInfo(const char* msg)
{
#if QUIET_ZYGOTE_MONITOR
    if (gDvm.zygote) {
        return;
    }
#endif

    int totalCount;
    int liveCount;

    totalCount = liveCount = 0;
    Monitor* mon = gDvm.monitorList;
    while (mon != NULL) {
        totalCount++;
        if (mon->obj != NULL)
            liveCount++;
        mon = mon->next;
    }

    LOGD("%s: monitor list has %d entries (%d live)\n",
        msg, totalCount, liveCount);
}

/*
 * Get the object that a monitor is part of.
 */
Object* dvmGetMonitorObject(Monitor* mon)
{
    if (mon == NULL)
        return NULL;
    else
        return mon->obj;
}

/*
 * Returns the thread id of the thread owning the given lock.
 */
static u4 lockOwner(Object* obj)
{
    Thread *owner;
    u4 lock;

    assert(obj != NULL);
    /*
     * Since we're reading the lock value multiple times, latch it so
     * that it doesn't change out from under us if we get preempted.
     */
    lock = obj->lock;
    if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
        return LW_LOCK_OWNER(lock);
    } else {
        owner = LW_MONITOR(lock)->owner;
        return owner ? owner->threadId : 0;
    }
}

/*
 * Get the thread that holds the lock on the specified object.  The
 * object may be unlocked, thin-locked, or fat-locked.
 *
 * The caller must lock the thread list before calling here.
 */
Thread* dvmGetObjectLockHolder(Object* obj)
{
    u4 threadId = lockOwner(obj);

    if (threadId == 0)
        return NULL;
    return dvmGetThreadByThreadId(threadId);
}

/*
 * Checks whether the given thread holds the given object's lock.
 */
bool dvmHoldsLock(Thread* thread, Object* obj)
{
    if (thread == NULL || obj == NULL) {
        return false;
    } else {
        return thread->threadId == lockOwner(obj);
    }
}

/*
 * Free the monitor associated with an object and make the object's lock
 * thin again.  This is called during garbage collection.
 */
static void freeObjectMonitor(Object* obj)
{
    Monitor *mon;

    assert(LW_SHAPE(obj->lock) == LW_SHAPE_FAT);

#ifdef WITH_DEADLOCK_PREDICTION
    if (gDvm.deadlockPredictMode != kDPOff)
        removeCollectedObject(obj);
#endif

    mon = LW_MONITOR(obj->lock);
    obj->lock = DVM_LOCK_INITIAL_THIN_VALUE;

    /* This lock is associated with an object
     * that's being swept.  The only possible way
     * anyone could be holding this lock would be
     * if some JNI code locked but didn't unlock
     * the object, in which case we've got some bad
     * native code somewhere.
     */
    assert(pthread_mutex_trylock(&mon->lock) == 0);
    pthread_mutex_destroy(&mon->lock);
#ifdef WITH_DEADLOCK_PREDICTION
    expandObjClear(&mon->historyChildren);
    expandObjClear(&mon->historyParents);
    free(mon->historyRawStackTrace);
#endif
    free(mon);
}

/*
 * Frees monitor objects belonging to unmarked objects.
 */
void dvmSweepMonitorList(Monitor** mon, int (*isUnmarkedObject)(void*))
{
    Monitor handle;
    Monitor *prev, *curr;
    Object *obj;

    assert(mon != NULL);
    assert(*mon != NULL);
    assert(isUnmarkedObject != NULL);
    prev = &handle;
    prev->next = curr = *mon;
    while (curr != NULL) {
        obj = curr->obj;
        if (obj != NULL && (*isUnmarkedObject)(obj) != 0) {
            prev->next = curr = curr->next;
            freeObjectMonitor(obj);
        } else {
            prev = curr;
            curr = curr->next;
        }
    }
    *mon = handle.next;
}

static char *logWriteInt(char *dst, int value)
{
    *dst++ = EVENT_TYPE_INT;
    set4LE((u1 *)dst, value);
    return dst + 4;
}

static char *logWriteString(char *dst, const char *value, size_t len)
{
    *dst++ = EVENT_TYPE_STRING;
    len = len < 32 ? len : 32;
    set4LE((u1 *)dst, len);
    dst += 4;
    memcpy(dst, value, len);
    return dst + len;
}
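
/*
 * Wire-format sketch (illustrative, derived from the helpers above):
 * each int encodes to 5 bytes and each string to at most 37 bytes.
 *
 *     logWriteInt(cp, 42)         -> EVENT_TYPE_INT 2a 00 00 00  (5 bytes)
 *     logWriteString(cp, "vm", 2) -> EVENT_TYPE_STRING 02 00 00 00 'v' 'm'
 *                                                               (7 bytes)
 *
 * Strings longer than 32 bytes are silently truncated, which bounds the
 * event buffer used by logContentionEvent() below.
 */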

#define EVENT_LOG_TAG_dvm_lock_sample 20003

static void logContentionEvent(Thread *self, u4 waitMs, u4 samplePercent)
{
    const StackSaveArea *saveArea;
    const Method *meth;
    u4 relativePc;
    char eventBuffer[132];
    const char *fileName;
    char procName[33], *selfName;
    char *cp;
    size_t len;
    int fd;

    saveArea = SAVEAREA_FROM_FP(self->curFrame);
    meth = saveArea->method;
    cp = eventBuffer;

    /* Emit the event list length, 1 byte. */
    *cp++ = 7;

    /* Emit the process name, <= 37 bytes. */
    fd = open("/proc/self/cmdline", O_RDONLY);
    memset(procName, 0, sizeof(procName));
    if (fd >= 0) {
        read(fd, procName, sizeof(procName) - 1);
        close(fd);
    }
    len = strlen(procName);
    cp = logWriteString(cp, procName, len);

    /* Emit the main thread status, 5 bytes. */
    bool isMainThread = (self->systemTid == getpid());
    cp = logWriteInt(cp, isMainThread);

    /* Emit self thread name string, <= 37 bytes. */
    selfName = dvmGetThreadName(self);
    cp = logWriteString(cp, selfName, strlen(selfName));
    free(selfName);

    /* Emit the wait time, 5 bytes. */
    cp = logWriteInt(cp, waitMs);

    /* Emit the source code file name, <= 37 bytes. */
    fileName = dvmGetMethodSourceFile(meth);
    if (fileName == NULL) fileName = "";
    cp = logWriteString(cp, fileName, strlen(fileName));

    /* Emit the source code line number, 5 bytes. */
    relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
    cp = logWriteInt(cp, dvmLineNumFromPC(meth, relativePc));

    /* Emit the sample percentage, 5 bytes. */
    cp = logWriteInt(cp, samplePercent);

    assert((size_t)(cp - eventBuffer) <= sizeof(eventBuffer));
    android_btWriteLog(EVENT_LOG_TAG_dvm_lock_sample,
                       EVENT_TYPE_LIST,
                       eventBuffer,
                       (size_t)(cp - eventBuffer));
}

/*
 * Lock a monitor.
 */
static void lockMonitor(Thread* self, Monitor* mon)
{
    ThreadStatus oldStatus;
    u4 waitThreshold, samplePercent;
    u8 waitStart, waitEnd, waitMs;

    if (mon->owner == self) {
        mon->lockCount++;
        return;
    }
    if (pthread_mutex_trylock(&mon->lock) != 0) {
        oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
        waitThreshold = gDvm.lockProfThreshold;
        if (waitThreshold) {
            waitStart = dvmGetRelativeTimeUsec();
        }
        dvmLockMutex(&mon->lock);
        if (waitThreshold) {
            waitEnd = dvmGetRelativeTimeUsec();
        }
        dvmChangeStatus(self, oldStatus);
        if (waitThreshold) {
            waitMs = (waitEnd - waitStart) / 1000;
            if (waitMs >= waitThreshold) {
                samplePercent = 100;
            } else {
                samplePercent = 100 * waitMs / waitThreshold;
            }
            if (samplePercent != 0 && ((u4)rand() % 100 < samplePercent)) {
                logContentionEvent(self, waitMs, samplePercent);
            }
        }
    }
    mon->owner = self;
    assert(mon->lockCount == 0);
}
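
/*
 * A worked example of the sampling above (illustrative only): with
 * gDvm.lockProfThreshold set to 500 (ms), a thread that blocked for
 * 125 ms gets samplePercent = 100 * 125 / 500 = 25, so the contention
 * event is logged with probability 25%; any wait of 500 ms or longer
 * is always logged.
 */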

/*
 * Try to lock a monitor.
 *
 * Returns "true" on success.
 */
static bool tryLockMonitor(Thread* self, Monitor* mon)
{
    int cc;

    if (mon->owner == self) {
        mon->lockCount++;
        return true;
    } else {
        cc = pthread_mutex_trylock(&mon->lock);
        if (cc == 0) {
            mon->owner = self;
            assert(mon->lockCount == 0);
            return true;
        } else {
            return false;
        }
    }
}


/*
 * Unlock a monitor.
 *
 * Returns true if the unlock succeeded.
 * If the unlock failed, an exception will be pending.
 */
static bool unlockMonitor(Thread* self, Monitor* mon)
{
    assert(self != NULL);
    assert(mon != NULL);        // can this happen?

    if (mon->owner == self) {
        /*
         * We own the monitor, so nobody else can be in here.
         */
        if (mon->lockCount == 0) {
            int cc;
            mon->owner = NULL;
            cc = pthread_mutex_unlock(&mon->lock);
            assert(cc == 0);
        } else {
            mon->lockCount--;
        }
    } else {
        /*
         * We don't own this, so we're not allowed to unlock it.
         * The JNI spec says that we should throw IllegalMonitorStateException
         * in this case.
         */
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                          "unlock of unowned monitor");
        return false;
    }
    return true;
}

/*
 * Checks the wait set for circular structure.  Returns 0 if the list
 * is not circular.  Otherwise, returns 1.  Used only by asserts.
 */
static int waitSetCheck(Monitor *mon)
{
    Thread *fast, *slow;
    size_t n;

    assert(mon != NULL);
    fast = slow = mon->waitSet;
    n = 0;
    for (;;) {
        if (fast == NULL) return 0;
        if (fast->waitNext == NULL) return 0;
        if (fast == slow && n > 0) return 1;
        n += 2;
        fast = fast->waitNext->waitNext;
        slow = slow->waitNext;
    }
}
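
/*
 * (waitSetCheck() above is Floyd's tortoise-and-hare cycle detection:
 * "fast" advances two links per iteration and "slow" one, so on a
 * circular list "fast" must eventually lap "slow" and the two pointers
 * become equal; the "n > 0" test skips the trivial match on the first
 * pass, before either pointer has moved.)
 */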

/*
 * Links a thread into a monitor's wait set.  The monitor lock must be
 * held by the caller of this routine.
 */
static void waitSetAppend(Monitor *mon, Thread *thread)
{
    Thread *elt;

    assert(mon != NULL);
    assert(mon->owner == dvmThreadSelf());
    assert(thread != NULL);
    assert(thread->waitNext == NULL);
    assert(waitSetCheck(mon) == 0);
    if (mon->waitSet == NULL) {
        mon->waitSet = thread;
        return;
    }
    elt = mon->waitSet;
    while (elt->waitNext != NULL) {
        elt = elt->waitNext;
    }
    elt->waitNext = thread;
}

/*
 * Unlinks a thread from a monitor's wait set.  The monitor lock must
 * be held by the caller of this routine.
 */
static void waitSetRemove(Monitor *mon, Thread *thread)
{
    Thread *elt;

    assert(mon != NULL);
    assert(mon->owner == dvmThreadSelf());
    assert(thread != NULL);
    assert(waitSetCheck(mon) == 0);
    if (mon->waitSet == NULL) {
        return;
    }
    if (mon->waitSet == thread) {
        mon->waitSet = thread->waitNext;
        thread->waitNext = NULL;
        return;
    }
    elt = mon->waitSet;
    while (elt->waitNext != NULL) {
        if (elt->waitNext == thread) {
            elt->waitNext = thread->waitNext;
            thread->waitNext = NULL;
            return;
        }
        elt = elt->waitNext;
    }
}

/*
 * Converts the given relative waiting time into an absolute time.
 */
void absoluteTime(s8 msec, s4 nsec, struct timespec *ts)
{
    s8 endSec;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
    clock_gettime(CLOCK_MONOTONIC, ts);
#else
    {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        ts->tv_sec = tv.tv_sec;
        ts->tv_nsec = tv.tv_usec * 1000;
    }
#endif
    endSec = ts->tv_sec + msec / 1000;
    if (endSec >= 0x7fffffff) {
        LOGV("NOTE: end time exceeds epoch\n");
        endSec = 0x7ffffffe;
    }
    ts->tv_sec = endSec;
    ts->tv_nsec = (ts->tv_nsec + (msec % 1000) * 1000000) + nsec;

    /* catch rollover */
    if (ts->tv_nsec >= 1000000000L) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000L;
    }
}
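
/*
 * Worked example (illustrative only): with msec=1500 and nsec=0, a
 * current time of {sec=100, nsec=600000000} becomes
 * {sec=101, nsec=600000000 + 500*1000000}, and the rollover fix-up
 * yields {sec=102, nsec=100000000}, i.e. exactly 1.5 seconds from now.
 */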

int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex,
                        s8 msec, s4 nsec)
{
    int ret;
    struct timespec ts;
    absoluteTime(msec, nsec, &ts);
#if defined(HAVE_TIMEDWAIT_MONOTONIC)
    ret = pthread_cond_timedwait_monotonic(cond, mutex, &ts);
#else
    ret = pthread_cond_timedwait(cond, mutex, &ts);
#endif
    assert(ret == 0 || ret == ETIMEDOUT);
    return ret;
}
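
/*
 * Typical use (an illustrative sketch; 'cond' and 'mutex' here are
 * hypothetical names):
 *
 *     pthread_mutex_lock(&mutex);
 *     int ret = dvmRelativeCondWait(&cond, &mutex, 10, 0);  // <= 10 ms
 *     // ret == 0 if signaled, ETIMEDOUT if the timeout expired
 *     pthread_mutex_unlock(&mutex);
 */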

/*
 * Wait on a monitor until timeout, interrupt, or notification.  Used for
 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
 *
 * If another thread calls Thread.interrupt(), we throw InterruptedException
 * and return immediately if one of the following is true:
 *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
 *  - blocked in join(), join(long), or join(long, int) methods of Thread
 *  - blocked in sleep(long), or sleep(long, int) methods of Thread
 * Otherwise, we set the "interrupted" flag.
 *
 * Checks to make sure that "nsec" is in the range 0-999999
 * (i.e. fractions of a millisecond) and throws the appropriate
 * exception if it isn't.
 *
 * The spec allows "spurious wakeups", and recommends that all code using
 * Object.wait() do so in a loop.  This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems.  Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
static void waitMonitor(Thread* self, Monitor* mon, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    struct timespec ts;
    bool wasInterrupted = false;
    bool timed;
    int ret;

    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "object not locked by thread before wait()");
        return;
    }

    /*
     * Enforce the timeout range.
     */
    if (msec < 0 || nsec < 0 || nsec > 999999) {
        dvmThrowException("Ljava/lang/IllegalArgumentException;",
            "timeout arguments out of range");
        return;
    }

    /*
     * Compute absolute wakeup time, if necessary.
     */
    if (msec == 0 && nsec == 0) {
        timed = false;
    } else {
        absoluteTime(msec, nsec, &ts);
        timed = true;
    }

    /*
     * Add ourselves to the set of threads waiting on this monitor, and
     * release our hold.  We need to let it go even if we're a few levels
     * deep in a recursive lock, and we need to restore that later.
     *
     * We append to the wait set ahead of clearing the count and owner
     * fields so the subroutine can check that the calling thread owns
     * the monitor.  Aside from that, the order of member updates is
     * not order sensitive as we hold the pthread mutex.
     */
    waitSetAppend(mon, self);
    int prevLockCount = mon->lockCount;
    mon->lockCount = 0;
    mon->owner = NULL;

    /*
     * Update thread status.  If the GC wakes up, it'll ignore us, knowing
     * that we won't touch any references in this state, and we'll check
     * our suspend mode before we transition out.
     */
    if (timed)
        dvmChangeStatus(self, THREAD_TIMED_WAIT);
    else
        dvmChangeStatus(self, THREAD_WAIT);

    ret = pthread_mutex_lock(&self->waitMutex);
    assert(ret == 0);

    /*
     * Set waitMonitor to the monitor object we will be waiting on.
     * When waitMonitor is non-NULL a notifying or interrupting thread
     * must signal the thread's waitCond to wake it up.
     */
    assert(self->waitMonitor == NULL);
    self->waitMonitor = mon;

    /*
     * Handle the case where the thread was interrupted before we called
     * wait().
     */
    if (self->interrupted) {
        wasInterrupted = true;
        self->waitMonitor = NULL;
        pthread_mutex_unlock(&self->waitMutex);
        goto done;
    }

    /*
     * Release the monitor lock and wait for a notification or
     * a timeout to occur.
     */
    pthread_mutex_unlock(&mon->lock);

    if (!timed) {
        ret = pthread_cond_wait(&self->waitCond, &self->waitMutex);
        assert(ret == 0);
    } else {
#ifdef HAVE_TIMEDWAIT_MONOTONIC
        ret = pthread_cond_timedwait_monotonic(&self->waitCond, &self->waitMutex, &ts);
#else
        ret = pthread_cond_timedwait(&self->waitCond, &self->waitMutex, &ts);
#endif
        assert(ret == 0 || ret == ETIMEDOUT);
    }
    if (self->interrupted) {
        wasInterrupted = true;
    }

    self->interrupted = false;
    self->waitMonitor = NULL;

    pthread_mutex_unlock(&self->waitMutex);

    /* Reacquire the monitor lock. */
    lockMonitor(self, mon);

done:
    /*
     * We remove our thread from wait set after restoring the count
     * and owner fields so the subroutine can check that the calling
     * thread owns the monitor. Aside from that, the order of member
     * updates is not order sensitive as we hold the pthread mutex.
     */
    mon->owner = self;
    mon->lockCount = prevLockCount;
    waitSetRemove(mon, self);

    /* set self->status back to THREAD_RUNNING, and self-suspend if needed */
    dvmChangeStatus(self, THREAD_RUNNING);

    if (wasInterrupted) {
        /*
         * We were interrupted while waiting, or somebody interrupted an
         * un-interruptible thread earlier and we're bailing out immediately.
         *
         * The doc sayeth: "The interrupted status of the current thread is
         * cleared when this exception is thrown."
         */
        self->interrupted = false;
        if (interruptShouldThrow)
            dvmThrowException("Ljava/lang/InterruptedException;", NULL);
    }
}

/*
 * Notify one thread waiting on this monitor.
 */
static void notifyMonitor(Thread* self, Monitor* mon)
{
    Thread* thread;

    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "object not locked by thread before notify()");
        return;
    }
    /* Signal the first waiting thread in the wait set. */
    while (mon->waitSet != NULL) {
        thread = mon->waitSet;
        mon->waitSet = thread->waitNext;
        thread->waitNext = NULL;
        pthread_mutex_lock(&thread->waitMutex);
        /* Check to see if the thread is still waiting. */
        if (thread->waitMonitor != NULL) {
            pthread_cond_signal(&thread->waitCond);
            pthread_mutex_unlock(&thread->waitMutex);
            return;
        }
        pthread_mutex_unlock(&thread->waitMutex);
    }
}

/*
 * Notify all threads waiting on this monitor.
 */
static void notifyAllMonitor(Thread* self, Monitor* mon)
{
    Thread* thread;

    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
            "object not locked by thread before notifyAll()");
        return;
    }
    /* Signal all threads in the wait set. */
    while (mon->waitSet != NULL) {
        thread = mon->waitSet;
        mon->waitSet = thread->waitNext;
        thread->waitNext = NULL;
        pthread_mutex_lock(&thread->waitMutex);
        /* Check to see if the thread is still waiting. */
        if (thread->waitMonitor != NULL) {
            pthread_cond_signal(&thread->waitCond);
        }
        pthread_mutex_unlock(&thread->waitMutex);
    }
}

/*
 * Implements monitorenter for "synchronized" stuff.
 *
 * This does not fail or throw an exception (unless deadlock prediction
 * is enabled and set to "err" mode).
 */
void dvmLockObject(Thread* self, Object *obj)
{
    volatile u4 *thinp;
    Monitor *mon;
    ThreadStatus oldStatus;
    useconds_t sleepDelay;
    const useconds_t maxSleepDelay = 1 << 20;
    u4 thin, newThin, threadId;

    assert(self != NULL);
    assert(obj != NULL);
    threadId = self->threadId;
    thinp = &obj->lock;
retry:
    thin = *thinp;
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /*
         * The lock is a thin lock.  The owner field is used to
         * determine the acquire method, ordered by cost.
         */
        if (LW_LOCK_OWNER(thin) == threadId) {
            /*
             * The calling thread owns the lock.  Increment the
             * value of the recursion count field.
             */
            obj->lock += 1 << LW_LOCK_COUNT_SHIFT;
        } else if (LW_LOCK_OWNER(thin) == 0) {
            /*
             * The lock is unowned.  Install the thread id of the
             * calling thread into the owner field.  This is the
             * common case.  In performance critical code the JIT
             * will have tried this before calling out to the VM.
             */
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (!ATOMIC_CMP_SWAP((int32_t *)thinp, thin, newThin)) {
                /*
                 * The acquire failed.  Try again.
                 */
                goto retry;
            }
        } else {
            LOG_THIN("(%d) spin on lock %p: %#x (%#x) %#x",
                     threadId, &obj->lock, 0, *thinp, thin);
            /*
             * The lock is owned by another thread.  Notify the VM
             * that we are about to wait.
             */
            oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
            /*
             * Spin until the thin lock is released or inflated.
             */
            sleepDelay = 0;
            for (;;) {
                thin = *thinp;
                /*
                 * Check the shape of the lock word.  Another thread
                 * may have inflated the lock while we were waiting.
                 */
                if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
                    if (LW_LOCK_OWNER(thin) == 0) {
                        /*
                         * The lock has been released.  Install the
                         * thread id of the calling thread into the
                         * owner field.
                         */
                        newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
                        if (ATOMIC_CMP_SWAP((int32_t *)thinp,
                                            thin, newThin)) {
                            /*
                             * The acquire succeeded.  Break out of the
                             * loop and proceed to inflate the lock.
                             */
                            break;
                        }
                    } else {
                        /*
                         * The lock has not been released.  Yield so
                         * the owning thread can run.
                         */
                        if (sleepDelay == 0) {
                            sched_yield();
                            sleepDelay = 1000;
                        } else {
                            usleep(sleepDelay);
                            if (sleepDelay < maxSleepDelay / 2) {
                                sleepDelay *= 2;
                            }
                        }
                    }
                } else {
                    /*
                     * The thin lock was inflated by another thread.
                     * Let the VM know we are no longer waiting and
                     * try again.
                     */
                    LOG_THIN("(%d) lock %p surprise-fattened",
                             threadId, &obj->lock);
                    dvmChangeStatus(self, oldStatus);
                    goto retry;
                }
            }
            LOG_THIN("(%d) spin on lock done %p: %#x (%#x) %#x",
                     threadId, &obj->lock, 0, *thinp, thin);
            /*
             * We have acquired the thin lock.  Let the VM know that
             * we are no longer waiting.
             */
            dvmChangeStatus(self, oldStatus);
            /*
             * Fatten the lock.
             */
            mon = dvmCreateMonitor(obj);
            lockMonitor(self, mon);
            thin = *thinp;
            thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
            thin |= (u4)mon | LW_SHAPE_FAT;
            MEM_BARRIER();
            obj->lock = thin;
            LOG_THIN("(%d) lock %p fattened", threadId, &obj->lock);
        }
    } else {
        /*
         * The lock is a fat lock.
         */
        assert(LW_MONITOR(obj->lock) != NULL);
        lockMonitor(self, LW_MONITOR(obj->lock));
    }
#ifdef WITH_DEADLOCK_PREDICTION
    /*
     * See if we were allowed to grab the lock at this time.  We do it
     * *after* acquiring the lock, rather than before, so that we can
     * freely update the Monitor struct.  This seems counter-intuitive,
     * but our goal is deadlock *prediction* not deadlock *prevention*.
     * (If we actually deadlock, the situation is easy to diagnose from
     * a thread dump, so there's no point making a special effort to do
     * the checks before the lock is held.)
     *
     * This needs to happen before we add the object to the thread's
     * monitor list, so we can tell the difference between first-lock and
     * re-lock.
     *
     * It's also important that we do this while in THREAD_RUNNING, so
     * that we don't interfere with cleanup operations in the GC.
     */
    if (gDvm.deadlockPredictMode != kDPOff) {
        if (self->status != THREAD_RUNNING) {
            LOGE("Bad thread status (%d) in DP\n", self->status);
            dvmDumpThread(self, false);
            dvmAbort();
        }
        assert(!dvmCheckException(self));
        updateDeadlockPrediction(self, obj);
        if (dvmCheckException(self)) {
            /*
             * If we're throwing an exception here, we need to free the
             * lock.  We add the object to the thread's monitor list so the
             * "unlock" code can remove it.
             */
            dvmAddToMonitorList(self, obj, false);
            dvmUnlockObject(self, obj);
            LOGV("--- unlocked, pending is '%s'\n",
                dvmGetException(self)->clazz->descriptor);
        }
    }

    /*
     * Add the locked object, and the current stack trace, to the list
     * held by the Thread object.  If deadlock prediction isn't on,
     * don't capture the stack trace.
     */
    dvmAddToMonitorList(self, obj, gDvm.deadlockPredictMode != kDPOff);
#elif defined(WITH_MONITOR_TRACKING)
    /*
     * Add the locked object to the list held by the Thread object.
     */
    dvmAddToMonitorList(self, obj, false);
#endif
}

/*
 * Implements monitorexit for "synchronized" stuff.
 *
 * On failure, throws an exception and returns "false".
 */
bool dvmUnlockObject(Thread* self, Object *obj)
{
    u4 thin;

    assert(self != NULL);
    assert(self->status == THREAD_RUNNING);
    assert(obj != NULL);
    /*
     * Cache the lock word as its value can change while we are
     * examining its state.
     */
    thin = obj->lock;
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /*
         * The lock is thin.  We must ensure that the lock is owned
         * by the given thread before unlocking it.
         */
        if (LW_LOCK_OWNER(thin) == self->threadId) {
            /*
             * We are the lock owner.  It is safe to update the lock
             * without CAS as lock ownership guards the lock itself.
             */
            if (LW_LOCK_COUNT(thin) == 0) {
                /*
                 * The lock was not recursively acquired, the common
                 * case.  Unlock by clearing all bits except for the
                 * hash state.
                 */
                obj->lock &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
            } else {
                /*
                 * The object was recursively acquired.  Decrement the
                 * lock recursion count field.
                 */
                obj->lock -= 1 << LW_LOCK_COUNT_SHIFT;
            }
        } else {
            /*
             * We do not own the lock.  The JVM spec requires that we
             * throw an exception in this case.
             */
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                              "unlock of unowned monitor");
            return false;
        }
    } else {
        /*
         * The lock is fat.  We must check to see if unlockMonitor has
         * raised any exceptions before continuing.
         */
        assert(LW_MONITOR(obj->lock) != NULL);
        if (!unlockMonitor(self, LW_MONITOR(obj->lock))) {
            /*
             * An exception has been raised.  Do not fall through.
             */
            return false;
        }
    }

#ifdef WITH_MONITOR_TRACKING
    /*
     * Remove the object from the Thread's list.
     */
    dvmRemoveFromMonitorList(self, obj);
#endif

    return true;
}

/*
 * Object.wait().  Also called for class init.
 */
void dvmObjectWait(Thread* self, Object *obj, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    Monitor* mon = LW_MONITOR(obj->lock);
    u4 thin = obj->lock;

    /* If the lock is still thin, we need to fatten it.
     */
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /* Make sure that 'self' holds the lock.
         */
        if (LW_LOCK_OWNER(thin) != self->threadId) {
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "object not locked by thread before wait()");
            return;
        }

        /* This thread holds the lock.  We need to fatten the lock
         * so 'self' can block on it.  Don't update the object lock
         * field yet, because 'self' needs to acquire the lock before
         * any other thread gets a chance.
         */
        mon = dvmCreateMonitor(obj);

        /* 'self' has actually locked the object one or more times;
         * make sure that the monitor reflects this.
         */
        lockMonitor(self, mon);
        mon->lockCount = LW_LOCK_COUNT(thin);
        LOG_THIN("(%d) lock 0x%08x fattened by wait() to count %d\n",
                 self->threadId, (uint)&obj->lock, mon->lockCount);

        /* Make the monitor public now that it's in the right state.
         */
        thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
        thin |= (u4)mon | LW_SHAPE_FAT;
        MEM_BARRIER();
        obj->lock = thin;
    }

    waitMonitor(self, mon, msec, nsec, interruptShouldThrow);
}

/*
 * Object.notify().
 */
void dvmObjectNotify(Thread* self, Object *obj)
{
    u4 thin = obj->lock;

    /* If the lock is still thin, there aren't any waiters;
     * waiting on an object forces lock fattening.
     */
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /* Make sure that 'self' holds the lock.
         */
        if (LW_LOCK_OWNER(thin) != self->threadId) {
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "object not locked by thread before notify()");
            return;
        }

        /* no-op;  there are no waiters to notify.
         */
    } else {
        /* It's a fat lock.
         */
        notifyMonitor(self, LW_MONITOR(thin));
    }
}

/*
 * Object.notifyAll().
 */
void dvmObjectNotifyAll(Thread* self, Object *obj)
{
    u4 thin = obj->lock;

    /* If the lock is still thin, there aren't any waiters;
     * waiting on an object forces lock fattening.
     */
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /* Make sure that 'self' holds the lock.
         */
        if (LW_LOCK_OWNER(thin) != self->threadId) {
            dvmThrowException("Ljava/lang/IllegalMonitorStateException;",
                "object not locked by thread before notifyAll()");
            return;
        }

        /* no-op;  there are no waiters to notify.
         */
    } else {
        /* It's a fat lock.
         */
        notifyAllMonitor(self, LW_MONITOR(thin));
    }
}

/*
 * This implements java.lang.Thread.sleep(long msec, int nsec).
 *
 * The sleep is interruptible by other threads, which means we can't just
 * plop into an OS sleep call.  (We probably could if we wanted to send
 * signals around and rely on EINTR, but that's inefficient and relies
 * on native code respecting our signal mask.)
 *
 * We have to do all of this stuff for Object.wait() as well, so it's
 * easiest to just sleep on a private Monitor.
 *
 * It appears that we want sleep(0,0) to go through the motions of sleeping
 * for a very short duration, rather than just returning.
 */
void dvmThreadSleep(u8 msec, u4 nsec)
{
    Thread* self = dvmThreadSelf();
    Monitor* mon = gDvm.threadSleepMon;

    /* sleep(0,0) wakes up immediately, wait(0,0) means wait forever; adjust */
    if (msec == 0 && nsec == 0)
        nsec++;

    lockMonitor(self, mon);
    waitMonitor(self, mon, msec, nsec, true);
    unlockMonitor(self, mon);
}

/*
 * Implement java.lang.Thread.interrupt().
 */
void dvmThreadInterrupt(Thread* thread)
{
    assert(thread != NULL);

    pthread_mutex_lock(&thread->waitMutex);

    /*
     * If the interrupted flag is already set no additional action is
     * required.
     */
    if (thread->interrupted == true) {
        pthread_mutex_unlock(&thread->waitMutex);
        return;
    }

    /*
     * Raise the "interrupted" flag.  This will cause the thread to bail
     * out early from its next wait() attempt, if it's not currently
     * waiting on something.
     */
    thread->interrupted = true;
    MEM_BARRIER();

    /*
     * Is the thread waiting?
     *
     * Note that fat vs. thin doesn't matter here;  waitMonitor
     * is only set when a thread actually waits on a monitor,
     * which implies that the monitor has already been fattened.
     */
    if (thread->waitMonitor != NULL) {
        pthread_cond_signal(&thread->waitCond);
    }

    pthread_mutex_unlock(&thread->waitMutex);
}

#ifndef WITH_COPYING_GC
u4 dvmIdentityHashCode(Object *obj)
{
    return (u4)obj;
}
#else
static size_t arrayElementWidth(const ArrayObject *array)
{
    const char *descriptor;

    if (dvmIsObjectArray(array)) {
        return sizeof(Object *);
    } else {
        descriptor = array->obj.clazz->descriptor;
        switch (descriptor[1]) {
        case 'B': return 1;  /* byte */
        case 'C': return 2;  /* char */
        case 'D': return 8;  /* double */
        case 'F': return 4;  /* float */
        case 'I': return 4;  /* int */
        case 'J': return 8;  /* long */
        case 'S': return 2;  /* short */
        case 'Z': return 1;  /* boolean */
        }
    }
    LOGE("object %p has an unhandled descriptor '%s'", array, descriptor);
    dvmDumpThread(dvmThreadSelf(), false);
    dvmAbort();
    return 0;  /* Quiet the compiler. */
}

static size_t arrayObjectLength(const ArrayObject *array)
{
    size_t length;

    length = offsetof(ArrayObject, contents);
    length += array->length * arrayElementWidth(array);
    return length;
}
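
/*
 * Worked example (illustrative): for an int[3], the descriptor is "[I",
 * so arrayElementWidth() returns 4 and arrayObjectLength() yields
 * offsetof(ArrayObject, contents) + 3 * 4 bytes.
 */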
   1350 
   1351 /*
   1352  * Returns the identity hash code of the given object.
   1353  */
   1354 u4 dvmIdentityHashCode(Object *obj)
   1355 {
   1356     Thread *self, *thread;
   1357     volatile u4 *lw;
   1358     size_t length;
   1359     u4 lock, owner, hashState;
   1360 
   1361     if (obj == NULL) {
   1362         /*
   1363          * Null is defined to have an identity hash code of 0.
   1364          */
   1365         return 0;
   1366     }
   1367     lw = &obj->lock;
   1368 retry:
   1369     hashState = LW_HASH_STATE(*lw);
   1370     if (hashState == LW_HASH_STATE_HASHED) {
   1371         /*
   1372          * The object has been hashed but has not had its hash code
   1373          * relocated by the garbage collector.  Use the raw object
   1374          * address.
   1375          */
   1376         return (u4)obj >> 3;
   1377     } else if (hashState == LW_HASH_STATE_HASHED_AND_MOVED) {
   1378         /*
   1379          * The object has been hashed and its hash code has been
   1380          * relocated by the collector.  Use the value of the naturally
   1381          * aligned word following the instance data.
   1382          */
   1383         if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) {
   1384             length = arrayObjectLength((ArrayObject *)obj);
   1385             length = (length + 3) & ~3;
   1386         } else {
   1387             length = obj->clazz->objectSize;
   1388         }
   1389         return *(u4 *)(((char *)obj) + length);
   1390     } else if (hashState == LW_HASH_STATE_UNHASHED) {
   1391         /*
   1392          * The object has never been hashed.  Change the hash state to
   1393          * hashed and use the raw object address.
   1394          */
   1395         self = dvmThreadSelf();
   1396         if (self->threadId == lockOwner(obj)) {
   1397             /*
   1398              * We already own the lock so we can update the hash state
   1399              * directly.
   1400              */
   1401             *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
   1402             return (u4)obj >> 3;
   1403         }
   1404         /*
   1405          * We do not own the lock.  Try acquiring the lock.  Should
   1406          * this fail, we must suspend the owning thread.
   1407          */
   1408         if (LW_SHAPE(*lw) == LW_SHAPE_THIN) {
   1409             /*
   1410              * If the lock is thin assume it is unowned.  We simulate
   1411              * an acquire, update, and release with a single CAS.
   1412              */
   1413             lock = DVM_LOCK_INITIAL_THIN_VALUE;
   1414             lock |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
   1415             if (ATOMIC_CMP_SWAP((int32_t *)lw,
   1416                                 (int32_t)DVM_LOCK_INITIAL_THIN_VALUE,
   1417                                 (int32_t)lock)) {
   1418                 /*
   1419                  * A new lockword has been installed with a hash state
   1420                  * of hashed.  Use the raw object address.
   1421                  */
   1422                 return (u4)obj >> 3;
   1423             }
   1424         } else {
   1425             if (tryLockMonitor(self, LW_MONITOR(*lw))) {
   1426                 /*
   1427                  * The monitor lock has been acquired.  Change the
   1428                  * hash state to hashed and use the raw object
   1429                  * address.
   1430                  */
   1431                 *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
   1432                 unlockMonitor(self, LW_MONITOR(*lw));
   1433                 return (u4)obj >> 3;
   1434             }
   1435         }
   1436         /*
   1437          * At this point we have failed to acquire the lock.  We must
   1438          * identify the owning thread and suspend it.
   1439          */
   1440         dvmLockThreadList(self);
   1441         /*
   1442          * Cache the lock word as its value can change between
   1443          * determining its shape and retrieving its owner.
   1444          */
   1445         lock = *lw;
   1446         if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
   1447             /*
   1448              * Find the thread with the corresponding thread id.
   1449              */
   1450             owner = LW_LOCK_OWNER(lock);
   1451             assert(owner != self->threadId);
   1452             /*
   1453              * If the lock has no owner do not bother scanning the
   1454              * thread list and fall through to the failure handler.
   1455              */
   1456             thread = owner ? gDvm.threadList : NULL;
   1457             while (thread != NULL) {
   1458                 if (thread->threadId == owner) {
   1459                     break;
   1460                 }
   1461                 thread = thread->next;
   1462             }
   1463         } else {
   1464             thread = LW_MONITOR(lock)->owner;
   1465         }
   1466         /*
   1467          * If thread is NULL the object has been released since the
   1468          * thread list lock was acquired.  Try again.
   1469          */
   1470         if (thread == NULL) {
   1471             dvmUnlockThreadList();
   1472             goto retry;
   1473         }
   1474         /*
   1475          * Wait for the owning thread to suspend.
   1476          */
   1477         dvmSuspendThread(thread);
   1478         if (dvmHoldsLock(thread, obj)) {
   1479             /*
   1480              * The owning thread has been suspended.  We can safely
   1481              * change the hash state to hashed.
   1482              */
   1483             *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
   1484             dvmResumeThread(thread);
   1485             dvmUnlockThreadList();
   1486             return (u4)obj >> 3;
   1487         }
   1488         /*
   1489          * The wrong thread has been suspended.  Try again.
   1490          */
   1491         dvmResumeThread(thread);
   1492         dvmUnlockThreadList();
   1493         goto retry;
   1494     }
    1495     LOGE("object %p has an unknown hash state %#x\n", obj, hashState);
   1496     dvmDumpThread(dvmThreadSelf(), false);
   1497     dvmAbort();
   1498     return 0;  /* Quiet the compiler. */
   1499 }
   1500 #endif  /* WITH_COPYING_GC */
   1501 
   1502 #ifdef WITH_DEADLOCK_PREDICTION
   1503 /*
   1504  * ===========================================================================
   1505  *      Deadlock prediction
   1506  * ===========================================================================
   1507  */
   1508 /*
   1509 The idea is to predict the possibility of deadlock by recording the order
   1510 in which monitors are acquired.  If we see an attempt to acquire a lock
   1511 out of order, we can identify the locks and offending code.
   1512 
   1513 To make this work, we need to keep track of the locks held by each thread,
   1514 and create history trees for each lock.  When a thread tries to acquire
   1515 a new lock, we walk through the "history children" of the lock, looking
   1516 for a match with locks the thread already holds.  If we find a match,
   1517 it means the thread has made a request that could result in a deadlock.
   1518 
   1519 To support recursive locks, we always allow re-locking a currently-held
   1520 lock, and maintain a recursion depth count.
   1521 
   1522 An ASCII-art example, where letters represent Objects:
   1523 
   1524         A
   1525        /|\
   1526       / | \
   1527      B  |  D
   1528       \ |
   1529        \|
   1530         C
   1531 
   1532 The above is the tree we'd have after handling Object synchronization
   1533 sequences "ABC", "AC", "AD".  A has three children, {B, C, D}.  C is also
   1534 a child of B.  (The lines represent pointers between parent and child.
   1535 Every node can have multiple parents and multiple children.)
   1536 
   1537 If we hold AC, and want to lock B, we recursively search through B's
   1538 children to see if A or C appears.  It does, so we reject the attempt.
   1539 (A straightforward way to implement it: add a link from C to B, then
   1540 determine whether the graph starting at B contains a cycle.)
   1541 
   1542 If we hold AC and want to lock D, we would succeed, creating a new link
   1543 from C to D.
   1544 
    1545 The lock history and a stack trace are attached to the Object's Monitor
   1546 struct, which means we need to fatten every Object we lock (thin locking
   1547 is effectively disabled).  If we don't need the stack trace we can
   1548 avoid fattening the leaf nodes, only fattening objects that need to hold
   1549 history trees.
   1550 
   1551 Updates to Monitor structs are only allowed for the thread that holds
   1552 the Monitor, so we actually do most of our deadlock prediction work after
   1553 the lock has been acquired.
   1554 
   1555 When an object with a monitor is GCed, we need to remove it from the
   1556 history trees.  There are two basic approaches:
    1557  (1) Go through the entire set of known monitors, searching all child
   1558      lists for the object in question.  This is rather slow, resulting
   1559      in GC passes that take upwards of 10 seconds to complete.
   1560  (2) Maintain "parent" pointers in each node.  Remove the entries as
   1561      required.  This requires additional storage and maintenance for
   1562      every operation, but is significantly faster at GC time.
   1563 For each GCed object, we merge all of the object's children into each of
   1564 the object's parents.
   1565 */
   1566 
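/*
 * A minimal sketch of the cycle check described above, reduced to a
 * fixed adjacency matrix over the four objects {A,B,C,D}.  Illustrative
 * only (hence "#if 0"); the names here are hypothetical, and the real
 * implementation is traverseTree(), below.
 */
#if 0
#define DP_NUM_LOCKS 4                  /* A=0, B=1, C=2, D=3 */

/* dpEdge[i][j] != 0 means "j was acquired while i was held" */
static int dpEdge[DP_NUM_LOCKS][DP_NUM_LOCKS];
static int dpMark[DP_NUM_LOCKS];

/* Depth-first search; returns 1 if a cycle is reachable from "node". */
static int dpHasCycle(int node)
{
    int i;
    if (dpMark[node])
        return 1;                       /* already on this path: cycle */
    dpMark[node] = 1;
    for (i = 0; i < DP_NUM_LOCKS; i++) {
        if (dpEdge[node][i] && dpHasCycle(i)) {
            dpMark[node] = 0;           /* unwind, mirroring traverseTree */
            return 1;
        }
    }
    dpMark[node] = 0;                   /* clear the mark on exit */
    return 0;
}

/*
 * After the sequences "ABC", "AC", "AD" we have edges A->B, B->C, A->C,
 * and A->D.  Holding A and C and requesting B adds the edge C->B; the
 * check dpHasCycle(1) then walks B -> C -> B and rejects the attempt.
 */
#endif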
   1567 #if !defined(WITH_MONITOR_TRACKING)
   1568 # error "WITH_DEADLOCK_PREDICTION requires WITH_MONITOR_TRACKING"
   1569 #endif
   1570 
   1571 /*
   1572  * Clear out the contents of an ExpandingObjectList, freeing any
   1573  * dynamic allocations.
   1574  */
   1575 static void expandObjClear(ExpandingObjectList* pList)
   1576 {
   1577     if (pList->list != NULL) {
   1578         free(pList->list);
   1579         pList->list = NULL;
   1580     }
   1581     pList->alloc = pList->count = 0;
   1582 }
   1583 
   1584 /*
   1585  * Get the number of objects currently stored in the list.
   1586  */
   1587 static inline int expandBufGetCount(const ExpandingObjectList* pList)
   1588 {
   1589     return pList->count;
   1590 }
   1591 
   1592 /*
   1593  * Get the Nth entry from the list.
   1594  */
   1595 static inline Object* expandBufGetEntry(const ExpandingObjectList* pList,
   1596     int i)
   1597 {
   1598     return pList->list[i];
   1599 }
   1600 
   1601 /*
   1602  * Add a new entry to the list.
   1603  *
   1604  * We don't check for or try to enforce uniqueness.  It's expected that
   1605  * the higher-level code does this for us.
   1606  */
   1607 static void expandObjAddEntry(ExpandingObjectList* pList, Object* obj)
   1608 {
   1609     if (pList->count == pList->alloc) {
   1610         /* time to expand */
   1611         Object** newList;
   1612 
   1613         if (pList->alloc == 0)
   1614             pList->alloc = 4;
   1615         else
   1616             pList->alloc *= 2;
   1617         LOGVV("expanding %p to %d\n", pList, pList->alloc);
   1618         newList = realloc(pList->list, pList->alloc * sizeof(Object*));
   1619         if (newList == NULL) {
   1620             LOGE("Failed expanding DP object list (alloc=%d)\n", pList->alloc);
   1621             dvmAbort();
   1622         }
   1623         pList->list = newList;
   1624     }
   1625 
   1626     pList->list[pList->count++] = obj;
   1627 }
   1628 
   1629 /*
   1630  * Returns "true" if the element was successfully removed.
   1631  */
   1632 static bool expandObjRemoveEntry(ExpandingObjectList* pList, Object* obj)
   1633 {
   1634     int i;
   1635 
   1636     for (i = pList->count-1; i >= 0; i--) {
   1637         if (pList->list[i] == obj)
   1638             break;
   1639     }
   1640     if (i < 0)
   1641         return false;
   1642 
   1643     if (i != pList->count-1) {
   1644         /*
   1645          * The order of elements is not important, so we just copy the
   1646          * last entry into the new slot.
   1647          */
   1648         //memmove(&pList->list[i], &pList->list[i+1],
   1649         //    (pList->count-1 - i) * sizeof(pList->list[0]));
   1650         pList->list[i] = pList->list[pList->count-1];
   1651     }
   1652 
   1653     pList->count--;
   1654     pList->list[pList->count] = (Object*) 0xdecadead;
   1655     return true;
   1656 }
   1657 
   1658 /*
   1659  * Returns "true" if "obj" appears in the list.
   1660  */
   1661 static bool expandObjHas(const ExpandingObjectList* pList, Object* obj)
   1662 {
   1663     int i;
   1664 
   1665     for (i = 0; i < pList->count; i++) {
   1666         if (pList->list[i] == obj)
   1667             return true;
   1668     }
   1669     return false;
   1670 }
   1671 
   1672 /*
   1673  * Print the list contents to stdout.  For debugging.
   1674  */
   1675 static void expandObjDump(const ExpandingObjectList* pList)
   1676 {
   1677     int i;
   1678     for (i = 0; i < pList->count; i++)
   1679         printf(" %p", pList->list[i]);
   1680 }
   1681 
   1682 /*
   1683  * Check for duplicate entries.  Returns the index of the first instance
   1684  * of the duplicated value, or -1 if no duplicates were found.
   1685  */
   1686 static int expandObjCheckForDuplicates(const ExpandingObjectList* pList)
   1687 {
   1688     int i, j;
   1689     for (i = 0; i < pList->count-1; i++) {
   1690         for (j = i + 1; j < pList->count; j++) {
   1691             if (pList->list[i] == pList->list[j]) {
   1692                 return i;
   1693             }
   1694         }
   1695     }
   1696 
   1697     return -1;
   1698 }
   1699 
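/*
 * Usage sketch for the helpers above (illustrative only, compiled out):
 * the list grows by doubling, removal swaps the last element into the
 * vacated slot, and uniqueness is left to the caller.
 */
#if 0
static void expandObjExample(Object* objA, Object* objB)
{
    ExpandingObjectList list = { 0, 0, NULL };

    expandObjAddEntry(&list, objA);     /* first add allocates 4 slots */
    expandObjAddEntry(&list, objB);
    assert(expandBufGetCount(&list) == 2);
    assert(expandObjHas(&list, objA));

    expandObjRemoveEntry(&list, objA);  /* objB is moved into slot 0 */
    assert(expandBufGetEntry(&list, 0) == objB);

    expandObjClear(&list);              /* frees the backing array */
}
#endif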
   1700 
   1701 /*
   1702  * Determine whether "child" appears in the list of objects associated
   1703  * with the Monitor in "parent".  If "parent" is a thin lock, we return
   1704  * false immediately.
   1705  */
   1706 static bool objectInChildList(const Object* parent, Object* child)
   1707 {
   1708     u4 lock = parent->lock;
   1709     if (!IS_LOCK_FAT(&lock)) {
   1710         //LOGI("on thin\n");
   1711         return false;
   1712     }
   1713 
   1714     return expandObjHas(&LW_MONITOR(lock)->historyChildren, child);
   1715 }
   1716 
   1717 /*
   1718  * Print the child list.
   1719  */
   1720 static void dumpKids(Object* parent)
   1721 {
   1722     Monitor* mon = LW_MONITOR(parent->lock);
   1723 
   1724     printf("Children of %p:", parent);
   1725     expandObjDump(&mon->historyChildren);
   1726     printf("\n");
   1727 }
   1728 
   1729 /*
   1730  * Add "child" to the list of children in "parent", and add "parent" to
   1731  * the list of parents in "child".
   1732  */
   1733 static void linkParentToChild(Object* parent, Object* child)
   1734 {
   1735     //assert(LW_MONITOR(parent->lock)->owner == dvmThreadSelf());   // !owned for merge
   1736     assert(IS_LOCK_FAT(&parent->lock));
   1737     assert(IS_LOCK_FAT(&child->lock));
   1738     assert(parent != child);
   1739     Monitor* mon;
   1740 
   1741     mon = LW_MONITOR(parent->lock);
   1742     assert(!expandObjHas(&mon->historyChildren, child));
   1743     expandObjAddEntry(&mon->historyChildren, child);
   1744 
   1745     mon = LW_MONITOR(child->lock);
   1746     assert(!expandObjHas(&mon->historyParents, parent));
   1747     expandObjAddEntry(&mon->historyParents, parent);
   1748 }
   1749 
   1750 
   1751 /*
   1752  * Remove "child" from the list of children in "parent".
   1753  */
   1754 static void unlinkParentFromChild(Object* parent, Object* child)
   1755 {
   1756     //assert(LW_MONITOR(parent->lock)->owner == dvmThreadSelf());   // !owned for GC
   1757     assert(IS_LOCK_FAT(&parent->lock));
   1758     assert(IS_LOCK_FAT(&child->lock));
   1759     assert(parent != child);
   1760     Monitor* mon;
   1761 
   1762     mon = LW_MONITOR(parent->lock);
   1763     if (!expandObjRemoveEntry(&mon->historyChildren, child)) {
   1764         LOGW("WARNING: child %p not found in parent %p\n", child, parent);
   1765     }
   1766     assert(!expandObjHas(&mon->historyChildren, child));
   1767     assert(expandObjCheckForDuplicates(&mon->historyChildren) < 0);
   1768 
   1769     mon = LW_MONITOR(child->lock);
   1770     if (!expandObjRemoveEntry(&mon->historyParents, parent)) {
   1771         LOGW("WARNING: parent %p not found in child %p\n", parent, child);
   1772     }
   1773     assert(!expandObjHas(&mon->historyParents, parent));
   1774     assert(expandObjCheckForDuplicates(&mon->historyParents) < 0);
   1775 }
   1776 
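/*
 * Note that linkParentToChild() and unlinkParentFromChild() keep the two
 * edge lists as mirror images: every entry in a parent's historyChildren
 * has a matching entry in the child's historyParents, and vice versa.
 * The asserts above are debug-only checks of that invariant.
 */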
   1777 
   1778 /*
   1779  * Log the monitors held by the current thread.  This is done as part of
   1780  * flagging an error.
   1781  */
   1782 static void logHeldMonitors(Thread* self)
   1783 {
   1784     char* name = NULL;
   1785 
   1786     name = dvmGetThreadName(self);
   1787     LOGW("Monitors currently held by thread (threadid=%d '%s')\n",
   1788         self->threadId, name);
   1789     LOGW("(most-recently-acquired on top):\n");
   1790     free(name);
   1791 
   1792     LockedObjectData* lod = self->pLockedObjects;
   1793     while (lod != NULL) {
   1794         LOGW("--- object %p[%d] (%s)\n",
   1795             lod->obj, lod->recursionCount, lod->obj->clazz->descriptor);
   1796         dvmLogRawStackTrace(lod->rawStackTrace, lod->stackDepth);
   1797 
   1798         lod = lod->next;
   1799     }
   1800 }
   1801 
   1802 /*
   1803  * Recursively traverse the object hierarchy starting at "obj".  We mark
   1804  * ourselves on entry and clear the mark on exit.  If we ever encounter
   1805  * a marked object, we have a cycle.
   1806  *
   1807  * Returns "true" if all is well, "false" if we found a cycle.
   1808  */
   1809 static bool traverseTree(Thread* self, const Object* obj)
   1810 {
   1811     assert(IS_LOCK_FAT(&obj->lock));
   1812     Monitor* mon = LW_MONITOR(obj->lock);
   1813 
   1814     /*
   1815      * Have we been here before?
   1816      */
   1817     if (mon->historyMark) {
   1818         int* rawStackTrace;
   1819         int stackDepth;
   1820 
   1821         LOGW("%s\n", kStartBanner);
   1822         LOGW("Illegal lock attempt:\n");
   1823         LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
   1824 
   1825         rawStackTrace = dvmFillInStackTraceRaw(self, &stackDepth);
   1826         dvmLogRawStackTrace(rawStackTrace, stackDepth);
   1827         free(rawStackTrace);
   1828 
   1829         LOGW(" ");
   1830         logHeldMonitors(self);
   1831 
   1832         LOGW(" ");
   1833         LOGW("Earlier, the following lock order (from last to first) was\n");
   1834         LOGW("established -- stack trace is from first successful lock):\n");
   1835         return false;
   1836     }
   1837     mon->historyMark = true;
   1838 
   1839     /*
   1840      * Examine the children.  We do NOT hold these locks, so they might
   1841      * very well transition from thin to fat or change ownership while
   1842      * we work.
   1843      *
   1844      * NOTE: we rely on the fact that they cannot revert from fat to thin
   1845      * while we work.  This is currently a safe assumption.
   1846      *
   1847      * We can safely ignore thin-locked children, because by definition
   1848      * they have no history and are leaf nodes.  In the current
   1849      * implementation we always fatten the locks to provide a place to
   1850      * hang the stack trace.
   1851      */
   1852     ExpandingObjectList* pList = &mon->historyChildren;
   1853     int i;
   1854     for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
   1855         const Object* child = expandBufGetEntry(pList, i);
   1856         u4 lock = child->lock;
   1857         if (!IS_LOCK_FAT(&lock))
   1858             continue;
   1859         if (!traverseTree(self, child)) {
   1860             LOGW("--- object %p (%s)\n", obj, obj->clazz->descriptor);
   1861             dvmLogRawStackTrace(mon->historyRawStackTrace,
   1862                 mon->historyStackDepth);
   1863             mon->historyMark = false;
   1864             return false;
   1865         }
   1866     }
   1867 
   1868     mon->historyMark = false;
   1869 
   1870     return true;
   1871 }
   1872 
   1873 /*
   1874  * Update the deadlock prediction tree, based on the current thread
   1875  * acquiring "acqObj".  This must be called before the object is added to
   1876  * the thread's list of held monitors.
   1877  *
   1878  * If the thread already holds the lock (recursion), or this is a known
   1879  * lock configuration, we return without doing anything.  Otherwise, we add
   1880  * a link from the most-recently-acquired lock in this thread to "acqObj"
   1881  * after ensuring that the parent lock is "fat".
   1882  *
   1883  * This MUST NOT be called while a GC is in progress in another thread,
   1884  * because we assume exclusive access to history trees in owned monitors.
   1885  */
   1886 static void updateDeadlockPrediction(Thread* self, Object* acqObj)
   1887 {
   1888     LockedObjectData* lod;
   1889     LockedObjectData* mrl;
   1890 
   1891     /*
   1892      * Quick check for recursive access.
   1893      */
   1894     lod = dvmFindInMonitorList(self, acqObj);
   1895     if (lod != NULL) {
   1896         LOGV("+++ DP: recursive %p\n", acqObj);
   1897         return;
   1898     }
   1899 
   1900     /*
   1901      * Make the newly-acquired object's monitor "fat".  In some ways this
   1902      * isn't strictly necessary, but we need the GC to tell us when
   1903      * "interesting" objects go away, and right now the only way to make
   1904      * an object look interesting is to give it a monitor.
   1905      *
   1906      * This also gives us a place to hang a stack trace.
   1907      *
   1908      * Our thread holds the lock, so we're allowed to rewrite the lock
   1909      * without worrying that something will change out from under us.
   1910      */
   1911     if (!IS_LOCK_FAT(&acqObj->lock)) {
   1912         LOGVV("fattening lockee %p (recur=%d)\n",
    1913             acqObj, LW_LOCK_COUNT(acqObj->lock));
   1914         Monitor* newMon = dvmCreateMonitor(acqObj);
   1915         lockMonitor(self, newMon);      // can't stall, don't need VMWAIT
   1916         newMon->lockCount += LW_LOCK_COUNT(acqObj->lock);
   1917         u4 hashState = LW_HASH_STATE(acqObj->lock) << LW_HASH_STATE_SHIFT;
   1918         acqObj->lock = (u4)newMon | hashState | LW_SHAPE_FAT;
   1919     }
   1920 
   1921     /* if we don't have a stack trace for this monitor, establish one */
   1922     if (LW_MONITOR(acqObj->lock)->historyRawStackTrace == NULL) {
   1923         Monitor* mon = LW_MONITOR(acqObj->lock);
   1924         mon->historyRawStackTrace = dvmFillInStackTraceRaw(self,
   1925             &mon->historyStackDepth);
   1926     }
   1927 
   1928     /*
   1929      * We need to examine and perhaps modify the most-recently-locked
   1930      * monitor.  We own that, so there's no risk of another thread
   1931      * stepping on us.
   1932      *
   1933      * Retrieve the most-recently-locked entry from our thread.
   1934      */
   1935     mrl = self->pLockedObjects;
   1936     if (mrl == NULL)
   1937         return;         /* no other locks held */
   1938 
   1939     /*
   1940      * Do a quick check to see if "acqObj" is a direct descendant.  We can do
   1941      * this without holding the global lock because of our assertion that
   1942      * a GC is not running in parallel -- nobody except the GC can
   1943      * modify a history list in a Monitor they don't own, and we own "mrl".
    1944      * (There might be concurrent *reads*, but no concurrent *writes*.)
   1945      *
   1946      * If we find it, this is a known good configuration, and we're done.
   1947      */
   1948     if (objectInChildList(mrl->obj, acqObj))
   1949         return;
   1950 
   1951     /*
   1952      * "mrl" is going to need to have a history tree.  If it's currently
   1953      * a thin lock, we make it fat now.  The thin lock might have a
   1954      * nonzero recursive lock count, which we need to carry over.
   1955      *
   1956      * Our thread holds the lock, so we're allowed to rewrite the lock
   1957      * without worrying that something will change out from under us.
   1958      */
   1959     if (!IS_LOCK_FAT(&mrl->obj->lock)) {
   1960         LOGVV("fattening parent %p f/b/o child %p (recur=%d)\n",
   1961             mrl->obj, acqObj, LW_LOCK_COUNT(mrl->obj->lock));
   1962         Monitor* newMon = dvmCreateMonitor(mrl->obj);
   1963         lockMonitor(self, newMon);      // can't stall, don't need VMWAIT
   1964         newMon->lockCount += LW_LOCK_COUNT(mrl->obj->lock);
   1965         u4 hashState = LW_HASH_STATE(mrl->obj->lock) << LW_HASH_STATE_SHIFT;
   1966         mrl->obj->lock = (u4)newMon | hashState | LW_SHAPE_FAT;
   1967     }
   1968 
   1969     /*
   1970      * We haven't seen this configuration before.  We need to scan down
   1971      * acqObj's tree to see if any of the monitors in self->pLockedObjects
   1972      * appear.  We grab a global lock before traversing or updating the
   1973      * history list.
   1974      *
   1975      * If we find a match for any of our held locks, we know that the lock
   1976      * has previously been acquired *after* acqObj, and we throw an error.
   1977      *
   1978      * The easiest way to do this is to create a link from "mrl" to "acqObj"
   1979      * and do a recursive traversal, marking nodes as we cross them.  If
   1980      * we cross one a second time, we have a cycle and can throw an error.
   1981      * (We do the flag-clearing traversal before adding the new link, so
   1982      * that we're guaranteed to terminate.)
   1983      *
   1984      * If "acqObj" is a thin lock, it has no history, and we can create a
   1985      * link to it without additional checks.  [ We now guarantee that it's
   1986      * always fat. ]
   1987      */
   1988     bool failed = false;
   1989     dvmLockMutex(&gDvm.deadlockHistoryLock);
   1990     linkParentToChild(mrl->obj, acqObj);
   1991     if (!traverseTree(self, acqObj)) {
   1992         LOGW("%s\n", kEndBanner);
   1993         failed = true;
   1994 
   1995         /* remove the entry so we're still okay when in "warning" mode */
   1996         unlinkParentFromChild(mrl->obj, acqObj);
   1997     }
   1998     dvmUnlockMutex(&gDvm.deadlockHistoryLock);
   1999 
   2000     if (failed) {
   2001         switch (gDvm.deadlockPredictMode) {
   2002         case kDPErr:
   2003             dvmThrowException("Ldalvik/system/PotentialDeadlockError;", NULL);
   2004             break;
   2005         case kDPAbort:
   2006             LOGE("Aborting due to potential deadlock\n");
   2007             dvmAbort();
   2008             break;
   2009         default:
   2010             /* warn only */
   2011             break;
   2012         }
   2013     }
   2014 }
   2015 
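/*
 * Illustrative sketch (compiled out) of the kind of inversion the code
 * above flags.  The first pair of acquisitions records the edge
 * objA -> objB; the second pair acquires in the opposite order, so
 * traverseTree() finds a cycle and, in kDPErr mode, the offending thread
 * gets a PotentialDeadlockError.  The helper itself is hypothetical.
 */
#if 0
static void lockOrderInversion(Thread* self, Object* objA, Object* objB)
{
    dvmLockObject(self, objA);
    dvmLockObject(self, objB);          /* records objA -> objB */
    dvmUnlockObject(self, objB);
    dvmUnlockObject(self, objA);

    dvmLockObject(self, objB);
    dvmLockObject(self, objA);          /* objB -> objA closes a cycle */
    dvmUnlockObject(self, objA);
    dvmUnlockObject(self, objB);
}
#endif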
   2016 /*
   2017  * We're removing "child" from existence.  We want to pull all of
   2018  * child's children into "parent", filtering out duplicates.  This is
   2019  * called during the GC.
   2020  *
   2021  * This does not modify "child", which might have multiple parents.
   2022  */
   2023 static void mergeChildren(Object* parent, const Object* child)
   2024 {
   2025     Monitor* mon;
   2026     int i;
   2027 
   2028     assert(IS_LOCK_FAT(&child->lock));
   2029     mon = LW_MONITOR(child->lock);
   2030     ExpandingObjectList* pList = &mon->historyChildren;
   2031 
   2032     for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
   2033         Object* grandChild = expandBufGetEntry(pList, i);
   2034 
   2035         if (!objectInChildList(parent, grandChild)) {
   2036             LOGVV("+++  migrating %p link to %p\n", grandChild, parent);
   2037             linkParentToChild(parent, grandChild);
   2038         } else {
   2039             LOGVV("+++  parent %p already links to %p\n", parent, grandChild);
   2040         }
   2041     }
   2042 }
   2043 
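/*
 * Example: with history edges A -> B and B -> C, collecting B calls
 * mergeChildren(A, B), which adds the edge A -> C (unless it already
 * exists), so the ordering established between A and C survives B's
 * collection.
 */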
   2044 /*
   2045  * An object with a fat lock is being collected during a GC pass.  We
   2046  * want to remove it from any lock history trees that it is a part of.
   2047  *
   2048  * This may require updating the history trees in several monitors.  The
   2049  * monitor semantics guarantee that no other thread will be accessing
   2050  * the history trees at the same time.
   2051  */
   2052 static void removeCollectedObject(Object* obj)
   2053 {
   2054     Monitor* mon;
   2055 
   2056     LOGVV("+++ collecting %p\n", obj);
   2057 
   2058 #if 0
   2059     /*
    2060      * Old approach, kept for reference: run through the entire set of
    2061      * known monitors.  This proved somewhat slow, so we now keep a
    2062      * list of parents in each child instead (see below).
   2063      */
   2064     mon = gDvm.monitorList;
   2065     while (mon != NULL) {
   2066         Object* parent = mon->obj;
   2067         if (parent != NULL) {       /* value nulled for deleted entries */
   2068             if (objectInChildList(parent, obj)) {
   2069                 LOGVV("removing child %p from parent %p\n", obj, parent);
   2070                 unlinkParentFromChild(parent, obj);
   2071                 mergeChildren(parent, obj);
   2072             }
   2073         }
   2074         mon = mon->next;
   2075     }
   2076 #endif
   2077 
   2078     /*
   2079      * For every parent of this object:
   2080      *  - merge all of our children into the parent's child list (creates
   2081      *    a two-way link between parent and child)
   2082      *  - remove ourselves from the parent's child list
   2083      */
   2084     ExpandingObjectList* pList;
   2085     int i;
   2086 
   2087     assert(IS_LOCK_FAT(&obj->lock));
   2088     mon = LW_MONITOR(obj->lock);
   2089     pList = &mon->historyParents;
   2090     for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
   2091         Object* parent = expandBufGetEntry(pList, i);
   2092         Monitor* parentMon = LW_MONITOR(parent->lock);
   2093 
   2094         if (!expandObjRemoveEntry(&parentMon->historyChildren, obj)) {
   2095             LOGW("WARNING: child %p not found in parent %p\n", obj, parent);
   2096         }
   2097         assert(!expandObjHas(&parentMon->historyChildren, obj));
   2098 
   2099         mergeChildren(parent, obj);
   2100     }
   2101 
   2102     /*
   2103      * For every child of this object:
   2104      *  - remove ourselves from the child's parent list
   2105      */
   2106     pList = &mon->historyChildren;
   2107     for (i = expandBufGetCount(pList)-1; i >= 0; i--) {
   2108         Object* child = expandBufGetEntry(pList, i);
   2109         Monitor* childMon = LW_MONITOR(child->lock);
   2110 
   2111         if (!expandObjRemoveEntry(&childMon->historyParents, obj)) {
   2112             LOGW("WARNING: parent %p not found in child %p\n", obj, child);
   2113         }
   2114         assert(!expandObjHas(&childMon->historyParents, obj));
   2115     }
   2116 }
   2117 
   2118 #endif /*WITH_DEADLOCK_PREDICTION*/
   2119