
Lines Matching full:lock

29  *  - reverting to a thin lock once the Monitor is no longer necessary
82 * The two states of an Object's lock are referred to as "thin" and
83 * "fat". A lock may transition from the "thin" state to the "fat"
84 * state and this transition is referred to as inflation. Once a lock
87 * The lock value itself is stored in Object.lock. The LSB of the
88 * lock encodes its state. When cleared, the lock is in the "thin"
92 * lock count thread id hash state 0
94 * When set, the lock is in the "fat" state and its bits are formatted
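The thin-word layout sketched above can be made concrete with a few decoding macros. This is a minimal sketch: the exact shift and mask widths are assumptions in the spirit of the LW_* macros, not a verbatim copy of the VM headers. The later sketches in this listing build on these definitions.

    #include <stdint.h>

    typedef uint32_t u4;
    typedef struct Monitor Monitor;

    /* Bit 0 selects the shape of the lock word. */
    #define LW_SHAPE_THIN       0
    #define LW_SHAPE_FAT        1
    #define LW_SHAPE(x)         ((x) & 0x1)

    /* Thin layout (widths assumed): lock count | thread id | hash state | 0 */
    #define LW_HASH_STATE_SHIFT 1
    #define LW_HASH_STATE_MASK  0x3
    #define LW_HASH_STATE(x)    (((x) >> LW_HASH_STATE_SHIFT) & LW_HASH_STATE_MASK)
    #define LW_LOCK_OWNER_SHIFT 3
    #define LW_LOCK_OWNER_MASK  0xffff
    #define LW_LOCK_OWNER(x)    (((x) >> LW_LOCK_OWNER_SHIFT) & LW_LOCK_OWNER_MASK)
    #define LW_LOCK_COUNT_SHIFT 19
    #define LW_LOCK_COUNT_MASK  0x1fff
    #define LW_LOCK_COUNT(x)    (((x) >> LW_LOCK_COUNT_SHIFT) & LW_LOCK_COUNT_MASK)

    /* Fat layout: the word is a Monitor* with the low bits masked back off. */
    #define LW_MONITOR(x) \
        ((Monitor*)((x) & ~((LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT) | 0x1)))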
116 Thread* owner; /* which thread currently owns the lock? */
117 int lockCount; /* owner's recursive lock depth */
122 pthread_mutex_t lock;
168 dvmInitMutex(&mon->lock);
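A trimmed-down Monitor carrying the fields quoted above might look like the following sketch; the Thread stand-in, the waitSet field, and the helper name are assumptions added so the later sketches have something to hang off.

    #include <pthread.h>
    #include <stddef.h>

    typedef struct Thread Thread;
    struct Thread {
        u4      threadId;   /* id that appears in thin lock words */
        Thread* waitNext;   /* linkage for a monitor's wait set (assumed) */
    };

    struct Monitor {
        Thread*         owner;      /* which thread currently owns the lock? */
        int             lockCount;  /* owner's recursive lock depth */
        pthread_mutex_t lock;       /* OS mutex backing the fat lock */
        Thread*         waitSet;    /* threads parked in Object.wait() */
    };

    static void initMonitorSketch(Monitor* mon) {
        mon->owner = NULL;
        mon->lockCount = 0;
        mon->waitSet = NULL;
        pthread_mutex_init(&mon->lock, NULL);  /* mirrors dvmInitMutex(&mon->lock) */
    }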
240 * Returns the thread id of the thread owning the given lock.
245 u4 lock;
249 * Since we're reading the lock value multiple times, latch it so
252 lock = obj->lock;
253 if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
254 return LW_LOCK_OWNER(lock);
256 owner = LW_MONITOR(lock)->owner;
262 * Get the thread that holds the lock on the specified object. The
265 * The caller must lock the thread list before calling here.
278 * object's lock.
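The owner lookup those lines come from reduces to the following sketch, which builds on the two snippets above; the Object stand-in carries only the lock word.

    typedef struct Object {
        u4 lock;    /* thin or fat lock word */
    } Object;

    /* Returns the id of the thread owning the given lock, or 0 if unowned. */
    static u4 lockOwnerSketch(const Object* obj) {
        /* Latch the word once, since it can change while we examine it. */
        u4 lock = obj->lock;
        if (LW_SHAPE(lock) == LW_SHAPE_THIN)
            return LW_LOCK_OWNER(lock);             /* owner id is in the word */
        Thread* owner = LW_MONITOR(lock)->owner;    /* fat: ask the Monitor */
        return owner != NULL ? owner->threadId : 0;
    }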
290 * Free the monitor associated with an object and make the object's lock
297 assert(LW_SHAPE(obj->lock) == LW_SHAPE_FAT);
304 mon = LW_MONITOR(obj->lock);
305 obj->lock = DVM_LOCK_INITIAL_THIN_VALUE;
307 /* This lock is associated with an object
309 * anyone could be holding this lock would be
314 assert(pthread_mutex_trylock(&mon->lock) == 0);
315 pthread_mutex_destroy(&mon->lock);
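That teardown path, for an object the GC has found unreachable, amounts to the sketch below. DVM_LOCK_INITIAL_THIN_VALUE is assumed to be an unowned thin word of zero, and the explicit unlock before destroying the mutex is a simplification of the assert-based original.

    #include <assert.h>
    #include <stdlib.h>

    #define DVM_LOCK_INITIAL_THIN_VALUE 0   /* unowned, unhashed thin word (assumed) */

    static void freeObjectMonitorSketch(Object* obj) {
        assert(LW_SHAPE(obj->lock) == LW_SHAPE_FAT);

        Monitor* mon = LW_MONITOR(obj->lock);
        obj->lock = DVM_LOCK_INITIAL_THIN_VALUE;    /* make the lock thin again */

        /* The object is unreachable, so no thread can legitimately hold the
         * monitor; a successful trylock confirms that before it is destroyed. */
        int cc = pthread_mutex_trylock(&mon->lock);
        assert(cc == 0);
        (void)cc;
        pthread_mutex_unlock(&mon->lock);
        pthread_mutex_destroy(&mon->lock);
        free(mon);
    }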
429 * Lock a monitor.
442 if (pthread_mutex_trylock(&mon->lock) != 0) {
448 dvmLockMutex(&mon->lock);
470 * Try to lock a monitor.
482 cc = pthread_mutex_trylock(&mon->lock);
512 cc = pthread_mutex_unlock(&mon->lock);
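The fat-lock acquire/release pair behind those lines can be sketched as follows; the thread-state bookkeeping the VM performs around the blocking case is reduced to a comment, and the function names are hypothetical.

    #include <stdbool.h>

    static void lockMonitorSketch(Thread* self, Monitor* mon) {
        if (mon->owner == self) {
            mon->lockCount++;                       /* recursive acquisition */
            return;
        }
        if (pthread_mutex_trylock(&mon->lock) != 0) {
            /* Contended: the VM marks the thread as blocked on a monitor
             * before sleeping on the mutex, then restores its state. */
            pthread_mutex_lock(&mon->lock);
        }
        mon->owner = self;
        mon->lockCount = 0;                         /* held exactly once */
    }

    /* Returns false if the caller does not own the monitor, in which case the
     * VM throws IllegalMonitorStateException. */
    static bool unlockMonitorSketch(Thread* self, Monitor* mon) {
        if (mon->owner != self)
            return false;
        if (mon->lockCount == 0) {
            mon->owner = NULL;
            pthread_mutex_unlock(&mon->lock);
        } else {
            mon->lockCount--;                       /* still recursively held */
        }
        return true;
    }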
553 * Links a thread into a monitor's wait set. The monitor lock must be
577 * Unlinks a thread from a monitor's wait set. The monitor lock must
688 /* Make sure that we hold the lock. */
717 * deep in a recursive lock, and we need to restore that later.
762 * Release the monitor lock and wait for a notification or
765 pthread_mutex_unlock(&mon->lock);
787 /* Reacquire the monitor lock. */
828 /* Make sure that we hold the lock. */
860 /* Make sure that we hold the lock. */
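Wait-set maintenance and the wait sequence itself, hinted at in the lines above, can be sketched as follows. The real VM parks the waiting thread on a per-thread condition variable and also handles timeouts and interrupts; waitForNotificationSketch() is a hypothetical stand-in for that.

    /* Append a thread to the monitor's singly linked wait set.
     * The monitor lock must be held by the caller. */
    static void waitSetAppendSketch(Monitor* mon, Thread* thread) {
        thread->waitNext = NULL;
        Thread** link = &mon->waitSet;
        while (*link != NULL)
            link = &(*link)->waitNext;
        *link = thread;
    }

    /* Unlink a thread from the wait set. The monitor lock must be held. */
    static void waitSetRemoveSketch(Monitor* mon, Thread* thread) {
        Thread** link = &mon->waitSet;
        while (*link != NULL) {
            if (*link == thread) {
                *link = thread->waitNext;
                thread->waitNext = NULL;
                return;
            }
            link = &(*link)->waitNext;
        }
    }

    static void waitForNotificationSketch(Thread* self);   /* hypothetical park */

    static void waitMonitorSketch(Thread* self, Monitor* mon) {
        /* Make sure that we hold the lock. */
        if (mon->owner != self)
            return;     /* the real code throws IllegalMonitorStateException */

        /* The caller may be deep in a recursive lock; save the depth so it
         * can be restored after the wait. */
        int prevLockCount = mon->lockCount;
        mon->lockCount = 0;
        mon->owner = NULL;
        waitSetAppendSketch(mon, self);

        /* Release the monitor lock and wait for a notification or interrupt. */
        pthread_mutex_unlock(&mon->lock);
        waitForNotificationSketch(self);

        /* Reacquire the monitor lock and restore our ownership state. */
        pthread_mutex_lock(&mon->lock);
        waitSetRemoveSketch(mon, self);
        mon->owner = self;
        mon->lockCount = prevLockCount;
    }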
898 thinp = &obj->lock;
903 * The lock is a thin lock. The owner field is used to
908 * The calling thread owns the lock. Increment the
911 obj->lock += 1 << LW_LOCK_COUNT_SHIFT;
914 * The lock is unowned. Install the thread id of the
927 LOG_THIN("(%d) spin on lock %p: %#x (%#x) %#x",
928 threadId, &obj->lock, 0, *thinp, thin);
930 * The lock is owned by another thread. Notify the VM
935 * Spin until the thin lock is released or inflated.
941 * Check the shape of the lock word. Another thread
942 * may have inflated the lock while we were waiting.
947 * The lock has been released. Install the
956 * loop and proceed to inflate the lock.
962 * The lock has not been released. Yield so
977 * The thin lock was inflated by another thread.
981 LOG_THIN("(%d) lock %p surprise-fattened",
982 threadId, &obj->lock);
987 LOG_THIN("(%d) spin on lock done %p: %#x (%#x) %#x",
988 threadId, &obj->lock, 0, *thinp, thin);
990 * We have acquired the thin lock. Let the VM know that
995 * Fatten the lock.
1003 obj->lock = thin;
1004 LOG_THIN("(%d) lock %p fattened", threadId, &obj->lock);
1008 * The lock is a fat lock.
1010 assert(LW_MONITOR(obj->lock) != NULL);
1011 lockMonitor(self, LW_MONITOR(obj->lock));
1015 * See if we were allowed to grab the lock at this time. We do it
1016 * *after* acquiring the lock, rather than before, so that we can
1021 * the checks before the lock is held.)
1024 * monitor list, so we can tell the difference between first-lock and
1025 * re-lock.
1041 * lock. We add the object to the thread's monitor list so the
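The thin-lock fast path that those lines walk through looks roughly like the sketch below, which builds on the earlier snippets. Only the recursive and uncontended cases are shown; the contended spin-then-inflate path is summarized in a comment and left behind a hypothetical helper, and the GCC __sync builtin stands in for the VM's own CAS wrapper.

    static void lockObjectContendedSketch(Thread* self, Object* obj);  /* hypothetical */

    static void lockObjectSketch(Thread* self, Object* obj) {
        volatile u4* thinp = &obj->lock;
        u4 thin = *thinp;

        if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
            if (LW_LOCK_OWNER(thin) == self->threadId) {
                /* The calling thread already owns the lock: bump the count. */
                obj->lock += 1 << LW_LOCK_COUNT_SHIFT;
                return;
            }
            /* Try to install our thread id into an unowned word, keeping the
             * hash-state bits intact. */
            u4 unowned = thin & (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
            u4 owned   = unowned | (self->threadId << LW_LOCK_OWNER_SHIFT);
            if (__sync_bool_compare_and_swap(thinp, unowned, owned))
                return;                             /* uncontended acquisition */

            /* Contended: the real code spins (yielding, then sleeping) until
             * the word is released or another thread inflates it, and if it
             * eventually wins the thin lock it fattens it to a Monitor so that
             * later contenders can block instead of spin. */
            lockObjectContendedSketch(self, obj);
            return;
        }

        /* The lock is already fat: block on the Monitor. */
        lockMonitorSketch(self, LW_MONITOR(obj->lock));
    }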
1078 * Cache the lock word as its value can change while we are
1081 thin = obj->lock;
1084 * The lock is thin. We must ensure that the lock is owned
1089 * We are the lock owner. It is safe to update the lock
1090 * without CAS as lock ownership guards the lock itself.
1094 * The lock was not recursively acquired, the common
1098 obj->lock &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
1102 * lock recursion count field.
1104 obj->lock -= 1 << LW_LOCK_COUNT_SHIFT;
1108 * We do not own the lock. The JVM spec requires that we
1117 * The lock is fat. We must check to see if unlockMonitor has
1120 assert(LW_MONITOR(obj->lock) != NULL);
1121 if (!unlockMonitor(self, LW_MONITOR(obj->lock))) {
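Put together, the unlock path above reduces to this sketch (again building on the earlier definitions); the thin word can be updated without a CAS because only the owning thread may release it.

    static bool unlockObjectSketch(Thread* self, Object* obj) {
        /* Cache the lock word; for the owner it cannot change underneath us. */
        u4 thin = obj->lock;

        if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
            /* The lock is thin; only its owner may release it. */
            if (LW_LOCK_OWNER(thin) != self->threadId)
                return false;   /* caller throws IllegalMonitorStateException */

            if (LW_LOCK_COUNT(thin) == 0) {
                /* Not recursively held: clear everything but the hash state. */
                obj->lock &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
            } else {
                /* Recursively held: just decrement the count field. */
                obj->lock -= 1 << LW_LOCK_COUNT_SHIFT;
            }
            return true;
        }

        /* The lock is fat: defer to the Monitor. */
        return unlockMonitorSketch(self, LW_MONITOR(obj->lock));
    }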
1145 Monitor* mon = LW_MONITOR(obj->lock);
1147 u4 thin = obj->lock;
1149 /* If the lock is still thin, we need to fatten it.
1152 /* Make sure that 'self' holds the lock.
1160 /* This thread holds the lock. We need to fatten the lock
1161 * so 'self' can block on it. Don't update the object lock
1162 * field yet, because 'self' needs to acquire the lock before
1172 LOG_THIN("(%d) lock 0x%08x fattened by wait() to count %d\n",
1173 self->threadId, (uint)&obj->lock, mon->lockCount);
1181 obj->lock = thin;
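The wait()-driven fattening shown above carries the thin word's recursion count and hash state over into a fresh Monitor before republishing the lock word in fat form. A sketch, with createMonitorSketch() as a hypothetical allocator and assuming 32-bit pointers, as the VM does when it stores a Monitor* in the word:

    static Monitor* createMonitorSketch(void);  /* hypothetical allocator */

    static void fattenForWaitSketch(Thread* self, Object* obj) {
        u4 thin = obj->lock;
        if (LW_SHAPE(thin) != LW_SHAPE_THIN)
            return;                              /* already fat */

        /* Make sure that 'self' holds the lock. */
        if (LW_LOCK_OWNER(thin) != self->threadId)
            return;                              /* the VM throws IMSE */

        /* Acquire the new monitor before publishing it, so no other thread
         * can slip in between the store and the subsequent wait(). */
        Monitor* mon = createMonitorSketch();
        lockMonitorSketch(self, mon);
        mon->lockCount = LW_LOCK_COUNT(thin);    /* carry the recursion depth */

        u4 hashState = LW_HASH_STATE(thin) << LW_HASH_STATE_SHIFT;
        obj->lock = (u4)(uintptr_t)mon | hashState | LW_SHAPE_FAT;
    }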
1192 u4 thin = obj->lock;
1194 /* If the lock is still thin, there aren't any waiters;
1195 * waiting on an object forces lock fattening.
1198 /* Make sure that 'self' holds the lock.
1209 /* It's a fat lock.
1220 u4 thin = obj->lock;
1222 /* If the lock is still thin, there aren't any waiters;
1223 * waiting on an object forces lock fattening.
1226 /* Make sure that 'self' holds the lock.
1237 /* It's a fat lock.
1359 u4 lock, owner, hashState;
1367 lw = &obj->lock;
1398 * We already own the lock so we can update the hash state
1405 * We do not own the lock. Try acquiring the lock. Should
1410 * If the lock is thin assume it is unowned. We simulate
1413 lock = DVM_LOCK_INITIAL_THIN_VALUE;
1414 lock |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
1417 (int32_t)lock)) {
1427 * The monitor lock has been acquired. Change the
1437 * At this point we have failed to acquire the lock. We must
1442 * Cache the lock word as its value can change between
1445 lock = *lw;
1446 if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
1450 owner = LW_LOCK_OWNER(lock);
1453 * If the lock has no owner do not bother scanning the
1464 thread = LW_MONITOR(lock)->owner;
1468 * thread list lock was acquired. Try again.
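Those lines come from the identity-hash path: the hash is only stable once the hash-state bits say "hashed", and installing that state on an apparently unowned thin lock is done with a CAS. A simplified sketch, with the state values and the slow path (suspending the owning thread to update the state on its behalf) treated as assumptions:

    #define LW_HASH_STATE_UNHASHED 0
    #define LW_HASH_STATE_HASHED   1

    static u4 objectHashCodeSketch(Object* obj) {
        volatile u4* lw = &obj->lock;
        u4 lock = *lw;

        if (LW_HASH_STATE(lock) != LW_HASH_STATE_UNHASHED)
            return (u4)(uintptr_t)obj;          /* address serves as the hash */

        if (LW_SHAPE(lock) == LW_SHAPE_THIN && LW_LOCK_OWNER(lock) == 0) {
            /* The lock looks thin and unowned: try to set the hashed state. */
            u4 hashed = DVM_LOCK_INITIAL_THIN_VALUE |
                        (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT);
            if (__sync_bool_compare_and_swap(lw, DVM_LOCK_INITIAL_THIN_VALUE, hashed))
                return (u4)(uintptr_t)obj;
        }

        /* The lock is owned or fat: the real code walks the thread list to
         * find the owner, suspends it, and updates the hash state for it. */
        return 0;   /* slow path omitted from this sketch */
    }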
1510 in which monitors are acquired. If we see an attempt to acquire a lock
1514 and create history trees for each lock. When a thread tries to acquire
1515 a new lock, we walk through the "history children" of the lock, looking
1520 lock, and maintain a recursion depth count.
1537 If we hold AC, and want to lock B, we recursively search through B's
1542 If we hold AC and want to lock D, we would succeed, creating a new link
1545 The lock history and a stack trace is attached to the Object's Monitor
1546 struct, which means we need to fatten every Object we lock (thin locking
1553 the lock has been acquired.
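The check described in that passage is essentially a depth-first search of a candidate lock's history children for any lock the thread already holds; a hit means acquiring the candidate now would invert a previously recorded order. A sketch, applied to the candidate's children (so plain recursive locking is not flagged), with the list type and accessor modeled loosely on the real structures and to be read as assumptions:

    #define IS_LOCK_FAT(lockp) (LW_SHAPE(*(lockp)) == LW_SHAPE_FAT)

    typedef struct {
        Object** objs;
        int      count;
    } ObjListSketch;

    /* Hypothetical accessor: in the real code the child list lives in the
     * Object's Monitor (historyChildren). */
    static const ObjListSketch* historyChildrenOfSketch(const Object* obj);

    static bool listContainsSketch(const ObjListSketch* list, const Object* obj) {
        for (int i = 0; i < list->count; i++)
            if (list->objs[i] == obj)
                return true;
        return false;
    }

    /* Returns true if 'node' or anything below it in its history tree is a
     * lock that 'heldLocks' already contains. */
    static bool historyConflictsSketch(const Object* node,
                                       const ObjListSketch* heldLocks) {
        if (listContainsSketch(heldLocks, node))
            return true;
        if (!IS_LOCK_FAT(&node->lock))
            return false;               /* a thin lock carries no history */
        const ObjListSketch* kids = historyChildrenOfSketch(node);
        for (int i = 0; i < kids->count; i++)
            if (historyConflictsSketch(kids->objs[i], heldLocks))
                return true;
        return false;
    }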
1703 * with the Monitor in "parent". If "parent" is a thin lock, we return
1708 u4 lock = parent->lock;
1709 if (!IS_LOCK_FAT(&lock)) {
1714 return expandObjHas(&LW_MONITOR(lock)->historyChildren, child);
1722 Monitor* mon = LW_MONITOR(parent->lock);
1735 //assert(LW_MONITOR(parent->lock)->owner == dvmThreadSelf()); // !owned for merge
1736 assert(IS_LOCK_FAT(&parent->lock));
1737 assert(IS_LOCK_FAT(&child->lock));
1741 mon = LW_MONITOR(parent->lock);
1745 mon = LW_MONITOR(child->lock);
1756 //assert(LW_MONITOR(parent->lock)->owner == dvmThreadSelf()); // !owned for GC
1757 assert(IS_LOCK_FAT(&parent->lock));
1758 assert(IS_LOCK_FAT(&child->lock));
1762 mon = LW_MONITOR(parent->lock);
1769 mon = LW_MONITOR(child->lock);
1811 assert(IS_LOCK_FAT(&obj->lock));
1812 Monitor* mon = LW_MONITOR(obj->lock);
1822 LOGW("Illegal lock attempt:\n");
1833 LOGW("Earlier, the following lock order (from last to first) was\n");
1834 LOGW("established -- stack trace is from first successful lock):\n");
1856 u4 lock = child->lock;
1857 if (!IS_LOCK_FAT(&lock))
1878 * If the thread already holds the lock (recursion), or this is a known
1879 * lock configuration, we return without doing anything. Otherwise, we add
1880 * a link from the most-recently-acquired lock in this thread to "acqObj"
1881 * after ensuring that the parent lock is "fat".
1908 * Our thread holds the lock, so we're allowed to rewrite the lock
1911 if (!IS_LOCK_FAT(&acqObj->lock)) {
1913 acqObj, LW_LOCK_COUNT(acqObj->lock.thin));
1916 newMon->lockCount += LW_LOCK_COUNT(acqObj->lock);
1917 u4 hashState = LW_HASH_STATE(acqObj->lock) << LW_HASH_STATE_SHIFT;
1918 acqObj->lock = (u4)newMon | hashState | LW_SHAPE_FAT;
1922 if (LW_MONITOR(acqObj->lock)->historyRawStackTrace == NULL) {
1923 Monitor* mon = LW_MONITOR(acqObj->lock);
1941 * this without holding the global lock because of our assertion that
1953 * a thin lock, we make it fat now. The thin lock might have a
1954 * nonzero recursive lock count, which we need to carry over.
1956 * Our thread holds the lock, so we're allowed to rewrite the lock
1959 if (!IS_LOCK_FAT(&mrl->obj->lock)) {
1961 mrl->obj, acqObj, LW_LOCK_COUNT(mrl->obj->lock));
1964 newMon->lockCount += LW_LOCK_COUNT(mrl->obj->lock);
1965 u4 hashState = LW_HASH_STATE(mrl->obj->lock) << LW_HASH_STATE_SHIFT;
1966 mrl->obj->lock = (u4)newMon | hashState | LW_SHAPE_FAT;
1972 * appear. We grab a global lock before traversing or updating the
1975 * If we find a match for any of our held locks, we know that the lock
1984 * If "acqObj" is a thin lock, it has no history, and we can create a
2028 assert(IS_LOCK_FAT(&child->lock));
2029 mon = LW_MONITOR(child->lock);
2045 * An object with a fat lock is being collected during a GC pass. We
2046 * want to remove it from any lock history trees that it is a part of.
2087 assert(IS_LOCK_FAT(&obj->lock));
2088 mon = LW_MONITOR(obj->lock);
2092 Monitor* parentMon = LW_MONITOR(parent->lock);
2109 Monitor* childMon = LW_MONITOR(child->lock);