1 /* 2 * Copyright (C) 2008 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #include "Dalvik.h" 18 19 #include <fcntl.h> 20 #include <stdlib.h> 21 #include <unistd.h> 22 #include <pthread.h> 23 #include <time.h> 24 #include <errno.h> 25 26 /* 27 * Every Object has a monitor associated with it, but not every Object is 28 * actually locked. Even the ones that are locked do not need a 29 * full-fledged monitor until a) there is actual contention or b) wait() 30 * is called on the Object. 31 * 32 * For Dalvik, we have implemented a scheme similar to the one described 33 * in Bacon et al.'s "Thin locks: featherweight synchronization for Java" 34 * (ACM 1998). Things are even easier for us, though, because we have 35 * a full 32 bits to work with. 36 * 37 * The two states of an Object's lock are referred to as "thin" and 38 * "fat". A lock may transition from the "thin" state to the "fat" 39 * state and this transition is referred to as inflation. Once a lock 40 * has been inflated it remains in the "fat" state indefinitely. 41 * 42 * The lock value itself is stored in Object.lock. The LSB of the 43 * lock encodes its state. 
When cleared, the lock is in the "thin"
 * state and its bits are formatted as follows:
 *
 *    [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
 *     lock count   thread id  hash state  0
 *
 * When set, the lock is in the "fat" state and its bits are formatted
 * as follows:
 *
 *    [31 ---- 3] [2 ---- 1] [0]
 *      pointer   hash state  1
 *
 * For an in-depth description of the mechanics of thin-vs-fat locking,
 * read the paper referred to above.
 */

/*
 * Monitors provide:
 *  - mutually exclusive access to resources
 *  - a way for multiple threads to wait for notification
 *
 * In effect, they fill the role of both mutexes and condition variables.
 *
 * Only one thread can own the monitor at any time.  There may be several
 * threads waiting on it (the wait call unlocks it).  One or more waiting
 * threads may be getting interrupted or notified at any given time.
 *
 * TODO: the various members of monitor are not SMP-safe.
 */
struct Monitor {
    Thread*     owner;          /* which thread currently owns the lock? */
    int         lockCount;      /* owner's recursive lock depth */
    Object*     obj;            /* what object are we part of [debug only] */

    Thread*     waitSet;        /* threads currently waiting on this monitor */

    pthread_mutex_t lock;       /* the actual mutex backing this monitor */

    Monitor*    next;           /* intrusive link in gDvm.monitorList */

    /*
     * Who last acquired this monitor, when lock sampling is enabled.
     * Even when enabled, ownerMethod may be NULL.
     */
    const Method* ownerMethod;
    u4 ownerPc;
};


/*
 * Create and initialize a monitor.
 */
Monitor* dvmCreateMonitor(Object* obj)
{
    Monitor* mon;

    /* Monitors live until VM shutdown or GC sweep; allocation failure
     * is fatal. */
    mon = (Monitor*) calloc(1, sizeof(Monitor));
    if (mon == NULL) {
        ALOGE("Unable to allocate monitor");
        dvmAbort();
    }
    /* The low 3 bits of the monitor pointer are reused by the lock word
     * for shape + hash state, so the pointer must be 8-byte aligned. */
    if (((u4)mon & 7) != 0) {
        ALOGE("Misaligned monitor: %p", mon);
        dvmAbort();
    }
    mon->obj = obj;
    dvmInitMutex(&mon->lock);

    /* replace the head of the list with the new monitor; lock-free CAS
     * push so concurrent inflations don't lose nodes */
    do {
        mon->next = gDvm.monitorList;
    } while (android_atomic_release_cas((int32_t)mon->next, (int32_t)mon,
            (int32_t*)(void*)&gDvm.monitorList) != 0);

    return mon;
}

/*
 * Free the monitor list.  Only used when shutting the VM down.
 */
void dvmFreeMonitorList()
{
    Monitor* mon;
    Monitor* nextMon;

    mon = gDvm.monitorList;
    while (mon != NULL) {
        nextMon = mon->next;
        free(mon);
        mon = nextMon;
    }
}

/*
 * Get the object that a monitor is part of.
 */
Object* dvmGetMonitorObject(Monitor* mon)
{
    if (mon == NULL)
        return NULL;
    else
        return mon->obj;
}

/*
 * Returns the thread id of the thread owning the given lock, or 0 if
 * the lock is unowned.
 */
static u4 lockOwner(Object* obj)
{
    Thread *owner;
    u4 lock;

    assert(obj != NULL);
    /*
     * Since we're reading the lock value multiple times, latch it so
     * that it doesn't change out from under us if we get preempted.
     */
    lock = obj->lock;
    if (LW_SHAPE(lock) == LW_SHAPE_THIN) {
        return LW_LOCK_OWNER(lock);
    } else {
        owner = LW_MONITOR(lock)->owner;
        return owner ? owner->threadId : 0;
    }
}

/*
 * Get the thread that holds the lock on the specified object.  The
 * object may be unlocked, thin-locked, or fat-locked.
 *
 * The caller must lock the thread list before calling here.
 */
Thread* dvmGetObjectLockHolder(Object* obj)
{
    u4 threadId = lockOwner(obj);

    if (threadId == 0)
        return NULL;
    return dvmGetThreadByThreadId(threadId);
}

/*
 * Checks whether the given thread holds the given
 * objects's lock.
 */
bool dvmHoldsLock(Thread* thread, Object* obj)
{
    if (thread == NULL || obj == NULL) {
        return false;
    } else {
        return thread->threadId == lockOwner(obj);
    }
}

/*
 * Free the monitor associated with an object and make the object's lock
 * thin again.  This is called during garbage collection.
 */
static void freeMonitor(Monitor *mon)
{
    assert(mon != NULL);
    assert(mon->obj != NULL);
    assert(LW_SHAPE(mon->obj->lock) == LW_SHAPE_FAT);

    /* This lock is associated with an object
     * that's being swept.  The only possible way
     * anyone could be holding this lock would be
     * if some JNI code locked but didn't unlock
     * the object, in which case we've got some bad
     * native code somewhere.
     *
     * NOTE(review): the two asserts below have side effects (trylock +
     * unlock) and are compiled out under NDEBUG; the sanity check only
     * runs in debug builds.
     */
    assert(pthread_mutex_trylock(&mon->lock) == 0);
    assert(pthread_mutex_unlock(&mon->lock) == 0);
    dvmDestroyMutex(&mon->lock);
    free(mon);
}

/*
 * Frees monitor objects belonging to unmarked objects.
222 */ 223 void dvmSweepMonitorList(Monitor** mon, int (*isUnmarkedObject)(void*)) 224 { 225 Monitor handle; 226 Monitor *prev, *curr; 227 Object *obj; 228 229 assert(mon != NULL); 230 assert(isUnmarkedObject != NULL); 231 prev = &handle; 232 prev->next = curr = *mon; 233 while (curr != NULL) { 234 obj = curr->obj; 235 if (obj != NULL && (*isUnmarkedObject)(obj) != 0) { 236 prev->next = curr->next; 237 freeMonitor(curr); 238 curr = prev->next; 239 } else { 240 prev = curr; 241 curr = curr->next; 242 } 243 } 244 *mon = handle.next; 245 } 246 247 static char *logWriteInt(char *dst, int value) 248 { 249 *dst++ = EVENT_TYPE_INT; 250 set4LE((u1 *)dst, value); 251 return dst + 4; 252 } 253 254 static char *logWriteString(char *dst, const char *value, size_t len) 255 { 256 *dst++ = EVENT_TYPE_STRING; 257 len = len < 32 ? len : 32; 258 set4LE((u1 *)dst, len); 259 dst += 4; 260 memcpy(dst, value, len); 261 return dst + len; 262 } 263 264 #define EVENT_LOG_TAG_dvm_lock_sample 20003 265 266 static void logContentionEvent(Thread *self, u4 waitMs, u4 samplePercent, 267 const char *ownerFileName, u4 ownerLineNumber) 268 { 269 const StackSaveArea *saveArea; 270 const Method *meth; 271 u4 relativePc; 272 char eventBuffer[174]; 273 const char *fileName; 274 char procName[33]; 275 char *cp; 276 size_t len; 277 int fd; 278 279 saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame); 280 meth = saveArea->method; 281 cp = eventBuffer; 282 283 /* Emit the event list length, 1 byte. */ 284 *cp++ = 9; 285 286 /* Emit the process name, <= 37 bytes. */ 287 fd = open("/proc/self/cmdline", O_RDONLY); 288 memset(procName, 0, sizeof(procName)); 289 read(fd, procName, sizeof(procName) - 1); 290 close(fd); 291 len = strlen(procName); 292 cp = logWriteString(cp, procName, len); 293 294 /* Emit the sensitive thread ("main thread") status, 5 bytes. 
*/ 295 bool isSensitive = false; 296 if (gDvm.isSensitiveThreadHook != NULL) { 297 isSensitive = gDvm.isSensitiveThreadHook(); 298 } 299 cp = logWriteInt(cp, isSensitive); 300 301 /* Emit self thread name string, <= 37 bytes. */ 302 std::string selfName = dvmGetThreadName(self); 303 cp = logWriteString(cp, selfName.c_str(), selfName.size()); 304 305 /* Emit the wait time, 5 bytes. */ 306 cp = logWriteInt(cp, waitMs); 307 308 /* Emit the source code file name, <= 37 bytes. */ 309 fileName = dvmGetMethodSourceFile(meth); 310 if (fileName == NULL) fileName = ""; 311 cp = logWriteString(cp, fileName, strlen(fileName)); 312 313 /* Emit the source code line number, 5 bytes. */ 314 relativePc = saveArea->xtra.currentPc - saveArea->method->insns; 315 cp = logWriteInt(cp, dvmLineNumFromPC(meth, relativePc)); 316 317 /* Emit the lock owner source code file name, <= 37 bytes. */ 318 if (ownerFileName == NULL) { 319 ownerFileName = ""; 320 } else if (strcmp(fileName, ownerFileName) == 0) { 321 /* Common case, so save on log space. */ 322 ownerFileName = "-"; 323 } 324 cp = logWriteString(cp, ownerFileName, strlen(ownerFileName)); 325 326 /* Emit the source code line number, 5 bytes. */ 327 cp = logWriteInt(cp, ownerLineNumber); 328 329 /* Emit the sample percentage, 5 bytes. */ 330 cp = logWriteInt(cp, samplePercent); 331 332 assert((size_t)(cp - eventBuffer) <= sizeof(eventBuffer)); 333 android_btWriteLog(EVENT_LOG_TAG_dvm_lock_sample, 334 EVENT_TYPE_LIST, 335 eventBuffer, 336 (size_t)(cp - eventBuffer)); 337 } 338 339 /* 340 * Lock a monitor. 
 */
static void lockMonitor(Thread* self, Monitor* mon)
{
    ThreadStatus oldStatus;
    u4 waitThreshold, samplePercent;
    u8 waitStart, waitEnd, waitMs;

    /* Recursive acquisition by the current owner just bumps the count. */
    if (mon->owner == self) {
        mon->lockCount++;
        return;
    }
    if (dvmTryLockMutex(&mon->lock) != 0) {
        /* Contended: tell the VM we may block so GC/suspension can
         * proceed while we wait. */
        oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
        waitThreshold = gDvm.lockProfThreshold;
        if (waitThreshold) {
            waitStart = dvmGetRelativeTimeUsec();
        }

        /* Snapshot the current holder's location before blocking.  These
         * fields are read without holding the mutex, so they may be stale
         * or NULL; that's acceptable for sampled logging. */
        const Method* currentOwnerMethod = mon->ownerMethod;
        u4 currentOwnerPc = mon->ownerPc;

        dvmLockMutex(&mon->lock);
        if (waitThreshold) {
            waitEnd = dvmGetRelativeTimeUsec();
        }
        dvmChangeStatus(self, oldStatus);
        if (waitThreshold) {
            /* Sample contention events with probability proportional to
             * the wait time, capped at 100% once it reaches the
             * configured threshold. */
            waitMs = (waitEnd - waitStart) / 1000;
            if (waitMs >= waitThreshold) {
                samplePercent = 100;
            } else {
                samplePercent = 100 * waitMs / waitThreshold;
            }
            if (samplePercent != 0 && ((u4)rand() % 100 < samplePercent)) {
                const char* currentOwnerFileName = "no_method";
                u4 currentOwnerLineNumber = 0;
                if (currentOwnerMethod != NULL) {
                    currentOwnerFileName = dvmGetMethodSourceFile(currentOwnerMethod);
                    if (currentOwnerFileName == NULL) {
                        currentOwnerFileName = "no_method_file";
                    }
                    currentOwnerLineNumber = dvmLineNumFromPC(currentOwnerMethod, currentOwnerPc);
                }
                logContentionEvent(self, waitMs, samplePercent,
                                   currentOwnerFileName, currentOwnerLineNumber);
            }
        }
    }
    mon->owner = self;
    assert(mon->lockCount == 0);

    // When debugging, save the current monitor holder for future
    // acquisition failures to use in sampled logging.
    if (gDvm.lockProfThreshold > 0) {
        mon->ownerMethod = NULL;
        mon->ownerPc = 0;
        if (self->interpSave.curFrame == NULL) {
            return;
        }
        const StackSaveArea* saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame);
        if (saveArea == NULL) {
            return;
        }
        mon->ownerMethod = saveArea->method;
        mon->ownerPc = (saveArea->xtra.currentPc - saveArea->method->insns);
    }
}

/*
 * Try to lock a monitor.
 *
 * Returns "true" on success.
 */
#ifdef WITH_COPYING_GC
static bool tryLockMonitor(Thread* self, Monitor* mon)
{
    if (mon->owner == self) {
        /* Already ours; recursive acquire. */
        mon->lockCount++;
        return true;
    } else {
        if (dvmTryLockMutex(&mon->lock) == 0) {
            mon->owner = self;
            assert(mon->lockCount == 0);
            return true;
        } else {
            return false;
        }
    }
}
#endif

/*
 * Unlock a monitor.
 *
 * Returns true if the unlock succeeded.
 * If the unlock failed, an exception will be pending.
 */
static bool unlockMonitor(Thread* self, Monitor* mon)
{
    assert(self != NULL);
    assert(mon != NULL);
    if (mon->owner == self) {
        /*
         * We own the monitor, so nobody else can be in here.
         */
        if (mon->lockCount == 0) {
            /* Outermost unlock: clear ownership info before releasing. */
            mon->owner = NULL;
            mon->ownerMethod = NULL;
            mon->ownerPc = 0;
            dvmUnlockMutex(&mon->lock);
        } else {
            mon->lockCount--;
        }
    } else {
        /*
         * We don't own this, so we're not allowed to unlock it.
         * The JNI spec says that we should throw IllegalMonitorStateException
         * in this case.
         */
        dvmThrowIllegalMonitorStateException("unlock of unowned monitor");
        return false;
    }
    return true;
}

/*
 * Checks the wait set for circular structure.  Returns 0 if the list
 * is not circular.  Otherwise, returns 1.  Used only by asserts.
469 */ 470 #ifndef NDEBUG 471 static int waitSetCheck(Monitor *mon) 472 { 473 Thread *fast, *slow; 474 size_t n; 475 476 assert(mon != NULL); 477 fast = slow = mon->waitSet; 478 n = 0; 479 for (;;) { 480 if (fast == NULL) return 0; 481 if (fast->waitNext == NULL) return 0; 482 if (fast == slow && n > 0) return 1; 483 n += 2; 484 fast = fast->waitNext->waitNext; 485 slow = slow->waitNext; 486 } 487 } 488 #endif 489 490 /* 491 * Links a thread into a monitor's wait set. The monitor lock must be 492 * held by the caller of this routine. 493 */ 494 static void waitSetAppend(Monitor *mon, Thread *thread) 495 { 496 Thread *elt; 497 498 assert(mon != NULL); 499 assert(mon->owner == dvmThreadSelf()); 500 assert(thread != NULL); 501 assert(thread->waitNext == NULL); 502 assert(waitSetCheck(mon) == 0); 503 if (mon->waitSet == NULL) { 504 mon->waitSet = thread; 505 return; 506 } 507 elt = mon->waitSet; 508 while (elt->waitNext != NULL) { 509 elt = elt->waitNext; 510 } 511 elt->waitNext = thread; 512 } 513 514 /* 515 * Unlinks a thread from a monitor's wait set. The monitor lock must 516 * be held by the caller of this routine. 517 */ 518 static void waitSetRemove(Monitor *mon, Thread *thread) 519 { 520 Thread *elt; 521 522 assert(mon != NULL); 523 assert(mon->owner == dvmThreadSelf()); 524 assert(thread != NULL); 525 assert(waitSetCheck(mon) == 0); 526 if (mon->waitSet == NULL) { 527 return; 528 } 529 if (mon->waitSet == thread) { 530 mon->waitSet = thread->waitNext; 531 thread->waitNext = NULL; 532 return; 533 } 534 elt = mon->waitSet; 535 while (elt->waitNext != NULL) { 536 if (elt->waitNext == thread) { 537 elt->waitNext = thread->waitNext; 538 thread->waitNext = NULL; 539 return; 540 } 541 elt = elt->waitNext; 542 } 543 } 544 545 /* 546 * Converts the given relative waiting time into an absolute time. 
547 */ 548 static void absoluteTime(s8 msec, s4 nsec, struct timespec *ts) 549 { 550 s8 endSec; 551 552 #ifdef HAVE_TIMEDWAIT_MONOTONIC 553 clock_gettime(CLOCK_MONOTONIC, ts); 554 #else 555 { 556 struct timeval tv; 557 gettimeofday(&tv, NULL); 558 ts->tv_sec = tv.tv_sec; 559 ts->tv_nsec = tv.tv_usec * 1000; 560 } 561 #endif 562 endSec = ts->tv_sec + msec / 1000; 563 if (endSec >= 0x7fffffff) { 564 ALOGV("NOTE: end time exceeds epoch"); 565 endSec = 0x7ffffffe; 566 } 567 ts->tv_sec = endSec; 568 ts->tv_nsec = (ts->tv_nsec + (msec % 1000) * 1000000) + nsec; 569 570 /* catch rollover */ 571 if (ts->tv_nsec >= 1000000000L) { 572 ts->tv_sec++; 573 ts->tv_nsec -= 1000000000L; 574 } 575 } 576 577 int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex, 578 s8 msec, s4 nsec) 579 { 580 int ret; 581 struct timespec ts; 582 absoluteTime(msec, nsec, &ts); 583 #if defined(HAVE_TIMEDWAIT_MONOTONIC) 584 ret = pthread_cond_timedwait_monotonic(cond, mutex, &ts); 585 #else 586 ret = pthread_cond_timedwait(cond, mutex, &ts); 587 #endif 588 assert(ret == 0 || ret == ETIMEDOUT); 589 return ret; 590 } 591 592 /* 593 * Wait on a monitor until timeout, interrupt, or notification. Used for 594 * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join(). 595 * 596 * If another thread calls Thread.interrupt(), we throw InterruptedException 597 * and return immediately if one of the following are true: 598 * - blocked in wait(), wait(long), or wait(long, int) methods of Object 599 * - blocked in join(), join(long), or join(long, int) methods of Thread 600 * - blocked in sleep(long), or sleep(long, int) methods of Thread 601 * Otherwise, we set the "interrupted" flag. 602 * 603 * Checks to make sure that "nsec" is in the range 0-999999 604 * (i.e. fractions of a millisecond) and throws the appropriate 605 * exception if it isn't. 606 * 607 * The spec allows "spurious wakeups", and recommends that all code using 608 * Object.wait() do so in a loop. 
This appears to derive from concerns
 * about pthread_cond_wait() on multiprocessor systems.  Some commentary
 * on the web casts doubt on whether these can/should occur.
 *
 * Since we're allowed to wake up "early", we clamp extremely long durations
 * to return at the end of the 32-bit time epoch.
 */
static void waitMonitor(Thread* self, Monitor* mon, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    struct timespec ts;
    bool wasInterrupted = false;
    bool timed;
    int ret;

    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowIllegalMonitorStateException(
            "object not locked by thread before wait()");
        return;
    }

    /*
     * Enforce the timeout range.
     */
    if (msec < 0 || nsec < 0 || nsec > 999999) {
        dvmThrowIllegalArgumentException("timeout arguments out of range");
        return;
    }

    /*
     * Compute absolute wakeup time, if necessary.  wait(0,0) means wait
     * forever (untimed).
     */
    if (msec == 0 && nsec == 0) {
        timed = false;
    } else {
        absoluteTime(msec, nsec, &ts);
        timed = true;
    }

    /*
     * Add ourselves to the set of threads waiting on this monitor, and
     * release our hold.  We need to let it go even if we're a few levels
     * deep in a recursive lock, and we need to restore that later.
     *
     * We append to the wait set ahead of clearing the count and owner
     * fields so the subroutine can check that the calling thread owns
     * the monitor.  Aside from that, the order of member updates is
     * not order sensitive as we hold the pthread mutex.
     */
    waitSetAppend(mon, self);
    int prevLockCount = mon->lockCount;
    mon->lockCount = 0;
    mon->owner = NULL;

    /* Stash the sampled-ownership info so it can be restored in 'done'. */
    const Method* savedMethod = mon->ownerMethod;
    u4 savedPc = mon->ownerPc;
    mon->ownerMethod = NULL;
    mon->ownerPc = 0;

    /*
     * Update thread status.  If the GC wakes up, it'll ignore us, knowing
     * that we won't touch any references in this state, and we'll check
     * our suspend mode before we transition out.
     */
    if (timed)
        dvmChangeStatus(self, THREAD_TIMED_WAIT);
    else
        dvmChangeStatus(self, THREAD_WAIT);

    dvmLockMutex(&self->waitMutex);

    /*
     * Set waitMonitor to the monitor object we will be waiting on.
     * When waitMonitor is non-NULL a notifying or interrupting thread
     * must signal the thread's waitCond to wake it up.
     */
    assert(self->waitMonitor == NULL);
    self->waitMonitor = mon;

    /*
     * Handle the case where the thread was interrupted before we called
     * wait().
     */
    if (self->interrupted) {
        wasInterrupted = true;
        self->waitMonitor = NULL;
        dvmUnlockMutex(&self->waitMutex);
        goto done;
    }

    /*
     * Release the monitor lock and wait for a notification or
     * a timeout to occur.  Note we still hold self->waitMutex here,
     * so a notify between the unlock and the cond wait cannot be lost.
     */
    dvmUnlockMutex(&mon->lock);

    if (!timed) {
        ret = pthread_cond_wait(&self->waitCond, &self->waitMutex);
        assert(ret == 0);
    } else {
#ifdef HAVE_TIMEDWAIT_MONOTONIC
        ret = pthread_cond_timedwait_monotonic(&self->waitCond, &self->waitMutex, &ts);
#else
        ret = pthread_cond_timedwait(&self->waitCond, &self->waitMutex, &ts);
#endif
        assert(ret == 0 || ret == ETIMEDOUT);
    }
    if (self->interrupted) {
        wasInterrupted = true;
    }

    self->interrupted = false;
    self->waitMonitor = NULL;

    dvmUnlockMutex(&self->waitMutex);

    /* Reacquire the monitor lock. */
    lockMonitor(self, mon);

done:
    /*
     * We remove our thread from wait set after restoring the count
     * and owner fields so the subroutine can check that the calling
     * thread owns the monitor.  Aside from that, the order of member
     * updates is not order sensitive as we hold the pthread mutex.
     */
    mon->owner = self;
    mon->lockCount = prevLockCount;
    mon->ownerMethod = savedMethod;
    mon->ownerPc = savedPc;
    waitSetRemove(mon, self);

    /* set self->status back to THREAD_RUNNING, and self-suspend if needed */
    dvmChangeStatus(self, THREAD_RUNNING);

    if (wasInterrupted) {
        /*
         * We were interrupted while waiting, or somebody interrupted an
         * un-interruptible thread earlier and we're bailing out immediately.
         *
         * The doc sayeth: "The interrupted status of the current thread is
         * cleared when this exception is thrown."
         */
        self->interrupted = false;
        if (interruptShouldThrow) {
            dvmThrowInterruptedException(NULL);
        }
    }
}

/*
 * Notify one thread waiting on this monitor.
 */
static void notifyMonitor(Thread* self, Monitor* mon)
{
    Thread* thread;

    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock. */
    if (mon->owner != self) {
        dvmThrowIllegalMonitorStateException(
            "object not locked by thread before notify()");
        return;
    }
    /* Signal the first waiting thread in the wait set.  Threads that have
     * already been woken (waitMonitor == NULL) are skipped over. */
    while (mon->waitSet != NULL) {
        thread = mon->waitSet;
        mon->waitSet = thread->waitNext;
        thread->waitNext = NULL;
        dvmLockMutex(&thread->waitMutex);
        /* Check to see if the thread is still waiting. */
        if (thread->waitMonitor != NULL) {
            pthread_cond_signal(&thread->waitCond);
            dvmUnlockMutex(&thread->waitMutex);
            return;
        }
        dvmUnlockMutex(&thread->waitMutex);
    }
}

/*
 * Notify all threads waiting on this monitor.
 */
static void notifyAllMonitor(Thread* self, Monitor* mon)
{
    Thread* thread;

    assert(self != NULL);
    assert(mon != NULL);

    /* Make sure that we hold the lock.
     */
    if (mon->owner != self) {
        dvmThrowIllegalMonitorStateException(
            "object not locked by thread before notifyAll()");
        return;
    }
    /* Signal all threads in the wait set. */
    while (mon->waitSet != NULL) {
        thread = mon->waitSet;
        mon->waitSet = thread->waitNext;
        thread->waitNext = NULL;
        dvmLockMutex(&thread->waitMutex);
        /* Check to see if the thread is still waiting. */
        if (thread->waitMonitor != NULL) {
            pthread_cond_signal(&thread->waitCond);
        }
        dvmUnlockMutex(&thread->waitMutex);
    }
}

/*
 * Changes the shape of a monitor from thin to fat, preserving the
 * internal lock state.  The calling thread must own the lock.
 */
static void inflateMonitor(Thread *self, Object *obj)
{
    Monitor *mon;
    u4 thin;

    assert(self != NULL);
    assert(obj != NULL);
    assert(LW_SHAPE(obj->lock) == LW_SHAPE_THIN);
    assert(LW_LOCK_OWNER(obj->lock) == self->threadId);
    /* Allocate and acquire a new monitor. */
    mon = dvmCreateMonitor(obj);
    lockMonitor(self, mon);
    /* Propagate the lock state. */
    thin = obj->lock;
    mon->lockCount = LW_LOCK_COUNT(thin);
    /* Keep only the hash-state bits; fold in the monitor pointer and the
     * fat-shape bit (the pointer's low 3 bits are zero by alignment). */
    thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
    thin |= (u4)mon | LW_SHAPE_FAT;
    /* Publish the updated lock word. */
    android_atomic_release_store(thin, (int32_t *)&obj->lock);
}

/*
 * Implements monitorenter for "synchronized" stuff.
 *
 * This does not fail or throw an exception (unless deadlock prediction
 * is enabled and set to "err" mode).
 */
void dvmLockObject(Thread* self, Object *obj)
{
    volatile u4 *thinp;
    ThreadStatus oldStatus;
    struct timespec tm;
    long sleepDelayNs;
    long minSleepDelayNs = 1000000;  /* 1 millisecond */
    long maxSleepDelayNs = 1000000000;  /* 1 second */
    u4 thin, newThin, threadId;

    assert(self != NULL);
    assert(obj != NULL);
    threadId = self->threadId;
    thinp = &obj->lock;
retry:
    thin = *thinp;
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /*
         * The lock is a thin lock.  The owner field is used to
         * determine the acquire method, ordered by cost.
         */
        if (LW_LOCK_OWNER(thin) == threadId) {
            /*
             * The calling thread owns the lock.  Increment the
             * value of the recursion count field.
             */
            obj->lock += 1 << LW_LOCK_COUNT_SHIFT;
            if (LW_LOCK_COUNT(obj->lock) == LW_LOCK_COUNT_MASK) {
                /*
                 * The reacquisition limit has been reached.  Inflate
                 * the lock so the next acquire will not overflow the
                 * recursion count field.
                 */
                inflateMonitor(self, obj);
            }
        } else if (LW_LOCK_OWNER(thin) == 0) {
            /*
             * The lock is unowned.  Install the thread id of the
             * calling thread into the owner field.  This is the
             * common case.  In performance critical code the JIT
             * will have tried this before calling out to the VM.
             */
            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
            if (android_atomic_acquire_cas(thin, newThin,
                    (int32_t*)thinp) != 0) {
                /*
                 * The acquire failed.  Try again.
                 */
                goto retry;
            }
        } else {
            ALOGV("(%d) spin on lock %p: %#x (%#x) %#x",
                  threadId, &obj->lock, 0, *thinp, thin);
            /*
             * The lock is owned by another thread.  Notify the VM
             * that we are about to wait.
             */
            oldStatus = dvmChangeStatus(self, THREAD_MONITOR);
            /*
             * Spin until the thin lock is released or inflated.
             */
            sleepDelayNs = 0;
            for (;;) {
                thin = *thinp;
                /*
                 * Check the shape of the lock word.  Another thread
                 * may have inflated the lock while we were waiting.
                 */
                if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
                    if (LW_LOCK_OWNER(thin) == 0) {
                        /*
                         * The lock has been released.  Install the
                         * thread id of the calling thread into the
                         * owner field.
                         */
                        newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
                        if (android_atomic_acquire_cas(thin, newThin,
                                (int32_t *)thinp) == 0) {
                            /*
                             * The acquire succeed.  Break out of the
                             * loop and proceed to inflate the lock.
                             */
                            break;
                        }
                    } else {
                        /*
                         * The lock has not been released.  Yield so
                         * the owning thread can run.  Back off with an
                         * exponentially growing sleep, restarting from
                         * the minimum once it gets large.
                         */
                        if (sleepDelayNs == 0) {
                            sched_yield();
                            sleepDelayNs = minSleepDelayNs;
                        } else {
                            tm.tv_sec = 0;
                            tm.tv_nsec = sleepDelayNs;
                            nanosleep(&tm, NULL);
                            /*
                             * Prepare the next delay value.  Wrap to
                             * avoid once a second polls for eternity.
                             */
                            if (sleepDelayNs < maxSleepDelayNs / 2) {
                                sleepDelayNs *= 2;
                            } else {
                                sleepDelayNs = minSleepDelayNs;
                            }
                        }
                    }
                } else {
                    /*
                     * The thin lock was inflated by another thread.
                     * Let the VM know we are no longer waiting and
                     * try again.
                     */
                    ALOGV("(%d) lock %p surprise-fattened",
                          threadId, &obj->lock);
                    dvmChangeStatus(self, oldStatus);
                    goto retry;
                }
            }
            ALOGV("(%d) spin on lock done %p: %#x (%#x) %#x",
                  threadId, &obj->lock, 0, *thinp, thin);
            /*
             * We have acquired the thin lock.  Let the VM know that
             * we are no longer waiting.
             */
            dvmChangeStatus(self, oldStatus);
            /*
             * Fatten the lock.  A contended lock is inflated so future
             * contenders block on the monitor instead of spinning.
             */
            inflateMonitor(self, obj);
            ALOGV("(%d) lock %p fattened", threadId, &obj->lock);
        }
    } else {
        /*
         * The lock is a fat lock.
         */
        assert(LW_MONITOR(obj->lock) != NULL);
        lockMonitor(self, LW_MONITOR(obj->lock));
    }
}

/*
 * Implements monitorexit for "synchronized" stuff.
 *
 * On failure, throws an exception and returns "false".
 */
bool dvmUnlockObject(Thread* self, Object *obj)
{
    u4 thin;

    assert(self != NULL);
    assert(self->status == THREAD_RUNNING);
    assert(obj != NULL);
    /*
     * Cache the lock word as its value can change while we are
     * examining its state.
     */
    thin = *(volatile u4 *)&obj->lock;
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /*
         * The lock is thin.  We must ensure that the lock is owned
         * by the given thread before unlocking it.
         */
        if (LW_LOCK_OWNER(thin) == self->threadId) {
            /*
             * We are the lock owner.  It is safe to update the lock
             * without CAS as lock ownership guards the lock itself.
             */
            if (LW_LOCK_COUNT(thin) == 0) {
                /*
                 * The lock was not recursively acquired, the common
                 * case.  Unlock by clearing all bits except for the
                 * hash state.
                 */
                thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
                android_atomic_release_store(thin, (int32_t*)&obj->lock);
            } else {
                /*
                 * The object was recursively acquired.  Decrement the
                 * lock recursion count field.
                 */
                obj->lock -= 1 << LW_LOCK_COUNT_SHIFT;
            }
        } else {
            /*
             * We do not own the lock.  The JVM spec requires that we
             * throw an exception in this case.
             */
            dvmThrowIllegalMonitorStateException("unlock of unowned monitor");
            return false;
        }
    } else {
        /*
         * The lock is fat.  We must check to see if unlockMonitor has
         * raised any exceptions before continuing.
         */
        assert(LW_MONITOR(obj->lock) != NULL);
        if (!unlockMonitor(self, LW_MONITOR(obj->lock))) {
            /*
             * An exception has been raised.  Do not fall through.
             */
            return false;
        }
    }
    return true;
}

/*
 * Object.wait().  Also called for class init.
 */
void dvmObjectWait(Thread* self, Object *obj, s8 msec, s4 nsec,
    bool interruptShouldThrow)
{
    Monitor* mon;
    u4 thin = *(volatile u4 *)&obj->lock;

    /* If the lock is still thin, we need to fatten it.
     */
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /* Make sure that 'self' holds the lock.
         */
        if (LW_LOCK_OWNER(thin) != self->threadId) {
            dvmThrowIllegalMonitorStateException(
                "object not locked by thread before wait()");
            return;
        }

        /* This thread holds the lock.  We need to fatten the lock
         * so 'self' can block on it.  Don't update the object lock
         * field yet, because 'self' needs to acquire the lock before
         * any other thread gets a chance.
         */
        inflateMonitor(self, obj);
        ALOGV("(%d) lock %p fattened by wait()", self->threadId, &obj->lock);
    }
    mon = LW_MONITOR(obj->lock);
    waitMonitor(self, mon, msec, nsec, interruptShouldThrow);
}

/*
 * Object.notify().
 */
void dvmObjectNotify(Thread* self, Object *obj)
{
    u4 thin = *(volatile u4 *)&obj->lock;

    /* If the lock is still thin, there aren't any waiters;
     * waiting on an object forces lock fattening.
     */
    if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
        /* Make sure that 'self' holds the lock.
         */
        if (LW_LOCK_OWNER(thin) != self->threadId) {
            dvmThrowIllegalMonitorStateException(
                "object not locked by thread before notify()");
            return;
        }

        /* no-op;  there are no waiters to notify.
         */
    } else {
        /* It's a fat lock.
         */
        notifyMonitor(self, LW_MONITOR(thin));
    }
}

/*
 * Object.notifyAll().
 */
void dvmObjectNotifyAll(Thread* self, Object *obj)
{
    u4 thin = *(volatile u4 *)&obj->lock;

    /* If the lock is still thin, there aren't any waiters;
     * waiting on an object forces lock fattening.
1131 */ 1132 if (LW_SHAPE(thin) == LW_SHAPE_THIN) { 1133 /* Make sure that 'self' holds the lock. 1134 */ 1135 if (LW_LOCK_OWNER(thin) != self->threadId) { 1136 dvmThrowIllegalMonitorStateException( 1137 "object not locked by thread before notifyAll()"); 1138 return; 1139 } 1140 1141 /* no-op; there are no waiters to notify. 1142 */ 1143 } else { 1144 /* It's a fat lock. 1145 */ 1146 notifyAllMonitor(self, LW_MONITOR(thin)); 1147 } 1148 } 1149 1150 /* 1151 * This implements java.lang.Thread.sleep(long msec, int nsec). 1152 * 1153 * The sleep is interruptible by other threads, which means we can't just 1154 * plop into an OS sleep call. (We probably could if we wanted to send 1155 * signals around and rely on EINTR, but that's inefficient and relies 1156 * on native code respecting our signal mask.) 1157 * 1158 * We have to do all of this stuff for Object.wait() as well, so it's 1159 * easiest to just sleep on a private Monitor. 1160 * 1161 * It appears that we want sleep(0,0) to go through the motions of sleeping 1162 * for a very short duration, rather than just returning. 1163 */ 1164 void dvmThreadSleep(u8 msec, u4 nsec) 1165 { 1166 Thread* self = dvmThreadSelf(); 1167 Monitor* mon = gDvm.threadSleepMon; 1168 1169 /* sleep(0,0) wakes up immediately, wait(0,0) means wait forever; adjust */ 1170 if (msec == 0 && nsec == 0) 1171 nsec++; 1172 1173 lockMonitor(self, mon); 1174 waitMonitor(self, mon, msec, nsec, true); 1175 unlockMonitor(self, mon); 1176 } 1177 1178 /* 1179 * Implement java.lang.Thread.interrupt(). 1180 */ 1181 void dvmThreadInterrupt(Thread* thread) 1182 { 1183 assert(thread != NULL); 1184 1185 dvmLockMutex(&thread->waitMutex); 1186 1187 /* 1188 * If the interrupted flag is already set no additional action is 1189 * required. 1190 */ 1191 if (thread->interrupted == true) { 1192 dvmUnlockMutex(&thread->waitMutex); 1193 return; 1194 } 1195 1196 /* 1197 * Raise the "interrupted" flag. 
This will cause it to bail early out 1198 * of the next wait() attempt, if it's not currently waiting on 1199 * something. 1200 */ 1201 thread->interrupted = true; 1202 1203 /* 1204 * Is the thread waiting? 1205 * 1206 * Note that fat vs. thin doesn't matter here; waitMonitor 1207 * is only set when a thread actually waits on a monitor, 1208 * which implies that the monitor has already been fattened. 1209 */ 1210 if (thread->waitMonitor != NULL) { 1211 pthread_cond_signal(&thread->waitCond); 1212 } 1213 1214 dvmUnlockMutex(&thread->waitMutex); 1215 } 1216 1217 #ifndef WITH_COPYING_GC 1218 u4 dvmIdentityHashCode(Object *obj) 1219 { 1220 return (u4)obj; 1221 } 1222 #else 1223 /* 1224 * Returns the identity hash code of the given object. 1225 */ 1226 u4 dvmIdentityHashCode(Object *obj) 1227 { 1228 Thread *self, *thread; 1229 volatile u4 *lw; 1230 size_t size; 1231 u4 lock, owner, hashState; 1232 1233 if (obj == NULL) { 1234 /* 1235 * Null is defined to have an identity hash code of 0. 1236 */ 1237 return 0; 1238 } 1239 lw = &obj->lock; 1240 retry: 1241 hashState = LW_HASH_STATE(*lw); 1242 if (hashState == LW_HASH_STATE_HASHED) { 1243 /* 1244 * The object has been hashed but has not had its hash code 1245 * relocated by the garbage collector. Use the raw object 1246 * address. 1247 */ 1248 return (u4)obj >> 3; 1249 } else if (hashState == LW_HASH_STATE_HASHED_AND_MOVED) { 1250 /* 1251 * The object has been hashed and its hash code has been 1252 * relocated by the collector. Use the value of the naturally 1253 * aligned word following the instance data. 1254 */ 1255 assert(!dvmIsClassObject(obj)); 1256 if (IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISARRAY)) { 1257 size = dvmArrayObjectSize((ArrayObject *)obj); 1258 size = (size + 2) & ~2; 1259 } else { 1260 size = obj->clazz->objectSize; 1261 } 1262 return *(u4 *)(((char *)obj) + size); 1263 } else if (hashState == LW_HASH_STATE_UNHASHED) { 1264 /* 1265 * The object has never been hashed. 
Change the hash state to 1266 * hashed and use the raw object address. 1267 */ 1268 self = dvmThreadSelf(); 1269 if (self->threadId == lockOwner(obj)) { 1270 /* 1271 * We already own the lock so we can update the hash state 1272 * directly. 1273 */ 1274 *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT); 1275 return (u4)obj >> 3; 1276 } 1277 /* 1278 * We do not own the lock. Try acquiring the lock. Should 1279 * this fail, we must suspend the owning thread. 1280 */ 1281 if (LW_SHAPE(*lw) == LW_SHAPE_THIN) { 1282 /* 1283 * If the lock is thin assume it is unowned. We simulate 1284 * an acquire, update, and release with a single CAS. 1285 */ 1286 lock = (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT); 1287 if (android_atomic_acquire_cas( 1288 0, 1289 (int32_t)lock, 1290 (int32_t *)lw) == 0) { 1291 /* 1292 * A new lockword has been installed with a hash state 1293 * of hashed. Use the raw object address. 1294 */ 1295 return (u4)obj >> 3; 1296 } 1297 } else { 1298 if (tryLockMonitor(self, LW_MONITOR(*lw))) { 1299 /* 1300 * The monitor lock has been acquired. Change the 1301 * hash state to hashed and use the raw object 1302 * address. 1303 */ 1304 *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT); 1305 unlockMonitor(self, LW_MONITOR(*lw)); 1306 return (u4)obj >> 3; 1307 } 1308 } 1309 /* 1310 * At this point we have failed to acquire the lock. We must 1311 * identify the owning thread and suspend it. 1312 */ 1313 dvmLockThreadList(self); 1314 /* 1315 * Cache the lock word as its value can change between 1316 * determining its shape and retrieving its owner. 1317 */ 1318 lock = *lw; 1319 if (LW_SHAPE(lock) == LW_SHAPE_THIN) { 1320 /* 1321 * Find the thread with the corresponding thread id. 1322 */ 1323 owner = LW_LOCK_OWNER(lock); 1324 assert(owner != self->threadId); 1325 /* 1326 * If the lock has no owner do not bother scanning the 1327 * thread list and fall through to the failure handler. 1328 */ 1329 thread = owner ? 
gDvm.threadList : NULL; 1330 while (thread != NULL) { 1331 if (thread->threadId == owner) { 1332 break; 1333 } 1334 thread = thread->next; 1335 } 1336 } else { 1337 thread = LW_MONITOR(lock)->owner; 1338 } 1339 /* 1340 * If thread is NULL the object has been released since the 1341 * thread list lock was acquired. Try again. 1342 */ 1343 if (thread == NULL) { 1344 dvmUnlockThreadList(); 1345 goto retry; 1346 } 1347 /* 1348 * Wait for the owning thread to suspend. 1349 */ 1350 dvmSuspendThread(thread); 1351 if (dvmHoldsLock(thread, obj)) { 1352 /* 1353 * The owning thread has been suspended. We can safely 1354 * change the hash state to hashed. 1355 */ 1356 *lw |= (LW_HASH_STATE_HASHED << LW_HASH_STATE_SHIFT); 1357 dvmResumeThread(thread); 1358 dvmUnlockThreadList(); 1359 return (u4)obj >> 3; 1360 } 1361 /* 1362 * The wrong thread has been suspended. Try again. 1363 */ 1364 dvmResumeThread(thread); 1365 dvmUnlockThreadList(); 1366 goto retry; 1367 } 1368 ALOGE("object %p has an unknown hash state %#x", obj, hashState); 1369 dvmDumpThread(dvmThreadSelf(), false); 1370 dvmAbort(); 1371 return 0; /* Quiet the compiler. */ 1372 } 1373 #endif /* WITH_COPYING_GC */ 1374