/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Garbage-collecting memory allocator.
 */
#include "Dalvik.h"
#include "alloc/HeapBitmap.h"
#include "alloc/Verify.h"
#include "alloc/HeapTable.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
#include "alloc/HeapSource.h"
#include "alloc/MarkSweep.h"
#include "alloc/Visit.h"

#include "utils/threads.h"      // need Android thread priorities
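/* Sentinel meaning "no saved thread priority to restore"; real nice
 * values fall in [-20, 19], so this can never collide with one.
 */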
#define kInvalidPriority        10000

#include <cutils/sched_policy.h>

#include <sys/time.h>
#include <sys/resource.h>
#include <limits.h>
#include <errno.h>

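/* Log strings for each GC cause, indexed by GcReason.  The C99
 * designated initializers keep the table correct even if the enum
 * ordering changes.
 */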
static const char* GcReasonStr[] = {
    [GC_FOR_MALLOC] = "GC_FOR_MALLOC",
    [GC_CONCURRENT] = "GC_CONCURRENT",
    [GC_EXPLICIT] = "GC_EXPLICIT",
    [GC_EXTERNAL_ALLOC] = "GC_EXTERNAL_ALLOC",
    [GC_HPROF_DUMP_HEAP] = "GC_HPROF_DUMP_HEAP"
};

/*
 * Initialize the GC heap.
 *
 * Returns true if successful, false otherwise.
 */
bool dvmHeapStartup()
{
    GcHeap *gcHeap;

#if defined(WITH_ALLOC_LIMITS)
    gDvm.checkAllocLimits = false;
    gDvm.allocationLimit = -1;
#endif

    gcHeap = dvmHeapSourceStartup(gDvm.heapSizeStart, gDvm.heapSizeMax);
    if (gcHeap == NULL) {
        return false;
    }
    gcHeap->heapWorkerCurrentObject = NULL;
    gcHeap->heapWorkerCurrentMethod = NULL;
    gcHeap->heapWorkerInterpStartTime = 0LL;
    gcHeap->ddmHpifWhen = 0;
    gcHeap->ddmHpsgWhen = 0;
    gcHeap->ddmHpsgWhat = 0;
    gcHeap->ddmNhsgWhen = 0;
    gcHeap->ddmNhsgWhat = 0;
#if WITH_HPROF
    gcHeap->hprofDumpOnGc = false;
    gcHeap->hprofContext = NULL;
#endif
    gDvm.gcHeap = gcHeap;

    /* Set up the lists and lock we'll use for finalizable
     * and reference objects.
     */
    dvmInitMutex(&gDvm.heapWorkerListLock);
    gcHeap->finalizableRefs = NULL;
    gcHeap->pendingFinalizationRefs = NULL;
    gcHeap->referenceOperations = NULL;

    if (!dvmCardTableStartup()) {
        LOGE_HEAP("card table startup failed.");
        return false;
    }

    /* Initialize the HeapWorker locks and other state
     * that the GC uses.
     */
    dvmInitializeHeapWorkerState();

    return true;
}

bool dvmHeapStartupAfterZygote(void)
{
    return dvmHeapSourceStartupAfterZygote();
}

void dvmHeapShutdown()
{
//TODO: make sure we're locked
    if (gDvm.gcHeap != NULL) {
        dvmCardTableShutdown();
        /* Tables are allocated on the native heap; they need to be
         * cleaned up explicitly.  The process may stick around, so we
         * don't want to leak any native memory.
         */
        dvmHeapFreeLargeTable(gDvm.gcHeap->finalizableRefs);
        gDvm.gcHeap->finalizableRefs = NULL;

        dvmHeapFreeLargeTable(gDvm.gcHeap->pendingFinalizationRefs);
        gDvm.gcHeap->pendingFinalizationRefs = NULL;

        dvmHeapFreeLargeTable(gDvm.gcHeap->referenceOperations);
        gDvm.gcHeap->referenceOperations = NULL;

        /* Destroy the heap.  Any outstanding pointers will point to
         * unmapped memory (unless/until someone else maps it).  This
         * frees gDvm.gcHeap as a side-effect.
         */
        dvmHeapSourceShutdown(&gDvm.gcHeap);
    }
}

/*
 * Shut down any threads internal to the heap.
 */
void dvmHeapThreadShutdown(void)
{
    dvmHeapSourceThreadShutdown();
}

/*
 * We've been asked to allocate something we can't, e.g. an array so
 * large that (length * elementWidth) is larger than 2^31.
 *
 * _The Java Programming Language_, 4th edition, says, "you can be sure
 * that all SoftReferences to softly reachable objects will be cleared
 * before an OutOfMemoryError is thrown."
 *
 * It's unclear whether that holds for all situations where an OOM can
 * be thrown, or just in the context of an allocation that fails due
 * to lack of heap space.  For simplicity we just throw the exception.
 *
 * (OOM due to actually running out of space is handled elsewhere.)
 */
void dvmThrowBadAllocException(const char* msg)
{
    dvmThrowException("Ljava/lang/OutOfMemoryError;", msg);
}

/*
 * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
 * we're going to have to wait on the mutex.
 */
bool dvmLockHeap()
{
    if (dvmTryLockMutex(&gDvm.gcHeapLock) != 0) {
        Thread *self;
        ThreadStatus oldStatus;

        self = dvmThreadSelf();
        oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmLockMutex(&gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }

    return true;
}

void dvmUnlockHeap()
{
    dvmUnlockMutex(&gDvm.gcHeapLock);
}

/* Pop an object from the list of pending finalizations and
 * reference clears/enqueues, and return the object.
 * The caller must call dvmReleaseTrackedAlloc()
 * on the object when finished.
 *
 * Typically only called by the heap worker thread.
 */
Object *dvmGetNextHeapWorkerObject(HeapWorkerOperation *op)
{
    Object *obj;
    GcHeap *gcHeap = gDvm.gcHeap;

    assert(op != NULL);

    dvmLockMutex(&gDvm.heapWorkerListLock);

    obj = dvmHeapGetNextObjectFromLargeTable(&gcHeap->referenceOperations);
    if (obj != NULL) {
        *op = WORKER_ENQUEUE;
    } else {
        obj = dvmHeapGetNextObjectFromLargeTable(
                &gcHeap->pendingFinalizationRefs);
        if (obj != NULL) {
            *op = WORKER_FINALIZE;
        }
    }

    if (obj != NULL) {
        /* Don't let the GC collect the object until the
         * worker thread is done with it.
         */
        dvmAddTrackedAlloc(obj, NULL);
    }

    dvmUnlockMutex(&gDvm.heapWorkerListLock);

    return obj;
}
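
/* Illustrative only: a minimal sketch of the consumer loop a heap worker
 * might run, assuming doHeapWork() is a hypothetical helper that enqueues
 * a reference or runs a finalizer depending on "op":
 *
 *     HeapWorkerOperation op;
 *     Object *obj;
 *     while ((obj = dvmGetNextHeapWorkerObject(&op)) != NULL) {
 *         doHeapWork(obj, op);                 // WORKER_ENQUEUE or WORKER_FINALIZE
 *         dvmReleaseTrackedAlloc(obj, NULL);   // drop the temporary GC root
 *     }
 */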

/* Do a full garbage collection, which may grow the
 * heap as a side-effect if the live set is large.
 */
static void gcForMalloc(bool collectSoftReferences)
{
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
    /* This may adjust the soft limit as a side-effect.
     */
    LOGD_HEAP("dvmMalloc initiating GC%s\n",
            collectSoftReferences ? " (collect SoftReferences)" : "");
    dvmCollectGarbageInternal(collectSoftReferences, GC_FOR_MALLOC);
}

/* Try as hard as possible to allocate some memory.  Escalates through
 * progressively more expensive strategies: a plain allocation, waiting
 * for a concurrent GC and retrying, a foreground GC, growing the heap,
 * and finally a GC that also collects SoftReferences.
 */
static void *tryMalloc(size_t size)
{
    void *ptr;

    /* Don't try too hard if there's no way the allocation is
     * going to succeed.  We have to collect SoftReferences before
     * throwing an OOME, though.
     */
    if (size >= gDvm.heapSizeMax) {
        LOGW_HEAP("dvmMalloc(%zu/0x%08zx): "
                "someone's allocating a huge buffer\n", size, size);
        ptr = NULL;
        goto collect_soft_refs;
    }

//TODO: figure out better heuristics
//    There will be a lot of churn if someone allocates a bunch of
//    big objects in a row, and we hit the frag case each time.
//    A full GC for each.
//    Maybe we grow the heap in bigger leaps
//    Maybe we skip the GC if the size is large and we did one recently
//      (number of allocations ago) (watch for thread effects)
//    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
//      (or, at least, there are only 0-5 objects swept each time)

    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /*
     * The allocation failed.  If the GC is running, block until it
     * completes and retry.
     */
    if (gDvm.gcHeap->gcRunning) {
        /*
         * The GC is concurrently tracing the heap.  Release the heap
         * lock, wait for the GC to complete, and retry the allocation.
         */
        dvmWaitForConcurrentGcToComplete();
        ptr = dvmHeapSourceAlloc(size);
        if (ptr != NULL) {
            return ptr;
        }
    }
    /*
     * Another failure.  Our thread was starved or there may be too
     * many live objects.  Try a foreground GC.  This will have no
     * effect if the concurrent GC is already running.
     */
    gcForMalloc(false);
    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /* Even that didn't work; this is an exceptional state.
     * Try harder, growing the heap if necessary.
     */
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        size_t newHeapSize;

        newHeapSize = dvmHeapSourceGetIdealFootprint();
//TODO: may want to grow a little bit more so that the amount of free
//      space is equal to the old free space + the utilization slop for
//      the new allocation.
        LOGI_HEAP("Grow heap (frag case) to "
                "%zu.%03zuMB for %zu-byte allocation\n",
                FRACTIONAL_MB(newHeapSize), size);
        return ptr;
    }

    /* Most allocations should have succeeded by now, so the heap
     * is really full, really fragmented, or the requested size is
     * really big.  Do another GC, collecting SoftReferences this
     * time.  The VM spec requires that all SoftReferences have
     * been collected and cleared before throwing an OOME.
     */
//TODO: wait for the finalizers from the previous GC to finish
collect_soft_refs:
    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation\n",
            size);
    gcForMalloc(true);
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        return ptr;
    }
//TODO: maybe wait for finalizers and try one last time

    LOGE_HEAP("Out of memory on a %zd-byte allocation.\n", size);
//TODO: tell the HeapSource to dump its state
    dvmDumpThread(dvmThreadSelf(), false);

    return NULL;
}

/* Throw an OutOfMemoryError if there's a thread to attach it to.
 * Avoid recursing.
 *
 * The caller must not be holding the heap lock, or else the allocations
 * in dvmThrowException() will deadlock.
 */
static void throwOOME()
{
    Thread *self;

    if ((self = dvmThreadSelf()) != NULL) {
        /* If the current (failing) dvmMalloc() happened as part of thread
         * creation/attachment before the thread became part of the root set,
         * we can't rely on the thread-local trackedAlloc table, so
         * we can't keep track of a real allocated OOME object.  But, since
         * the thread is in the process of being created, it won't have
         * a useful stack anyway, so we may as well make things easier
         * by throwing the (stackless) pre-built OOME.
         */
        if (dvmIsOnThreadList(self) && !self->throwingOOME) {
            /* Let ourselves know that we tried to throw an OOM
             * error in the normal way in case we run out of
             * memory trying to allocate it inside dvmThrowException().
             */
            self->throwingOOME = true;

            /* Don't include a description string;
             * one fewer allocation.
             */
            dvmThrowException("Ljava/lang/OutOfMemoryError;", NULL);
        } else {
            /*
             * This thread has already tried to throw an OutOfMemoryError,
             * which probably means that we're running out of memory
             * while recursively trying to throw.
             *
             * To avoid any more allocation attempts, "throw" a pre-built
             * OutOfMemoryError object (which won't have a useful stack trace).
             *
             * Note that since this call can't possibly allocate anything,
             * we don't care about the state of self->throwingOOME
             * (which will usually already be set).
             */
            dvmSetException(self, gDvm.outOfMemoryObj);
        }
        /* We're done with the possible recursion.
         */
        self->throwingOOME = false;
    }
}

/*
 * Allocate storage on the GC heap.  We guarantee 8-byte alignment.
 *
 * The new storage is zeroed out.
 *
 * Note that, in rare circumstances (JNI AttachCurrentThread), we can be
 * called from a non-VM thread, possibly while a GC is in progress; a
 * thread attaching itself needs to allocate some objects.  If this
 * becomes annoying to deal with, we can block it at the source, but
 * holding the allocation mutex should be enough.
 *
 * Use ALLOC_DONT_TRACK when we either don't want to track an allocation
 * (because it's being done for the interpreter "new" operation and will
 * be part of the root set immediately) or we can't (because this allocation
 * is for a brand new thread).
 *
 * Returns NULL and throws an exception on failure.
 *
 * TODO: don't do a GC if the debugger thinks all threads are suspended
 */
void* dvmMalloc(size_t size, int flags)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    void *ptr;

#if defined(WITH_ALLOC_LIMITS)
    /*
     * See if they've exceeded the allocation limit for this thread.
     *
     * A limit value of -1 means "no limit".
     *
     * This is enabled at compile time because it requires us to do a
     * TLS lookup for the Thread pointer.  This has enough of a performance
     * impact that we don't want to do it if we don't have to.  (Now that
     * we're using gDvm.checkAllocLimits we may want to reconsider this,
     * but it's probably still best to just compile the check out of
     * production code -- one less thing to hit on every allocation.)
     */
    if (gDvm.checkAllocLimits) {
        Thread* self = dvmThreadSelf();
        if (self != NULL) {
            int count = self->allocLimit;
            if (count > 0) {
                self->allocLimit--;
            } else if (count == 0) {
                /* fail! */
                assert(!gDvm.initializing);
                self->allocLimit = -1;
                dvmThrowException("Ldalvik/system/AllocationLimitError;",
                    "thread allocation limit exceeded");
                return NULL;
            }
        }
    }

    if (gDvm.allocationLimit >= 0) {
        assert(!gDvm.initializing);
        gDvm.allocationLimit = -1;
        dvmThrowException("Ldalvik/system/AllocationLimitError;",
            "global allocation limit exceeded");
        return NULL;
    }
#endif

    dvmLockHeap();

    /* Try as hard as possible to allocate some memory.
     */
    ptr = tryMalloc(size);
    if (ptr != NULL) {
        /* We've got the memory.
         */
        if ((flags & ALLOC_FINALIZABLE) != 0) {
            /* This object is an instance of a class that
             * overrides finalize().  Add it to the finalizable list.
             */
            if (!dvmHeapAddRefToLargeTable(&gcHeap->finalizableRefs,
                                    (Object *)ptr))
            {
                LOGE_HEAP("dvmMalloc(): no room for any more "
                        "finalizable objects\n");
                dvmAbort();
            }
        }

        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.allocCount++;
            gDvm.allocProf.allocSize += size;
            if (self != NULL) {
                self->allocProf.allocCount++;
                self->allocProf.allocSize += size;
            }
        }
    } else {
        /* The allocation failed.
         */

        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.failedAllocCount++;
            gDvm.allocProf.failedAllocSize += size;
            if (self != NULL) {
                self->allocProf.failedAllocCount++;
                self->allocProf.failedAllocSize += size;
            }
        }
    }

    dvmUnlockHeap();

    if (ptr != NULL) {
        /*
         * If caller hasn't asked us not to track it, add it to the
         * internal tracking list.
         */
        if ((flags & ALLOC_DONT_TRACK) == 0) {
            dvmAddTrackedAlloc(ptr, NULL);
        }
    } else {
        /*
         * The allocation failed; throw an OutOfMemoryError.
         */
        throwOOME();
    }

    return ptr;
}
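
/* Illustrative only: a typical caller, assuming it runs on an attached VM
 * thread and wants a default tracked, non-finalizable allocation (flags 0),
 * with "totalSize" standing in for the computed object size:
 *
 *     Object* obj = (Object*) dvmMalloc(totalSize, 0);
 *     if (obj == NULL) {
 *         return NULL;   // an OutOfMemoryError has already been thrown
 *     }
 *     ... initialize obj and store it somewhere the GC can see ...
 *     dvmReleaseTrackedAlloc(obj, NULL);   // drop the temporary root
 */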

/*
 * Returns true iff <obj> points to a valid allocated object.
 */
bool dvmIsValidObject(const Object* obj)
{
    /* Don't bother if it's NULL or not 8-byte aligned.
     */
    if (obj != NULL && ((uintptr_t)obj & (8-1)) == 0) {
        /* Even if the heap isn't locked, this shouldn't return
         * any false negatives.  The only mutation that could
         * be happening is allocation, which means that another
         * thread could be in the middle of a read-modify-write
         * to add a new bit for a new object.  However, that
         * RMW will have completed by the time any other thread
         * could possibly see the new pointer, so there is no
         * danger of dvmIsValidObject() being called on a valid
         * pointer whose bit isn't set.
         *
         * Freeing will only happen during the sweep phase, which
         * only happens while the heap is locked.
         */
        return dvmHeapSourceContains(obj);
    }
    return false;
}

size_t dvmObjectSizeInHeap(const Object *obj)
{
    return dvmHeapSourceChunkSize(obj);
}

static void verifyRootsAndHeap(void)
{
    dvmVerifyRoots();
    dvmVerifyBitmap(dvmHeapSourceGetLiveBits());
}

/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
void dvmCollectGarbageInternal(bool clearSoftRefs, GcReason reason)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    u4 rootSuspend, rootSuspendTime, rootStart, rootEnd;
    u4 dirtySuspend, dirtyStart, dirtyEnd;
    u4 totalTime;
    size_t numObjectsFreed, numBytesFreed;
    size_t currAllocated, currFootprint;
    size_t extAllocated, extLimit;
    size_t percentFree;
    GcMode gcMode;
    int oldThreadPriority = kInvalidPriority;

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC\n");
        return;
    }

    gcMode = (reason == GC_FOR_MALLOC) ? GC_PARTIAL : GC_FULL;
    gcHeap->gcRunning = true;

    rootSuspend = dvmGetRelativeTimeMsec();
    dvmSuspendAllThreads(SUSPEND_FOR_GC);
    rootStart = dvmGetRelativeTimeMsec();
    rootSuspendTime = rootStart - rootSuspend;

    /*
     * If we are not marking concurrently, raise the priority of the
     * thread performing the garbage collection.
     */
    if (reason != GC_CONCURRENT) {
        /* Get the priority (the "nice" value) of the current thread.  The
         * getpriority() call can legitimately return -1, so we have to
         * explicitly test errno.
         */
        errno = 0;
        int priorityResult = getpriority(PRIO_PROCESS, 0);
        if (errno != 0) {
            LOGI_HEAP("getpriority(self) failed: %s\n", strerror(errno));
        } else if (priorityResult > ANDROID_PRIORITY_NORMAL) {
            /* Current value is numerically greater than "normal", which
             * in backward UNIX terms means lower priority.
             */

            if (priorityResult >= ANDROID_PRIORITY_BACKGROUND) {
                set_sched_policy(dvmGetSysThreadId(), SP_FOREGROUND);
            }

            if (setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL) != 0) {
                LOGI_HEAP("Unable to elevate priority from %d to %d\n",
                          priorityResult, ANDROID_PRIORITY_NORMAL);
            } else {
                /* priority elevated; save value so we can restore it later */
                LOGD_HEAP("Elevating priority from %d to %d\n",
                          priorityResult, ANDROID_PRIORITY_NORMAL);
                oldThreadPriority = priorityResult;
            }
        }
    }

    /* Wait for the HeapWorker thread to block.
     * (It may also already be suspended in interp code,
     * in which case it's not holding heapWorkerLock.)
     */
    dvmLockMutex(&gDvm.heapWorkerLock);

    /* Make sure that the HeapWorker thread hasn't become
     * wedged inside interp code.  If it has, this call will
     * print a message and abort the VM.
     */
    dvmAssertHeapWorkerThreadRunning();

    /* Lock the pendingFinalizationRefs list.
     *
     * Acquire the lock after suspending so the finalizer
     * thread can't block in the RUNNING state while
     * we try to suspend.
     */
    dvmLockMutex(&gDvm.heapWorkerListLock);

    if (gDvm.preVerify) {
        LOGV_HEAP("Verifying roots and heap before GC");
        verifyRootsAndHeap();
    }

    dvmMethodTraceGCBegin();

#if WITH_HPROF

/* Set DUMP_HEAP_ON_DDMS_UPDATE to 1 to enable heap dumps
 * whenever DDMS requests a heap update (HPIF chunk).
 * The output files will appear in /data/misc, which must
 * already exist.
 * You must define "WITH_HPROF := true" in your buildspec.mk
 * and recompile libdvm for this to work.
 *
 * To enable stack traces for each allocation, define
 * "WITH_HPROF_STACK := true" in buildspec.mk.  This option slows down
 * allocations and also requires 8 additional bytes per object on the
 * GC heap.
 */
#define DUMP_HEAP_ON_DDMS_UPDATE 0
#if DUMP_HEAP_ON_DDMS_UPDATE
    gcHeap->hprofDumpOnGc |= (gcHeap->ddmHpifWhen != 0);
#endif

    if (gcHeap->hprofDumpOnGc) {
        char nameBuf[128];

        gcHeap->hprofResult = -1;

        if (gcHeap->hprofFileName == NULL) {
            /* no filename was provided; invent one */
            sprintf(nameBuf, "/data/misc/heap-dump-tm%d-pid%d.hprof",
                (int) time(NULL), (int) getpid());
            gcHeap->hprofFileName = nameBuf;
        }
        gcHeap->hprofContext = hprofStartup(gcHeap->hprofFileName,
                gcHeap->hprofFd, gcHeap->hprofDirectToDdms);
        if (gcHeap->hprofContext != NULL) {
            hprofStartHeapDump(gcHeap->hprofContext);
        }
        gcHeap->hprofDumpOnGc = false;
        gcHeap->hprofFileName = NULL;
    }
#endif

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep(gcMode)) {
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting\n");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    gcHeap->softReferences = NULL;
    gcHeap->weakReferences = NULL;
    gcHeap->phantomReferences = NULL;

    if (reason == GC_CONCURRENT) {
        /*
         * Resume threads while tracing from the roots.  We unlock the
         * heap to allow mutator threads to allocate from free space.
         */
        rootEnd = dvmGetRelativeTimeMsec();
        dvmClearCardTable();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();

    if (reason == GC_CONCURRENT) {
        /*
         * Re-acquire the heap lock and perform the final thread
         * suspension.
         */
        dvmLockHeap();
        dirtySuspend = dvmGetRelativeTimeMsec();
        dvmSuspendAllThreads(SUSPEND_FOR_GC);
        dirtyStart = dvmGetRelativeTimeMsec();
        /*
         * As no barrier intercepts root updates, we conservatively
         * assume all roots may be gray and re-mark them.
         */
        dvmHeapReMarkRootSet();
        /*
         * With the exception of reference objects and weak interned
         * strings, all gray objects should now be on dirty cards.
         */
        if (gDvm.verifyCardTable) {
            dvmVerifyCardTable();
        }
        /*
         * Recursively mark gray objects pointed to by the roots or by
         * heap objects dirtied during the concurrent mark.
         */
        dvmHeapReScanMarkedObjects();
    }

    /* All strongly-reachable objects have now been marked.
     */
    LOGD_HEAP("Handling soft references...");
    if (!clearSoftRefs) {
        dvmHandleSoftRefs(&gcHeap->softReferences);
    }
    dvmClearWhiteRefs(&gcHeap->softReferences);

    LOGD_HEAP("Handling weak references...");
    dvmClearWhiteRefs(&gcHeap->weakReferences);

    /* Once all weak-reachable objects have been taken
     * care of, any remaining unmarked objects can be finalized.
     */
    LOGD_HEAP("Finding finalizations...");
    dvmHeapScheduleFinalizations();

    LOGD_HEAP("Handling f-reachable soft references...");
    dvmClearWhiteRefs(&gcHeap->softReferences);

    LOGD_HEAP("Handling f-reachable weak references...");
    dvmClearWhiteRefs(&gcHeap->weakReferences);

    /* Any remaining objects that are not pending finalization
     * could be phantom-reachable.  This will mark any phantom-reachable
     * objects, as well as enqueue their references.
     */
    LOGD_HEAP("Handling phantom references...");
    dvmClearWhiteRefs(&gcHeap->phantomReferences);

#if defined(WITH_JIT)
    /*
     * Patching a chaining cell is very cheap as it only updates 4 words. It's
     * the overhead of stopping all threads and synchronizing the I/D cache
     * that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    LOGD_HEAP("Sweeping...");

    dvmHeapSweepSystemWeaks();

    /*
     * Live objects have a bit set in the mark bitmap; swap the mark
     * and live bitmaps.  The sweep can proceed concurrently, viewing
     * the new live bitmap as the old mark bitmap, and vice versa.
     */
    dvmHeapSourceSwapBitmaps();

    if (gDvm.postVerify) {
        LOGV_HEAP("Verifying roots and heap after GC");
        verifyRootsAndHeap();
    }

    if (reason == GC_CONCURRENT) {
        dirtyEnd = dvmGetRelativeTimeMsec();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
    }
    dvmHeapSweepUnmarkedObjects(gcMode, reason == GC_CONCURRENT,
                                &numObjectsFreed, &numBytesFreed);
    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();
    if (reason == GC_CONCURRENT) {
        dvmLockHeap();
    }

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    if (reason != GC_EXTERNAL_ALLOC) {
        dvmHeapSourceGrowForUtilization();
    }

    currAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    currFootprint = dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);

#if WITH_HPROF
    if (gcHeap->hprofContext != NULL) {
        hprofFinishHeapDump(gcHeap->hprofContext);
//TODO: write a HEAP_SUMMARY record
        if (hprofShutdown(gcHeap->hprofContext))
            gcHeap->hprofResult = 0;    /* indicate success */
        gcHeap->hprofContext = NULL;
    }
#endif

    /* Now that we've freed up the GC heap, return any large
     * free chunks back to the system.  They'll get paged back
     * in the next time they're used.  Don't do it immediately,
     * though; if the process is still allocating a bunch of
     * memory, we'd be taking a ton of page faults that we don't
     * necessarily need to take.
     *
     * Cancel any old scheduled trims, and schedule a new one.
     */
    dvmScheduleHeapSourceTrim(5);  // in seconds

    dvmMethodTraceGCEnd();
    LOGV_HEAP("GC finished");

    gcHeap->gcRunning = false;

    LOGV_HEAP("Resuming threads");
    dvmUnlockMutex(&gDvm.heapWorkerListLock);
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    if (reason == GC_CONCURRENT) {
        /*
         * Wake up any threads that blocked after a failed allocation
         * request.
         */
        dvmBroadcastCond(&gDvm.gcHeapCond);
    }

    if (reason != GC_CONCURRENT) {
        dirtyEnd = dvmGetRelativeTimeMsec();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        if (oldThreadPriority != kInvalidPriority) {
            if (setpriority(PRIO_PROCESS, 0, oldThreadPriority) != 0) {
                LOGW_HEAP("Unable to reset priority to %d: %s\n",
                          oldThreadPriority, strerror(errno));
            } else {
                LOGD_HEAP("Reset priority to %d\n", oldThreadPriority);
            }

            if (oldThreadPriority >= ANDROID_PRIORITY_BACKGROUND) {
                set_sched_policy(dvmGetSysThreadId(), SP_BACKGROUND);
            }
        }
    }

    extAllocated = dvmHeapSourceGetValue(HS_EXTERNAL_BYTES_ALLOCATED, NULL, 0);
    extLimit = dvmHeapSourceGetValue(HS_EXTERNAL_LIMIT, NULL, 0);
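    /* Free-space percentage of the current footprint; e.g. 12288K
     * allocated in a 16384K footprint gives 100 - 75 = 25% free.
     */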
    percentFree = 100 - (size_t)(100.0f * (float)currAllocated / currFootprint);
    if (reason != GC_CONCURRENT) {
        u4 markSweepTime = dirtyEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        totalTime = rootSuspendTime + markSweepTime;
        LOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, external %zdK/%zdK, "
             "paused %ums",
             GcReasonStr[reason],
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             extAllocated / 1024, extLimit / 1024,
             markSweepTime);
    } else {
        u4 rootTime = rootEnd - rootStart;
        u4 dirtySuspendTime = dirtyStart - dirtySuspend;
        u4 dirtyTime = dirtyEnd - dirtyStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        totalTime = rootSuspendTime + rootTime + dirtySuspendTime + dirtyTime;
        LOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, external %zdK/%zdK, "
             "paused %ums+%ums",
             GcReasonStr[reason],
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             extAllocated / 1024, extLimit / 1024,
             rootTime, dirtyTime);
    }
    dvmLogGcStats(numObjectsFreed, numBytesFreed, totalTime);
    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM\n");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM\n");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM\n");
        dvmDdmSendHeapSegments(false, true);
    }
}

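/*
 * Block until the concurrent GC completes.  The caller must hold the
 * heap lock; dvmWaitCond() releases it while waiting and re-acquires
 * it before returning.
 */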
void dvmWaitForConcurrentGcToComplete(void)
{
    Thread *self = dvmThreadSelf();
    ThreadStatus oldStatus;
    assert(self != NULL);
    oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
    dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
    dvmChangeStatus(self, oldStatus);
}

#if WITH_HPROF
/*
 * Perform garbage collection, writing heap information to the specified file.
 *
 * If "fd" is >= 0, the output will be written to that file descriptor.
 * Otherwise, "fileName" is used to create an output file.
 *
 * If "fileName" is NULL, a suitable name will be generated automatically.
 * (TODO: remove this when the SIGUSR1 feature goes away)
 *
 * If "directToDdms" is set, the other arguments are ignored, and data is
 * sent directly to DDMS.
 *
 * Returns 0 on success, or an error code on failure.
 */
int hprofDumpHeap(const char* fileName, int fd, bool directToDdms)
{
    int result;

    dvmLockMutex(&gDvm.gcHeapLock);

    gDvm.gcHeap->hprofDumpOnGc = true;
    gDvm.gcHeap->hprofFileName = fileName;
    gDvm.gcHeap->hprofFd = fd;
    gDvm.gcHeap->hprofDirectToDdms = directToDdms;
    dvmCollectGarbageInternal(false, GC_HPROF_DUMP_HEAP);
    result = gDvm.gcHeap->hprofResult;

    dvmUnlockMutex(&gDvm.gcHeapLock);

    return result;
}
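
/* Illustrative only: trigger a GC-time heap dump to an automatically
 * named file under /data/misc (requires WITH_HPROF), passing a negative
 * fd so the generated file name is used:
 *
 *     int res = hprofDumpHeap(NULL, -1, false);
 *     if (res != 0)
 *         LOGW("hprof heap dump failed (result=%d)\n", res);
 */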

void dvmHeapSetHprofGcScanState(hprof_heap_tag_t state, u4 threadSerialNumber)
{
    if (gDvm.gcHeap->hprofContext != NULL) {
        hprofSetGcScanState(gDvm.gcHeap->hprofContext, state,
                threadSerialNumber);
    }
}
#endif
   1008