/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Garbage-collecting memory allocator.
 */
#include "Dalvik.h"
#include "alloc/HeapBitmap.h"
#include "alloc/Verify.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
#include "alloc/HeapSource.h"
#include "alloc/MarkSweep.h"
#include "os/os.h"

#include <sys/time.h>
#include <sys/resource.h>
#include <limits.h>
#include <errno.h>

static const GcSpec kGcForMallocSpec = {
    true,  /* isPartial */
    false,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_FOR_ALLOC"
};

const GcSpec *GC_FOR_MALLOC = &kGcForMallocSpec;

static const GcSpec kGcConcurrentSpec = {
    true,  /* isPartial */
    true,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_CONCURRENT"
};

const GcSpec *GC_CONCURRENT = &kGcConcurrentSpec;

static const GcSpec kGcExplicitSpec = {
    false,  /* isPartial */
    true,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_EXPLICIT"
};

const GcSpec *GC_EXPLICIT = &kGcExplicitSpec;

static const GcSpec kGcBeforeOomSpec = {
    false,  /* isPartial */
    false,  /* isConcurrent */
    false,  /* doPreserve */
    "GC_BEFORE_OOM"
};

const GcSpec *GC_BEFORE_OOM = &kGcBeforeOomSpec;
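
/*
 * Flag summary for the specs above:
 *
 *   spec           isPartial  isConcurrent  doPreserve
 *   GC_FOR_MALLOC  yes        no            yes
 *   GC_CONCURRENT  yes        yes           yes
 *   GC_EXPLICIT    no         yes           yes
 *   GC_BEFORE_OOM  no         no            no
 *
 * isPartial is forwarded to dvmHeapBeginMarkStep(); doPreserve == false
 * causes dvmHeapProcessReferences() to clear soft references (see
 * dvmCollectGarbageInternal() below).
 */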

/*
 * Initialize the GC heap.
 *
 * Returns true if successful, false otherwise.
 */
bool dvmHeapStartup()
{
    GcHeap *gcHeap;

    if (gDvm.heapGrowthLimit == 0) {
        gDvm.heapGrowthLimit = gDvm.heapMaximumSize;
    }

    gcHeap = dvmHeapSourceStartup(gDvm.heapStartingSize,
                                  gDvm.heapMaximumSize,
                                  gDvm.heapGrowthLimit);
    if (gcHeap == NULL) {
        return false;
    }
    gcHeap->ddmHpifWhen = 0;
    gcHeap->ddmHpsgWhen = 0;
    gcHeap->ddmHpsgWhat = 0;
    gcHeap->ddmNhsgWhen = 0;
    gcHeap->ddmNhsgWhat = 0;
    gDvm.gcHeap = gcHeap;

    /* Set up the lists we'll use for cleared reference objects.
     */
    gcHeap->clearedReferences = NULL;

    if (!dvmCardTableStartup(gDvm.heapMaximumSize, gDvm.heapGrowthLimit)) {
        LOGE_HEAP("card table startup failed.");
        return false;
    }

    return true;
}

bool dvmHeapStartupAfterZygote()
{
    return dvmHeapSourceStartupAfterZygote();
}

void dvmHeapShutdown()
{
//TODO: make sure we're locked
    if (gDvm.gcHeap != NULL) {
        dvmCardTableShutdown();
        /* Destroy the heap.  Any outstanding pointers will point to
         * unmapped memory (unless/until someone else maps it).  This
         * frees gDvm.gcHeap as a side-effect.
         */
        dvmHeapSourceShutdown(&gDvm.gcHeap);
    }
}

/*
 * Shutdown any threads internal to the heap.
 */
void dvmHeapThreadShutdown()
{
    dvmHeapSourceThreadShutdown();
}
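
/*
 * Expected lifecycle, inferred from the functions above (an assumption
 * about the callers, which live elsewhere in the VM): dvmHeapStartup()
 * during VM init, dvmHeapStartupAfterZygote() once the zygote has
 * forked, then dvmHeapThreadShutdown() before dvmHeapShutdown() at
 * teardown, since shutting down the heap source frees gDvm.gcHeap.
 */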

/*
 * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
 * we're going to have to wait on the mutex.
 */
bool dvmLockHeap()
{
    if (dvmTryLockMutex(&gDvm.gcHeapLock) != 0) {
        Thread *self;
        ThreadStatus oldStatus;

        self = dvmThreadSelf();
        oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmLockMutex(&gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }

    return true;
}
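
/*
 * Typical pairing, as in dvmMalloc() below:
 *
 *     dvmLockHeap();
 *     void *ptr = tryMalloc(size);
 *     dvmUnlockHeap();
 *
 * The THREAD_VMWAIT transition in dvmLockHeap() keeps a thread that is
 * blocked on the heap lock from holding up a suspend-all request.
 */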

void dvmUnlockHeap()
{
    dvmUnlockMutex(&gDvm.gcHeapLock);
}

/* Do a full garbage collection, which may grow the
 * heap as a side-effect if the live set is large.
 */
static void gcForMalloc(bool clearSoftReferences)
{
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
    /* This may adjust the soft limit as a side-effect.
     */
    const GcSpec *spec = clearSoftReferences ? GC_BEFORE_OOM : GC_FOR_MALLOC;
    dvmCollectGarbageInternal(spec);
}
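
/*
 * Note the mapping: clearSoftReferences selects GC_BEFORE_OOM, whose
 * doPreserve flag is false; dvmCollectGarbageInternal() passes
 * !doPreserve to dvmHeapProcessReferences(), which is what actually
 * clears the soft references.
 */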

/* Try as hard as possible to allocate some memory.
 */
static void *tryMalloc(size_t size)
{
    void *ptr;

//TODO: figure out better heuristics
//    There will be a lot of churn if someone allocates a bunch of
//    big objects in a row, and we hit the frag case each time.
//    A full GC for each.
//    Maybe we grow the heap in bigger leaps
//    Maybe we skip the GC if the size is large and we did one recently
//      (number of allocations ago) (watch for thread effects)
//    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
//      (or, at least, there are only 0-5 objects swept each time)

    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /*
     * The allocation failed.  If the GC is running, block until it
     * completes and retry.
     */
    if (gDvm.gcHeap->gcRunning) {
        /*
         * The GC is concurrently tracing the heap.  Release the heap
         * lock, wait for the GC to complete, and retry the allocation.
         */
        dvmWaitForConcurrentGcToComplete();
    } else {
        /*
         * Try a foreground GC since a concurrent GC is not currently running.
         */
        gcForMalloc(false);
    }

    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /* Even that didn't work; this is an exceptional state.
     * Try harder, growing the heap if necessary.
     */
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        size_t newHeapSize;

        newHeapSize = dvmHeapSourceGetIdealFootprint();
//TODO: may want to grow a little bit more so that the amount of free
//      space is equal to the old free space + the utilization slop for
//      the new allocation.
        LOGI_HEAP("Grow heap (frag case) to "
                "%zu.%03zuMB for %zu-byte allocation",
                FRACTIONAL_MB(newHeapSize), size);
        return ptr;
    }

    /* Most allocations should have succeeded by now, so the heap
     * is really full, really fragmented, or the requested size is
     * really big.  Do another GC, collecting SoftReferences this
     * time.  The VM spec requires that all SoftReferences have
     * been collected and cleared before throwing an OOME.
     */
//TODO: wait for the finalizers from the previous GC to finish
    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation",
            size);
    gcForMalloc(true);
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        return ptr;
    }
//TODO: maybe wait for finalizers and try one last time

    LOGE_HEAP("Out of memory on a %zu-byte allocation.", size);
//TODO: tell the HeapSource to dump its state
    dvmDumpThread(dvmThreadSelf(), false);

    return NULL;
}
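
/*
 * Recap of the escalation ladder in tryMalloc():
 *
 *   1. plain dvmHeapSourceAlloc()
 *   2. wait for a running concurrent GC, or run GC_FOR_MALLOC, then retry
 *   3. dvmHeapSourceAllocAndGrow(), growing the footprint if needed
 *   4. GC_BEFORE_OOM (clears SoftReferences), then one more grow-and-retry
 *   5. return NULL; the caller (dvmMalloc) throws the OutOfMemoryError
 */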

/* Throw an OutOfMemoryError if there's a thread to attach it to.
 * Avoid recursing.
 *
 * The caller must not be holding the heap lock, or else the allocations
 * in dvmThrowException() will deadlock.
 */
static void throwOOME()
{
    Thread *self;

    if ((self = dvmThreadSelf()) != NULL) {
        /* If the current (failing) dvmMalloc() happened as part of thread
         * creation/attachment before the thread became part of the root set,
         * we can't rely on the thread-local trackedAlloc table, so
         * we can't keep track of a real allocated OOME object.  But, since
         * the thread is in the process of being created, it won't have
         * a useful stack anyway, so we may as well make things easier
         * by throwing the (stackless) pre-built OOME.
         */
        if (dvmIsOnThreadList(self) && !self->throwingOOME) {
            /* Let ourselves know that we tried to throw an OOM
             * error in the normal way in case we run out of
             * memory trying to allocate it inside dvmThrowException().
             */
            self->throwingOOME = true;

            /* Don't include a description string;
             * one fewer allocation.
             */
            dvmThrowOutOfMemoryError(NULL);
        } else {
            /*
             * This thread has already tried to throw an OutOfMemoryError,
             * which probably means that we're running out of memory
             * while recursively trying to throw.
             *
             * To avoid any more allocation attempts, "throw" a pre-built
             * OutOfMemoryError object (which won't have a useful stack trace).
             *
             * Note that since this call can't possibly allocate anything,
             * we don't care about the state of self->throwingOOME
             * (which will usually already be set).
             */
            dvmSetException(self, gDvm.outOfMemoryObj);
        }
        /* We're done with the possible recursion.
         */
        self->throwingOOME = false;
    }
}
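
/*
 * gDvm.outOfMemoryObj is the pre-built, stackless OutOfMemoryError used
 * above; it is allocated ahead of time during VM startup (outside this
 * file) precisely so that it is still available when the heap is too
 * full to allocate a fresh exception object.
 */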

/*
 * Allocate storage on the GC heap.  We guarantee 8-byte alignment.
 *
 * The new storage is zeroed out.
 *
 * Note that, in rare circumstances (a non-VM thread attaching itself
 * through JNI AttachCurrentThread), this can be called while a GC is
 * in progress: the attaching thread needs to allocate some objects.
 * If this becomes annoying to deal with, we can block it at the source,
 * but holding the allocation mutex should be enough.
 *
 * Use ALLOC_DONT_TRACK when we either don't want to track an allocation
 * (because it's being done for the interpreter "new" operation and will
 * be part of the root set immediately) or we can't (because this allocation
 * is for a brand new thread).
 *
 * Returns NULL and throws an exception on failure.
 *
 * TODO: don't do a GC if the debugger thinks all threads are suspended
 */
void* dvmMalloc(size_t size, int flags)
{
    void *ptr;

    dvmLockHeap();

    /* Try as hard as possible to allocate some memory.
     */
    ptr = tryMalloc(size);
    if (ptr != NULL) {
        /* We've got the memory.
         */
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.allocCount++;
            gDvm.allocProf.allocSize += size;
            if (self != NULL) {
                self->allocProf.allocCount++;
                self->allocProf.allocSize += size;
            }
        }
    } else {
        /* The allocation failed.
         */

        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.failedAllocCount++;
            gDvm.allocProf.failedAllocSize += size;
            if (self != NULL) {
                self->allocProf.failedAllocCount++;
                self->allocProf.failedAllocSize += size;
            }
        }
    }

    dvmUnlockHeap();

    if (ptr != NULL) {
        /*
         * If caller hasn't asked us not to track it, add it to the
         * internal tracking list.
         */
        if ((flags & ALLOC_DONT_TRACK) == 0) {
            dvmAddTrackedAlloc((Object*)ptr, NULL);
        }
    } else {
        /*
         * The allocation failed; throw an OutOfMemoryError.
         */
        throwOOME();
    }

    return ptr;
}
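
/*
 * Sketch of a typical caller (illustrative only; ALLOC_DEFAULT and
 * dvmReleaseTrackedAlloc() are the conventional counterparts assumed
 * from the rest of the VM):
 *
 *     Object *obj = (Object *)dvmMalloc(objectSize, ALLOC_DEFAULT);
 *     if (obj == NULL) {
 *         return NULL;  // dvmMalloc() has already thrown the OOME
 *     }
 *     // ...initialize obj...
 *     dvmReleaseTrackedAlloc(obj, NULL);  // once obj is reachable
 */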

/*
 * Returns true iff <obj> points to a valid allocated object.
 */
bool dvmIsValidObject(const Object* obj)
{
    /* Don't bother if it's NULL or not 8-byte aligned.
     */
    if (obj != NULL && ((uintptr_t)obj & (8-1)) == 0) {
        /* Even if the heap isn't locked, this shouldn't return
         * any false negatives.  The only mutation that could
         * be happening is allocation, which means that another
         * thread could be in the middle of a read-modify-write
         * to add a new bit for a new object.  However, that
         * RMW will have completed by the time any other thread
         * could possibly see the new pointer, so there is no
         * danger of dvmIsValidObject() being called on a valid
         * pointer whose bit isn't set.
         *
         * Freeing will only happen during the sweep phase, which
         * only happens while the heap is locked.
         */
        return dvmHeapSourceContains(obj);
    }
    return false;
}
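
/*
 * The alignment test masks the low three bits: ((uintptr_t)obj & (8-1))
 * is zero only for 8-byte-aligned addresses, so e.g. 0x40a08 passes
 * while 0x40a0c is rejected without ever consulting the heap source.
 */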

size_t dvmObjectSizeInHeap(const Object *obj)
{
    return dvmHeapSourceChunkSize(obj);
}

static void verifyRootsAndHeap()
{
    dvmVerifyRoots();
    dvmVerifyBitmap(dvmHeapSourceGetLiveBits());
}

/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
void dvmCollectGarbageInternal(const GcSpec* spec)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    u4 gcEnd = 0;
    u4 rootStart = 0, rootEnd = 0;
    u4 dirtyStart = 0, dirtyEnd = 0;
    size_t numObjectsFreed, numBytesFreed;
    size_t currAllocated, currFootprint;
    size_t percentFree;
    int oldThreadPriority = INT_MAX;

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC");
        return;
    }

    gcHeap->gcRunning = true;

    rootStart = dvmGetRelativeTimeMsec();
    dvmSuspendAllThreads(SUSPEND_FOR_GC);

    /*
     * If we are not marking concurrently, raise the priority of the
     * thread performing the garbage collection.
     */
    if (!spec->isConcurrent) {
        oldThreadPriority = os_raiseThreadPriority();
    }
    if (gDvm.preVerify) {
        LOGV_HEAP("Verifying roots and heap before GC");
        verifyRootsAndHeap();
    }

    dvmMethodTraceGCBegin();

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep(spec->isPartial)) {
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    assert(gcHeap->softReferences == NULL);
    assert(gcHeap->weakReferences == NULL);
    assert(gcHeap->finalizerReferences == NULL);
    assert(gcHeap->phantomReferences == NULL);
    assert(gcHeap->clearedReferences == NULL);

    if (spec->isConcurrent) {
        /*
         * Resume threads while tracing from the roots.  We unlock the
         * heap to allow mutator threads to allocate from free space.
         */
        dvmClearCardTable();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        rootEnd = dvmGetRelativeTimeMsec();
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();

    if (spec->isConcurrent) {
        /*
         * Re-acquire the heap lock and perform the final thread
         * suspension.
         */
        dirtyStart = dvmGetRelativeTimeMsec();
        dvmLockHeap();
        dvmSuspendAllThreads(SUSPEND_FOR_GC);
        /*
         * As no barrier intercepts root updates, we conservatively
         * assume all roots may be gray and re-mark them.
         */
        dvmHeapReMarkRootSet();
        /*
         * With the exception of reference objects and weak interned
         * strings, all gray objects should now be on dirty cards.
         */
        if (gDvm.verifyCardTable) {
            dvmVerifyCardTable();
        }
        /*
         * Recursively mark gray objects pointed to by the roots or by
         * heap objects dirtied during the concurrent mark.
         */
        dvmHeapReScanMarkedObjects();
    }

    /*
     * All strongly-reachable objects have now been marked.  Process
     * weakly-reachable objects discovered while tracing.
     */
    dvmHeapProcessReferences(&gcHeap->softReferences,
                             spec->doPreserve == false,
                             &gcHeap->weakReferences,
                             &gcHeap->finalizerReferences,
                             &gcHeap->phantomReferences);

#if defined(WITH_JIT)
    /*
     * Patching a chaining cell is very cheap as it only updates 4 words. It's
     * the overhead of stopping all threads and synchronizing the I/D cache
     * that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    LOGD_HEAP("Sweeping...");

    dvmHeapSweepSystemWeaks();

    /*
     * Live objects have a bit set in the mark bitmap; swap the mark
     * and live bitmaps.  The sweep can proceed concurrently viewing
     * the new live bitmap as the old mark bitmap, and vice versa.
     */
    dvmHeapSourceSwapBitmaps();

    if (gDvm.postVerify) {
        LOGV_HEAP("Verifying roots and heap after GC");
        verifyRootsAndHeap();
    }

    if (spec->isConcurrent) {
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        dirtyEnd = dvmGetRelativeTimeMsec();
    }
    dvmHeapSweepUnmarkedObjects(spec->isPartial, spec->isConcurrent,
                                &numObjectsFreed, &numBytesFreed);
    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();
    if (spec->isConcurrent) {
        dvmLockHeap();
    }

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();

    currAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    currFootprint = dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);

    dvmMethodTraceGCEnd();
    LOGV_HEAP("GC finished");

    gcHeap->gcRunning = false;

    LOGV_HEAP("Resuming threads");

    if (spec->isConcurrent) {
        /*
         * Wake up any threads that blocked after a failed allocation
         * request.
         */
        dvmBroadcastCond(&gDvm.gcHeapCond);
    }

    if (!spec->isConcurrent) {
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        dirtyEnd = dvmGetRelativeTimeMsec();
        /*
         * Restore the original thread scheduling priority if it was
         * changed at the start of the current garbage collection.
         */
        if (oldThreadPriority != INT_MAX) {
            os_lowerThreadPriority(oldThreadPriority);
        }
    }

    /*
     * Move queue of pending references back into Java.
     */
    dvmEnqueueClearedReferences(&gDvm.gcHeap->clearedReferences);

    gcEnd = dvmGetRelativeTimeMsec();
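
    /*
     * Example of the percentFree computation below: with
     * currAllocated = 6MB and currFootprint = 8MB,
     * 100 - (100 * 6 / 8) = 25% free.
     */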
    percentFree = 100 - (size_t)(100.0f * (float)currAllocated / currFootprint);
    if (!spec->isConcurrent) {
        u4 markSweepTime = dirtyEnd - rootStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zuK, %zu%% free %zuK/%zuK, paused %ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             markSweepTime, gcTime);
    } else {
        u4 rootTime = rootEnd - rootStart;
        u4 dirtyTime = dirtyEnd - dirtyStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zuK, %zu%% free %zuK/%zuK, paused %ums+%ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             rootTime, dirtyTime, gcTime);
    }
    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM");
        dvmDdmSendHeapSegments(false, true);
    }
}
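
/*
 * Timeline recap for a concurrent collection, as implemented above:
 *
 *   pause 1 (rootStart..rootEnd): suspend all threads, begin the mark
 *       step, mark the root set, clear the card table
 *   concurrent: trace from the roots while mutators run
 *   pause 2 (dirtyStart..dirtyEnd): re-mark roots, re-scan dirty cards,
 *       process references, sweep system weaks, swap the bitmaps
 *   concurrent: sweep unmarked objects and finish up
 *
 * A non-concurrent spec does all of this in a single suspension, which
 * the log line reports as one pause.
 */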

/*
 * If the concurrent GC is running, wait for it to finish.  The caller
 * must hold the heap lock.
 *
 * Note: the second dvmChangeStatus() could stall if we were in RUNNING
 * on entry, and some other thread has asked us to suspend.  In that
 * case we will be suspended with the heap lock held, which can lead to
 * deadlock if the other thread tries to do something with the managed heap.
 * For example, the debugger might suspend us and then execute a method that
 * allocates memory.  We can avoid this situation by releasing the lock
 * before self-suspending.  (The developer can work around this specific
 * situation by single-stepping the VM.  Alternatively, we could disable
 * concurrent GC when the debugger is attached, but that might change
 * behavior more than is desirable.)
 *
 * This should not be a problem in production, because any GC-related
 * activity will grab the lock before issuing a suspend-all.  (We may briefly
 * suspend when the GC thread calls dvmUnlockHeap before dvmResumeAllThreads,
 * but there's no risk of deadlock.)
 */
bool dvmWaitForConcurrentGcToComplete()
{
    bool waited = gDvm.gcHeap->gcRunning;
    Thread *self = dvmThreadSelf();
    assert(self != NULL);
    u4 start = dvmGetRelativeTimeMsec();
    while (gDvm.gcHeap->gcRunning) {
        ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }
    u4 end = dvmGetRelativeTimeMsec();
    if (end - start > 0) {
        ALOGD("WAIT_FOR_CONCURRENT_GC blocked %ums", end - start);
    }
    return waited;
}
    717