/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Garbage-collecting memory allocator.
 */
#include "Dalvik.h"
#include "alloc/HeapBitmap.h"
#include "alloc/Verify.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
#include "alloc/HeapSource.h"
#include "alloc/MarkSweep.h"
#include "os/os.h"

#include <sys/time.h>
#include <sys/resource.h>
#include <limits.h>
#include <errno.h>

static const GcSpec kGcForMallocSpec = {
    true,  /* isPartial */
    false,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_FOR_ALLOC"
};

const GcSpec *GC_FOR_MALLOC = &kGcForMallocSpec;

static const GcSpec kGcConcurrentSpec = {
    true,  /* isPartial */
    true,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_CONCURRENT"
};

const GcSpec *GC_CONCURRENT = &kGcConcurrentSpec;

static const GcSpec kGcExplicitSpec = {
    false,  /* isPartial */
    true,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_EXPLICIT"
};

const GcSpec *GC_EXPLICIT = &kGcExplicitSpec;

static const GcSpec kGcBeforeOomSpec = {
    false,  /* isPartial */
    false,  /* isConcurrent */
    false,  /* doPreserve */
    "GC_BEFORE_OOM"
};

const GcSpec *GC_BEFORE_OOM = &kGcBeforeOomSpec;
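
/* Summary of the specs above, derived from their fields:
 *
 *   GC_FOR_MALLOC ("GC_FOR_ALLOC") - partial, stop-the-world, preserves
 *                                    SoftReferences; run when an allocation
 *                                    fails (see tryMalloc() below).
 *   GC_CONCURRENT                  - partial, concurrent, preserves
 *                                    SoftReferences.
 *   GC_EXPLICIT                    - full, concurrent, preserves
 *                                    SoftReferences; used for explicit
 *                                    collection requests.
 *   GC_BEFORE_OOM                  - full, stop-the-world, clears
 *                                    SoftReferences; the last attempt before
 *                                    reporting an OutOfMemoryError.
 */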

/*
 * Initialize the GC heap.
 *
 * Returns true if successful, false otherwise.
 */
bool dvmHeapStartup()
{
    GcHeap *gcHeap;

    if (gDvm.heapGrowthLimit == 0) {
        gDvm.heapGrowthLimit = gDvm.heapMaximumSize;
    }

    gcHeap = dvmHeapSourceStartup(gDvm.heapStartingSize,
                                  gDvm.heapMaximumSize,
                                  gDvm.heapGrowthLimit);
    if (gcHeap == NULL) {
        return false;
    }
    gcHeap->ddmHpifWhen = 0;
    gcHeap->ddmHpsgWhen = 0;
    gcHeap->ddmHpsgWhat = 0;
    gcHeap->ddmNhsgWhen = 0;
    gcHeap->ddmNhsgWhat = 0;
    gDvm.gcHeap = gcHeap;

    /* Set up the lists we'll use for cleared reference objects.
     */
    gcHeap->clearedReferences = NULL;

    if (!dvmCardTableStartup(gDvm.heapMaximumSize, gDvm.heapGrowthLimit)) {
        LOGE_HEAP("card table startup failed.");
        return false;
    }

    return true;
}

bool dvmHeapStartupAfterZygote()
{
    return dvmHeapSourceStartupAfterZygote();
}

void dvmHeapShutdown()
{
//TODO: make sure we're locked
    if (gDvm.gcHeap != NULL) {
        dvmCardTableShutdown();
        /* Destroy the heap.  Any outstanding pointers will point to
         * unmapped memory (unless/until someone else maps it).  This
         * frees gDvm.gcHeap as a side-effect.
         */
        dvmHeapSourceShutdown(&gDvm.gcHeap);
    }
}

/*
 * Shut down any threads internal to the heap.
 */
void dvmHeapThreadShutdown()
{
    dvmHeapSourceThreadShutdown();
}

/*
 * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
 * we're going to have to wait on the mutex.
 */
bool dvmLockHeap()
{
    if (dvmTryLockMutex(&gDvm.gcHeapLock) != 0) {
        Thread *self;
        ThreadStatus oldStatus;

        self = dvmThreadSelf();
        oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmLockMutex(&gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }

    return true;
}

void dvmUnlockHeap()
{
    dvmUnlockMutex(&gDvm.gcHeapLock);
}
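
/* Note: dvmLockHeap() always returns true.  Callers pair it with
 * dvmUnlockHeap() around heap operations; dvmMalloc() below follows this
 * pattern, and dvmCollectGarbageInternal() expects the lock to already be
 * held.  Illustrative sketch:
 *
 *     dvmLockHeap();
 *     void *ptr = tryMalloc(size);
 *     dvmUnlockHeap();
 */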

/* Do a full garbage collection, which may grow the
 * heap as a side-effect if the live set is large.
 */
static void gcForMalloc(bool clearSoftReferences)
{
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
    /* This may adjust the soft limit as a side-effect.
     */
    const GcSpec *spec = clearSoftReferences ? GC_BEFORE_OOM : GC_FOR_MALLOC;
    dvmCollectGarbageInternal(spec);
}

/* Try as hard as possible to allocate some memory.
 */
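/* Escalation order, as a descriptive note matching the code below:
 *   1. Refuse requests at or above the growth limit, but still collect
 *      SoftReferences before reporting failure.
 *   2. Try a plain allocation.
 *   3. If a concurrent GC is running, wait for it to finish and retry.
 *   4. Run a foreground GC_FOR_MALLOC and retry.
 *   5. Retry with heap growth allowed.
 *   6. Run GC_BEFORE_OOM (clearing SoftReferences) and retry with growth.
 *   7. Give up and return NULL; the caller throws the OOME.
 */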
static void *tryMalloc(size_t size)
{
    void *ptr;

    /* Don't try too hard if there's no way the allocation is
     * going to succeed.  We have to collect SoftReferences before
     * throwing an OOME, though.
     */
    if (size >= gDvm.heapGrowthLimit) {
        ALOGW("%zd byte allocation exceeds the %zd byte maximum heap size",
             size, gDvm.heapGrowthLimit);
        ptr = NULL;
        goto collect_soft_refs;
    }

//TODO: figure out better heuristics
//    There will be a lot of churn if someone allocates a bunch of
//    big objects in a row, and we hit the frag case each time.
//    A full GC for each.
//    Maybe we grow the heap in bigger leaps
//    Maybe we skip the GC if the size is large and we did one recently
//      (number of allocations ago) (watch for thread effects)
//    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
//      (or, at least, there are only 0-5 objects swept each time)

    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /*
     * The allocation failed.  If the GC is running, block until it
     * completes and retry.
     */
    if (gDvm.gcHeap->gcRunning) {
        /*
         * The GC is concurrently tracing the heap.  Release the heap
         * lock, wait for the GC to complete, and retry the allocation.
         */
        dvmWaitForConcurrentGcToComplete();
        ptr = dvmHeapSourceAlloc(size);
        if (ptr != NULL) {
            return ptr;
        }
    }
    /*
     * Another failure.  Our thread was starved or there may be too
     * many live objects.  Try a foreground GC.  This will have no
     * effect if the concurrent GC is already running.
     */
    gcForMalloc(false);
    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /* Even that didn't work; this is an exceptional state.
     * Try harder, growing the heap if necessary.
     */
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        size_t newHeapSize;

        newHeapSize = dvmHeapSourceGetIdealFootprint();
//TODO: may want to grow a little bit more so that the amount of free
//      space is equal to the old free space + the utilization slop for
//      the new allocation.
        LOGI_HEAP("Grow heap (frag case) to "
                "%zu.%03zuMB for %zu-byte allocation",
                FRACTIONAL_MB(newHeapSize), size);
        return ptr;
    }

    /* Most allocations should have succeeded by now, so the heap
     * is really full, really fragmented, or the requested size is
     * really big.  Do another GC, collecting SoftReferences this
     * time.  The VM spec requires that all SoftReferences have
     * been collected and cleared before throwing an OOME.
     */
//TODO: wait for the finalizers from the previous GC to finish
collect_soft_refs:
    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation",
            size);
    gcForMalloc(true);
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        return ptr;
    }
//TODO: maybe wait for finalizers and try one last time

    LOGE_HEAP("Out of memory on a %zd-byte allocation.", size);
//TODO: tell the HeapSource to dump its state
    dvmDumpThread(dvmThreadSelf(), false);

    return NULL;
}

/* Throw an OutOfMemoryError if there's a thread to attach it to.
 * Avoid recursing.
 *
 * The caller must not be holding the heap lock, or else the allocations
 * in dvmThrowException() will deadlock.
 */
static void throwOOME()
{
    Thread *self;

    if ((self = dvmThreadSelf()) != NULL) {
        /* If the current (failing) dvmMalloc() happened as part of thread
         * creation/attachment before the thread became part of the root set,
         * we can't rely on the thread-local trackedAlloc table, so
         * we can't keep track of a real allocated OOME object.  But, since
         * the thread is in the process of being created, it won't have
         * a useful stack anyway, so we may as well make things easier
         * by throwing the (stackless) pre-built OOME.
         */
        if (dvmIsOnThreadList(self) && !self->throwingOOME) {
            /* Let ourselves know that we tried to throw an OOM
             * error in the normal way in case we run out of
             * memory trying to allocate it inside dvmThrowException().
             */
            self->throwingOOME = true;

            /* Don't include a description string;
             * one fewer allocation.
             */
            dvmThrowOutOfMemoryError(NULL);
        } else {
            /*
             * This thread has already tried to throw an OutOfMemoryError,
             * which probably means that we're running out of memory
             * while recursively trying to throw.
             *
             * To avoid any more allocation attempts, "throw" a pre-built
             * OutOfMemoryError object (which won't have a useful stack trace).
             *
             * Note that since this call can't possibly allocate anything,
             * we don't care about the state of self->throwingOOME
             * (which will usually already be set).
             */
            dvmSetException(self, gDvm.outOfMemoryObj);
        }
        /* We're done with the possible recursion.
         */
        self->throwingOOME = false;
    }
}

/*
 * Allocate storage on the GC heap.  We guarantee 8-byte alignment.
 *
 * The new storage is zeroed out.
 *
 * Note that, in rare cases, this could get called while a GC is in
 * progress.  If a non-VM thread tries to attach itself through JNI,
 * it will need to allocate some objects.  If this becomes annoying to
 * deal with, we can block it at the source, but holding the allocation
 * mutex should be enough.
 *
 * In rare circumstances (JNI AttachCurrentThread) we can be called
 * from a non-VM thread.
 *
 * Use ALLOC_DONT_TRACK when we either don't want to track an allocation
 * (because it's being done for the interpreter "new" operation and will
 * be part of the root set immediately) or we can't (because this allocation
 * is for a brand new thread).
 *
 * Returns NULL and throws an exception on failure.
 *
 * TODO: don't do a GC if the debugger thinks all threads are suspended
 */
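/* Illustrative call pattern (a sketch, not a call site in this file; assumes
 * the ALLOC_DEFAULT flag and dvmReleaseTrackedAlloc() declared in Alloc.h):
 *
 *     Object *obj = (Object *)dvmMalloc(totalSize, ALLOC_DEFAULT);
 *     if (obj == NULL) {
 *         // an OutOfMemoryError has already been thrown
 *         return NULL;
 *     }
 *     ...
 *     dvmReleaseTrackedAlloc(obj, NULL);  // once obj is reachable elsewhere
 */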
void* dvmMalloc(size_t size, int flags)
{
    void *ptr;

    dvmLockHeap();

    /* Try as hard as possible to allocate some memory.
     */
    ptr = tryMalloc(size);
    if (ptr != NULL) {
        /* We've got the memory.
         */
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.allocCount++;
            gDvm.allocProf.allocSize += size;
            if (self != NULL) {
                self->allocProf.allocCount++;
                self->allocProf.allocSize += size;
            }
        }
    } else {
        /* The allocation failed.
         */

        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.failedAllocCount++;
            gDvm.allocProf.failedAllocSize += size;
            if (self != NULL) {
                self->allocProf.failedAllocCount++;
                self->allocProf.failedAllocSize += size;
            }
        }
    }

    dvmUnlockHeap();

    if (ptr != NULL) {
        /*
         * If caller hasn't asked us not to track it, add it to the
         * internal tracking list.
         */
        if ((flags & ALLOC_DONT_TRACK) == 0) {
            dvmAddTrackedAlloc((Object*)ptr, NULL);
        }
    } else {
        /*
         * The allocation failed; throw an OutOfMemoryError.
         */
        throwOOME();
    }

    return ptr;
}

/*
 * Returns true iff <obj> points to a valid allocated object.
 */
bool dvmIsValidObject(const Object* obj)
{
    /* Don't bother if it's NULL or not 8-byte aligned.
     */
    if (obj != NULL && ((uintptr_t)obj & (8-1)) == 0) {
        /* Even if the heap isn't locked, this shouldn't return
         * any false negatives.  The only mutation that could
         * be happening is allocation, which means that another
         * thread could be in the middle of a read-modify-write
         * to add a new bit for a new object.  However, that
         * RMW will have completed by the time any other thread
         * could possibly see the new pointer, so there is no
         * danger of dvmIsValidObject() being called on a valid
         * pointer whose bit isn't set.
         *
         * Freeing will only happen during the sweep phase, which
         * only happens while the heap is locked.
         */
        return dvmHeapSourceContains(obj);
    }
    return false;
}

size_t dvmObjectSizeInHeap(const Object *obj)
{
    return dvmHeapSourceChunkSize(obj);
}

static void verifyRootsAndHeap()
{
    dvmVerifyRoots();
    dvmVerifyBitmap(dvmHeapSourceGetLiveBits());
}

/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
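/* Phase overview, as a descriptive note matching the code below:
 *
 *   1. Suspend all threads and mark the root set.
 *   2. Concurrent collections then clear the card table, release the heap
 *      lock, and resume threads while tracing from the roots.
 *   3. Concurrent collections re-suspend threads, re-mark the roots, and
 *      re-scan objects dirtied during the concurrent trace.
 *   4. Process soft/weak/finalizer/phantom references, sweep system weaks,
 *      and swap the mark and live bitmaps.
 *   5. Concurrent collections resume threads before the sweep; the sweep
 *      frees unmarked objects, and the footprint is then adjusted for the
 *      target utilization.
 */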
void dvmCollectGarbageInternal(const GcSpec* spec)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    u4 gcEnd = 0;
    u4 rootStart = 0, rootEnd = 0;
    u4 dirtyStart = 0, dirtyEnd = 0;
    size_t numObjectsFreed, numBytesFreed;
    size_t currAllocated, currFootprint;
    size_t percentFree;
    int oldThreadPriority = INT_MAX;

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC");
        return;
    }

    gcHeap->gcRunning = true;

    rootStart = dvmGetRelativeTimeMsec();
    dvmSuspendAllThreads(SUSPEND_FOR_GC);

    /*
     * If we are not marking concurrently, raise the priority of the
     * thread performing the garbage collection.
     */
    if (!spec->isConcurrent) {
        oldThreadPriority = os_raiseThreadPriority();
    }
    if (gDvm.preVerify) {
        LOGV_HEAP("Verifying roots and heap before GC");
        verifyRootsAndHeap();
    }

    dvmMethodTraceGCBegin();

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep(spec->isPartial)) {
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    assert(gcHeap->softReferences == NULL);
    assert(gcHeap->weakReferences == NULL);
    assert(gcHeap->finalizerReferences == NULL);
    assert(gcHeap->phantomReferences == NULL);
    assert(gcHeap->clearedReferences == NULL);

    if (spec->isConcurrent) {
        /*
         * Resume threads while tracing from the roots.  We unlock the
         * heap to allow mutator threads to allocate from free space.
         */
        dvmClearCardTable();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        rootEnd = dvmGetRelativeTimeMsec();
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();

    if (spec->isConcurrent) {
        /*
         * Re-acquire the heap lock and perform the final thread
         * suspension.
         */
        dirtyStart = dvmGetRelativeTimeMsec();
        dvmLockHeap();
        dvmSuspendAllThreads(SUSPEND_FOR_GC);
        /*
         * As no barrier intercepts root updates, we conservatively
         * assume all roots may be gray and re-mark them.
         */
        dvmHeapReMarkRootSet();
        /*
         * With the exception of reference objects and weak interned
         * strings, all gray objects should now be on dirty cards.
         */
        if (gDvm.verifyCardTable) {
            dvmVerifyCardTable();
        }
        /*
         * Recursively mark gray objects pointed to by the roots or by
         * heap objects dirtied during the concurrent mark.
         */
        dvmHeapReScanMarkedObjects();
    }

    /*
     * All strongly-reachable objects have now been marked.  Process
     * weakly-reachable objects discovered while tracing.
     */
    dvmHeapProcessReferences(&gcHeap->softReferences,
                             spec->doPreserve == false,
                             &gcHeap->weakReferences,
                             &gcHeap->finalizerReferences,
                             &gcHeap->phantomReferences);

#if defined(WITH_JIT)
    /*
     * Patching a chaining cell is very cheap as it only updates 4 words. It's
     * the overhead of stopping all threads and synchronizing the I/D cache
     * that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    LOGD_HEAP("Sweeping...");

    dvmHeapSweepSystemWeaks();

    /*
     * Live objects have a bit set in the mark bitmap; swap the mark
     * and live bitmaps.  The sweep can proceed concurrently, viewing
     * the new live bitmap as the old mark bitmap, and vice versa.
     */
    dvmHeapSourceSwapBitmaps();

    if (gDvm.postVerify) {
        LOGV_HEAP("Verifying roots and heap after GC");
        verifyRootsAndHeap();
    }

    if (spec->isConcurrent) {
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        dirtyEnd = dvmGetRelativeTimeMsec();
    }
    dvmHeapSweepUnmarkedObjects(spec->isPartial, spec->isConcurrent,
                                &numObjectsFreed, &numBytesFreed);
    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();
    if (spec->isConcurrent) {
        dvmLockHeap();
    }

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();

    currAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    currFootprint = dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);

    dvmMethodTraceGCEnd();
    LOGV_HEAP("GC finished");

    gcHeap->gcRunning = false;

    LOGV_HEAP("Resuming threads");

    if (spec->isConcurrent) {
        /*
         * Wake up any threads that blocked after a failed allocation
         * request.
         */
        dvmBroadcastCond(&gDvm.gcHeapCond);
    }

    if (!spec->isConcurrent) {
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        dirtyEnd = dvmGetRelativeTimeMsec();
        /*
         * Restore the original thread scheduling priority if it was
         * changed at the start of the current garbage collection.
         */
        if (oldThreadPriority != INT_MAX) {
            os_lowerThreadPriority(oldThreadPriority);
        }
    }

    /*
     * Move queue of pending references back into Java.
     */
    dvmEnqueueClearedReferences(&gDvm.gcHeap->clearedReferences);

    gcEnd = dvmGetRelativeTimeMsec();
    percentFree = 100 - (size_t)(100.0f * (float)currAllocated / currFootprint);
    if (!spec->isConcurrent) {
        u4 markSweepTime = dirtyEnd - rootStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             markSweepTime, gcTime);
    } else {
        u4 rootTime = rootEnd - rootStart;
        u4 dirtyTime = dirtyEnd - dirtyStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums+%ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             rootTime, dirtyTime, gcTime);
    }
    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM");
        dvmDdmSendHeapSegments(false, true);
    }
}

/*
 * If the concurrent GC is running, wait for it to finish.  The caller
 * must hold the heap lock.
 *
 * Note: the second dvmChangeStatus() could stall if we were in RUNNING
 * on entry, and some other thread has asked us to suspend.  In that
 * case we will be suspended with the heap lock held, which can lead to
 * deadlock if the other thread tries to do something with the managed heap.
 * For example, the debugger might suspend us and then execute a method that
 * allocates memory.  We can avoid this situation by releasing the lock
 * before self-suspending.  (The developer can work around this specific
 * situation by single-stepping the VM.  Alternatively, we could disable
 * concurrent GC when the debugger is attached, but that might change
 * behavior more than is desirable.)
 *
 * This should not be a problem in production, because any GC-related
 * activity will grab the lock before issuing a suspend-all.  (We may briefly
 * suspend when the GC thread calls dvmUnlockHeap before dvmResumeAllThreads,
 * but there's no risk of deadlock.)
 */
void dvmWaitForConcurrentGcToComplete()
{
    Thread *self = dvmThreadSelf();
    assert(self != NULL);
    u4 start = dvmGetRelativeTimeMsec();
    while (gDvm.gcHeap->gcRunning) {
        ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }
    u4 end = dvmGetRelativeTimeMsec();
    ALOGD("WAIT_FOR_CONCURRENT_GC blocked %ums", end - start);
}
    729