/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_DALVIK

/*
 * Garbage-collecting memory allocator.
 */
#include "Dalvik.h"
#include "alloc/HeapBitmap.h"
#include "alloc/Verify.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
#include "alloc/HeapSource.h"
#include "alloc/MarkSweep.h"
#include "os/os.h"

#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <limits.h>
#include <errno.h>

#include <cutils/trace.h>

static const GcSpec kGcForMallocSpec = {
    true,  /* isPartial */
    false, /* isConcurrent */
    true,  /* doPreserve */
    "GC_FOR_ALLOC"
};

const GcSpec *GC_FOR_MALLOC = &kGcForMallocSpec;

static const GcSpec kGcConcurrentSpec = {
    true,  /* isPartial */
    true,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_CONCURRENT"
};

const GcSpec *GC_CONCURRENT = &kGcConcurrentSpec;

static const GcSpec kGcExplicitSpec = {
    false, /* isPartial */
    true,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_EXPLICIT"
};

const GcSpec *GC_EXPLICIT = &kGcExplicitSpec;

static const GcSpec kGcBeforeOomSpec = {
    false, /* isPartial */
    false, /* isConcurrent */
    false, /* doPreserve */
    "GC_BEFORE_OOM"
};

const GcSpec *GC_BEFORE_OOM = &kGcBeforeOomSpec;

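/*
 * Usage sketch: callers hold the heap lock and hand one of the specs
 * above to dvmCollectGarbageInternal().  A hypothetical explicit
 * collection request, for instance, would look like:
 *
 *     dvmLockHeap();
 *     dvmCollectGarbageInternal(GC_EXPLICIT);
 *     dvmUnlockHeap();
 *
 * gcForMalloc() below follows this pattern with GC_FOR_MALLOC or
 * GC_BEFORE_OOM, depending on whether SoftReferences must be cleared.
 */
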
/*
 * Initialize the GC heap.
 *
 * Returns true if successful, false otherwise.
 */
bool dvmHeapStartup()
{
    GcHeap *gcHeap;

    if (gDvm.heapGrowthLimit == 0) {
        gDvm.heapGrowthLimit = gDvm.heapMaximumSize;
    }

    gcHeap = dvmHeapSourceStartup(gDvm.heapStartingSize,
                                  gDvm.heapMaximumSize,
                                  gDvm.heapGrowthLimit);
    if (gcHeap == NULL) {
        return false;
    }
    gcHeap->ddmHpifWhen = 0;
    gcHeap->ddmHpsgWhen = 0;
    gcHeap->ddmHpsgWhat = 0;
    gcHeap->ddmNhsgWhen = 0;
    gcHeap->ddmNhsgWhat = 0;
    gDvm.gcHeap = gcHeap;

    /* Set up the lists we'll use for cleared reference objects.
     */
    gcHeap->clearedReferences = NULL;

    if (!dvmCardTableStartup(gDvm.heapMaximumSize, gDvm.heapGrowthLimit)) {
        LOGE_HEAP("card table startup failed.");
        return false;
    }

    return true;
}

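/*
 * Sizing sketch (assumption: the standard VM option plumbing): the
 * three values above typically come from -Xms (heapStartingSize),
 * -Xmx (heapMaximumSize), and the device's heap growth limit, with
 * startingSize <= growthLimit <= maximumSize.  An unset growth limit
 * (0) falls back to the full maximum, as handled at the top of
 * dvmHeapStartup().
 */
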
bool dvmHeapStartupAfterZygote()
{
    return dvmHeapSourceStartupAfterZygote();
}

void dvmHeapShutdown()
{
//TODO: make sure we're locked
    if (gDvm.gcHeap != NULL) {
        dvmCardTableShutdown();
        /* Destroy the heap.  Any outstanding pointers will point to
         * unmapped memory (unless/until someone else maps it).  This
         * frees gDvm.gcHeap as a side-effect.
         */
        dvmHeapSourceShutdown(&gDvm.gcHeap);
    }
}

/*
 * Shut down any threads internal to the heap.
 */
void dvmHeapThreadShutdown()
{
    dvmHeapSourceThreadShutdown();
}

/*
 * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
 * we're going to have to wait on the mutex.  (Sitting in VMWAIT keeps
 * a GC's suspend-all from stalling on a thread that is merely blocked
 * waiting for the heap lock.)
 */
bool dvmLockHeap()
{
    if (dvmTryLockMutex(&gDvm.gcHeapLock) != 0) {
        Thread *self;
        ThreadStatus oldStatus;

        self = dvmThreadSelf();
        oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmLockMutex(&gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }

    return true;
}

void dvmUnlockHeap()
{
    dvmUnlockMutex(&gDvm.gcHeapLock);
}

/* Run a blocking garbage collection, which may grow the
 * heap as a side-effect if the live set is large.
 */
static void gcForMalloc(bool clearSoftReferences)
{
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
    /* This may adjust the soft limit as a side-effect.
     */
    const GcSpec *spec = clearSoftReferences ? GC_BEFORE_OOM : GC_FOR_MALLOC;
    dvmCollectGarbageInternal(spec);
}

/* Try as hard as possible to allocate some memory.
 */
static void *tryMalloc(size_t size)
{
    void *ptr;

//TODO: figure out better heuristics
//    There will be a lot of churn if someone allocates a bunch of
//    big objects in a row, and we hit the frag case each time.
//    A full GC for each.
//    Maybe we grow the heap in bigger leaps
//    Maybe we skip the GC if the size is large and we did one recently
//      (number of allocations ago) (watch for thread effects)
//    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
//      (or, at least, there are only 0-5 objects swept each time)

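    /* Allocation escalation, in order (a summary of the code below):
     *   1. plain allocation from the heap source
     *   2. wait for the concurrent GC to finish, or run a foreground
     *      GC, then retry
     *   3. retry once more, allowing the heap to grow
     *   4. GC again, this time clearing SoftReferences, and retry with
     *      growth allowed
     *   5. give up and return NULL; the caller throws OutOfMemoryError
     */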
    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /*
     * The allocation failed.  If the GC is running, block until it
     * completes and retry.
     */
    if (gDvm.gcHeap->gcRunning) {
        /*
         * The GC is concurrently tracing the heap.  Release the heap
         * lock, wait for the GC to complete, and retry the allocation.
         */
        dvmWaitForConcurrentGcToComplete();
    } else {
        /*
         * Try a foreground GC since a concurrent GC is not currently running.
         */
        gcForMalloc(false);
    }

    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /* Even that didn't work; this is an exceptional state.
     * Try harder, growing the heap if necessary.
     */
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        size_t newHeapSize;

        newHeapSize = dvmHeapSourceGetIdealFootprint();
//TODO: may want to grow a little bit more so that the amount of free
//      space is equal to the old free space + the utilization slop for
//      the new allocation.
        LOGI_HEAP("Grow heap (frag case) to "
                "%zu.%03zuMB for %zu-byte allocation",
                FRACTIONAL_MB(newHeapSize), size);
        return ptr;
    }

    /* Most allocations should have succeeded by now, so the heap
     * is really full, really fragmented, or the requested size is
     * really big.  Do another GC, collecting SoftReferences this
     * time.  The VM spec requires that all SoftReferences have
     * been collected and cleared before throwing an OOME.
     */
//TODO: wait for the finalizers from the previous GC to finish
    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation",
            size);
    gcForMalloc(true);
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        return ptr;
    }
//TODO: maybe wait for finalizers and try one last time

    LOGE_HEAP("Out of memory on a %zu-byte allocation.", size);
//TODO: tell the HeapSource to dump its state
    dvmDumpThread(dvmThreadSelf(), false);

    return NULL;
}

/* Throw an OutOfMemoryError if there's a thread to attach it to.
 * Avoid recursing.
 *
 * The caller must not be holding the heap lock, or else the allocations
 * in dvmThrowException() will deadlock.
 */
static void throwOOME()
{
    Thread *self;

    if ((self = dvmThreadSelf()) != NULL) {
        /* If the current (failing) dvmMalloc() happened as part of thread
         * creation/attachment before the thread became part of the root set,
         * we can't rely on the thread-local trackedAlloc table, so
         * we can't keep track of a real allocated OOME object.  But, since
         * the thread is in the process of being created, it won't have
         * a useful stack anyway, so we may as well make things easier
         * by throwing the (stackless) pre-built OOME.
         */
        if (dvmIsOnThreadList(self) && !self->throwingOOME) {
            /* Let ourselves know that we tried to throw an OOM
             * error in the normal way in case we run out of
             * memory trying to allocate it inside dvmThrowException().
             */
            self->throwingOOME = true;

            /* Don't include a description string;
             * one fewer allocation.
             */
            dvmThrowOutOfMemoryError(NULL);
        } else {
            /*
             * This thread has already tried to throw an OutOfMemoryError,
             * which probably means that we're running out of memory
             * while recursively trying to throw.
             *
             * To avoid any more allocation attempts, "throw" a pre-built
             * OutOfMemoryError object (which won't have a useful stack trace).
             *
             * Note that since this call can't possibly allocate anything,
             * we don't care about the state of self->throwingOOME
             * (which will usually already be set).
             */
            dvmSetException(self, gDvm.outOfMemoryObj);
        }
        /* We're done with the possible recursion.
         */
        self->throwingOOME = false;
    }
}

/*
 * Allocate storage on the GC heap.  We guarantee 8-byte alignment.
 *
 * The new storage is zeroed out.
 *
 * Note that, in rare circumstances (JNI AttachCurrentThread), this can
 * be called from a non-VM thread while a GC is in progress: a thread
 * attaching itself through JNI needs to allocate some objects before it
 * joins the root set.  If this becomes annoying to deal with, we can
 * block it at the source, but holding the allocation mutex should be
 * enough.
 *
 * Use ALLOC_DONT_TRACK when we either don't want to track an allocation
 * (because it's being done for the interpreter "new" operation and will
 * be part of the root set immediately) or we can't (because this allocation
 * is for a brand new thread).
 *
 * Returns NULL and throws an exception on failure.
 *
 * TODO: don't do a GC if the debugger thinks all threads are suspended
 */
void* dvmMalloc(size_t size, int flags)
{
    void *ptr;

    dvmLockHeap();

    /* Try as hard as possible to allocate some memory.
     */
    ptr = tryMalloc(size);
    if (ptr != NULL) {
        /* We've got the memory.
         */
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.allocCount++;
            gDvm.allocProf.allocSize += size;
            if (self != NULL) {
                self->allocProf.allocCount++;
                self->allocProf.allocSize += size;
            }
        }
    } else {
        /* The allocation failed.
         */
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.failedAllocCount++;
            gDvm.allocProf.failedAllocSize += size;
            if (self != NULL) {
                self->allocProf.failedAllocCount++;
                self->allocProf.failedAllocSize += size;
            }
        }
    }

    dvmUnlockHeap();

    if (ptr != NULL) {
        /*
         * If the caller hasn't asked us not to track it, add it to the
         * internal tracking list.
         */
        if ((flags & ALLOC_DONT_TRACK) == 0) {
            dvmAddTrackedAlloc((Object*)ptr, NULL);
        }
    } else {
        /*
         * The allocation failed; throw an OutOfMemoryError.
         */
        throwOOME();
    }

    return ptr;
}

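/*
 * Usage sketch (hypothetical caller): a typical internal allocation
 * keeps the object in the tracked-allocation table until it is
 * reachable from the root set, then releases it:
 *
 *     Object* obj = (Object*)dvmMalloc(totalSize, ALLOC_DEFAULT);
 *     if (obj != NULL) {
 *         ... initialize and publish obj ...
 *         dvmReleaseTrackedAlloc(obj, NULL);
 *     }
 *
 * ALLOC_DEFAULT and dvmReleaseTrackedAlloc() are assumed here from the
 * allocator's public interface; ALLOC_DONT_TRACK skips the
 * dvmAddTrackedAlloc() call entirely.
 */
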
/*
 * Returns true iff <obj> points to a valid allocated object.
 */
bool dvmIsValidObject(const Object* obj)
{
    /* Don't bother if it's NULL or not 8-byte aligned.
     */
    if (obj != NULL && ((uintptr_t)obj & (8-1)) == 0) {
        /* Even if the heap isn't locked, this shouldn't return
         * any false negatives.  The only mutation that could
         * be happening is allocation, which means that another
         * thread could be in the middle of a read-modify-write
         * to add a new bit for a new object.  However, that
         * RMW will have completed by the time any other thread
         * could possibly see the new pointer, so there is no
         * danger of dvmIsValidObject() being called on a valid
         * pointer whose bit isn't set.
         *
         * Freeing will only happen during the sweep phase, which
         * only happens while the heap is locked.
         */
        return dvmHeapSourceContains(obj);
    }
    return false;
}

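/*
 * Example: debug code typically uses this as a cheap sanity check
 * before trusting a pointer recovered from a raw word, e.g.
 *
 *     assert(dvmIsValidObject(obj));
 */
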
size_t dvmObjectSizeInHeap(const Object *obj)
{
    return dvmHeapSourceChunkSize(obj);
}

static void verifyRootsAndHeap()
{
    dvmVerifyRoots();
    dvmVerifyBitmap(dvmHeapSourceGetLiveBits());
}

/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
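/*
 * Collection outline (a summary of the code below):
 *   1. suspend all threads and mark the root set
 *   2. concurrent only: clear the card table, resume threads, and trace
 *      from the roots with the heap unlocked
 *   3. concurrent only: suspend again, re-mark the roots, and re-scan
 *      objects on dirty cards
 *   4. process soft/weak/finalizer/phantom references
 *   5. swap the mark and live bitmaps and sweep unmarked objects
 *   6. grow the heap for the target utilization, resume threads, and
 *      log collection statistics
 */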
void dvmCollectGarbageInternal(const GcSpec* spec)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    u4 gcEnd = 0;
    u4 rootStart = 0, rootEnd = 0;
    u4 dirtyStart = 0, dirtyEnd = 0;
    size_t numObjectsFreed, numBytesFreed;
    size_t currAllocated, currFootprint;
    size_t percentFree;
    int oldThreadPriority = INT_MAX;

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC");
        return;
    }

    // Trace the beginning of the top-level GC.
    if (spec == GC_FOR_MALLOC) {
        ATRACE_BEGIN("GC (alloc)");
    } else if (spec == GC_CONCURRENT) {
        ATRACE_BEGIN("GC (concurrent)");
    } else if (spec == GC_EXPLICIT) {
        ATRACE_BEGIN("GC (explicit)");
    } else if (spec == GC_BEFORE_OOM) {
        ATRACE_BEGIN("GC (before OOM)");
    } else {
        ATRACE_BEGIN("GC (unknown)");
    }

    gcHeap->gcRunning = true;

    rootStart = dvmGetRelativeTimeMsec();
    ATRACE_BEGIN("GC: Threads Suspended"); // Suspend A
    dvmSuspendAllThreads(SUSPEND_FOR_GC);

    /*
     * If we are not marking concurrently, raise the priority of the
     * thread performing the garbage collection.
     */
    if (!spec->isConcurrent) {
        oldThreadPriority = os_raiseThreadPriority();
    }
    if (gDvm.preVerify) {
        LOGV_HEAP("Verifying roots and heap before GC");
        verifyRootsAndHeap();
    }

    dvmMethodTraceGCBegin();

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep(spec->isPartial)) {
        ATRACE_END(); // Suspend A
        ATRACE_END(); // Top-level GC
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    assert(gcHeap->softReferences == NULL);
    assert(gcHeap->weakReferences == NULL);
    assert(gcHeap->finalizerReferences == NULL);
    assert(gcHeap->phantomReferences == NULL);
    assert(gcHeap->clearedReferences == NULL);

    if (spec->isConcurrent) {
        /*
         * Resume threads while tracing from the roots.  We unlock the
         * heap to allow mutator threads to allocate from free space.
         */
        dvmClearCardTable();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        ATRACE_END(); // Suspend A
        rootEnd = dvmGetRelativeTimeMsec();
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();

    if (spec->isConcurrent) {
        /*
         * Re-acquire the heap lock and perform the final thread
         * suspension.
         */
        dirtyStart = dvmGetRelativeTimeMsec();
        dvmLockHeap();
        ATRACE_BEGIN("GC: Threads Suspended"); // Suspend B
        dvmSuspendAllThreads(SUSPEND_FOR_GC);
        /*
         * As no barrier intercepts root updates, we conservatively
         * assume all roots may be gray and re-mark them.
         */
        dvmHeapReMarkRootSet();
        /*
         * With the exception of reference objects and weak interned
         * strings, all gray objects should now be on dirty cards.
         */
        if (gDvm.verifyCardTable) {
            dvmVerifyCardTable();
        }
        /*
         * Recursively mark gray objects pointed to by the roots or by
         * heap objects dirtied during the concurrent mark.
         */
        dvmHeapReScanMarkedObjects();
    }

    /*
     * All strongly-reachable objects have now been marked.  Process
     * weakly-reachable objects discovered while tracing.
     */
    dvmHeapProcessReferences(&gcHeap->softReferences,
                             spec->doPreserve == false,
                             &gcHeap->weakReferences,
                             &gcHeap->finalizerReferences,
                             &gcHeap->phantomReferences);

#if defined(WITH_JIT)
    /*
     * Patching a chaining cell is very cheap as it only updates 4 words.
     * It's the overhead of stopping all threads and synchronizing the
     * I/D cache that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    LOGD_HEAP("Sweeping...");

    dvmHeapSweepSystemWeaks();

    /*
     * Live objects have a bit set in the mark bitmap, so swap the mark
     * and live bitmaps.  The sweep can proceed concurrently, viewing
     * the new live bitmap as the old mark bitmap, and vice versa.
     */
    dvmHeapSourceSwapBitmaps();

    if (gDvm.postVerify) {
        LOGV_HEAP("Verifying roots and heap after GC");
        verifyRootsAndHeap();
    }

    if (spec->isConcurrent) {
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        ATRACE_END(); // Suspend B
        dirtyEnd = dvmGetRelativeTimeMsec();
    }
    dvmHeapSweepUnmarkedObjects(spec->isPartial, spec->isConcurrent,
                                &numObjectsFreed, &numBytesFreed);
    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();
    if (spec->isConcurrent) {
        dvmLockHeap();
    }

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();

    currAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    currFootprint = dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);

    dvmMethodTraceGCEnd();
    LOGV_HEAP("GC finished");

    gcHeap->gcRunning = false;

    LOGV_HEAP("Resuming threads");

    if (spec->isConcurrent) {
        /*
         * Wake up any threads that blocked after a failed allocation
         * request.
         */
        dvmBroadcastCond(&gDvm.gcHeapCond);
    }

    if (!spec->isConcurrent) {
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        ATRACE_END(); // Suspend A
        dirtyEnd = dvmGetRelativeTimeMsec();
        /*
         * Restore the original thread scheduling priority if it was
         * changed at the start of the current garbage collection.
         */
        if (oldThreadPriority != INT_MAX) {
            os_lowerThreadPriority(oldThreadPriority);
        }
    }

    /*
     * Move the queue of pending references back into Java.
     */
    dvmEnqueueClearedReferences(&gDvm.gcHeap->clearedReferences);

    gcEnd = dvmGetRelativeTimeMsec();
    percentFree = 100 - (size_t)(100.0f * (float)currAllocated / currFootprint);
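    /* Worked example: 6MB allocated against an 8MB footprint yields
     * 100 - 75 = 25% free. */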
    if (!spec->isConcurrent) {
        u4 markSweepTime = dirtyEnd - rootStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             markSweepTime, gcTime);
    } else {
        u4 rootTime = rootEnd - rootStart;
        u4 dirtyTime = dirtyEnd - dirtyStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums+%ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             rootTime, dirtyTime, gcTime);
    }
    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM");
        dvmDdmSendHeapSegments(false, true);
    }

    ATRACE_END(); // Top-level GC
}

/*
 * If the concurrent GC is running, wait for it to finish.  The caller
 * must hold the heap lock.
 *
 * Note: the second dvmChangeStatus() could stall if we were in RUNNING
 * on entry, and some other thread has asked us to suspend.  In that
 * case we will be suspended with the heap lock held, which can lead to
 * deadlock if the other thread tries to do something with the managed heap.
 * For example, the debugger might suspend us and then execute a method that
 * allocates memory.  We can avoid this situation by releasing the lock
 * before self-suspending.  (The developer can work around this specific
 * situation by single-stepping the VM.  Alternatively, we could disable
 * concurrent GC when the debugger is attached, but that might change
 * behavior more than is desirable.)
 *
 * This should not be a problem in production, because any GC-related
 * activity will grab the lock before issuing a suspend-all.  (We may briefly
 * suspend when the GC thread calls dvmUnlockHeap before dvmResumeAllThreads,
 * but there's no risk of deadlock.)
 */
bool dvmWaitForConcurrentGcToComplete()
{
    ATRACE_BEGIN("GC: Wait For Concurrent");
    bool waited = gDvm.gcHeap->gcRunning;
    Thread *self = dvmThreadSelf();
    assert(self != NULL);
    u4 start = dvmGetRelativeTimeMsec();
    while (gDvm.gcHeap->gcRunning) {
        ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }
    u4 end = dvmGetRelativeTimeMsec();
    if (end - start > 0) {
        ALOGD("WAIT_FOR_CONCURRENT_GC blocked %ums", end - start);
    }
    ATRACE_END();
    return waited;
}
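
/*
 * Caller sketch: tryMalloc() above is the canonical user.  With the
 * heap lock held, it checks gcRunning and blocks here until the
 * concurrent collector signals gDvm.gcHeapCond, then retries its
 * allocation:
 *
 *     dvmLockHeap();
 *     if (gDvm.gcHeap->gcRunning) {
 *         dvmWaitForConcurrentGcToComplete();
 *     }
 *     ptr = dvmHeapSourceAlloc(size);
 *     dvmUnlockHeap();
 */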
    747