/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_DALVIK

/*
 * Garbage-collecting memory allocator.
 */
#include "Dalvik.h"
#include "alloc/HeapBitmap.h"
#include "alloc/Verify.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
#include "alloc/HeapSource.h"
#include "alloc/MarkSweep.h"
#include "os/os.h"

#include <sys/time.h>
#include <sys/resource.h>
#include <limits.h>
#include <errno.h>

#include <cutils/trace.h>

static const GcSpec kGcForMallocSpec = {
    true,  /* isPartial */
    false,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_FOR_ALLOC"
};

const GcSpec *GC_FOR_MALLOC = &kGcForMallocSpec;

static const GcSpec kGcConcurrentSpec = {
    true,  /* isPartial */
    true,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_CONCURRENT"
};

const GcSpec *GC_CONCURRENT = &kGcConcurrentSpec;

static const GcSpec kGcExplicitSpec = {
    false,  /* isPartial */
    true,  /* isConcurrent */
    true,  /* doPreserve */
    "GC_EXPLICIT"
};

const GcSpec *GC_EXPLICIT = &kGcExplicitSpec;

static const GcSpec kGcBeforeOomSpec = {
    false,  /* isPartial */
    false,  /* isConcurrent */
    false,  /* doPreserve */
    "GC_BEFORE_OOM"
};

const GcSpec *GC_BEFORE_OOM = &kGcBeforeOomSpec;

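/*
 * Summary of the specs above: GC_FOR_MALLOC is the blocking, partial
 * collection run when an allocation fails; GC_CONCURRENT is the partial
 * collection that traces while mutators run; GC_EXPLICIT serves explicit
 * (System.gc-style) requests with a full, concurrent collection; and
 * GC_BEFORE_OOM is the last-ditch full, blocking collection that also
 * clears SoftReferences before an OutOfMemoryError is thrown.  "Partial"
 * collections skip the zygote heap, and doPreserve controls whether
 * softly-reachable objects are preserved (see dvmHeapProcessReferences).
 */
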
/*
 * Initialize the GC heap.
 *
 * Returns true if successful, false otherwise.
 */
bool dvmHeapStartup()
{
    GcHeap *gcHeap;

    if (gDvm.heapGrowthLimit == 0) {
        gDvm.heapGrowthLimit = gDvm.heapMaximumSize;
    }

    gcHeap = dvmHeapSourceStartup(gDvm.heapStartingSize,
                                  gDvm.heapMaximumSize,
                                  gDvm.heapGrowthLimit);
    if (gcHeap == NULL) {
        return false;
    }
    gcHeap->ddmHpifWhen = 0;
    gcHeap->ddmHpsgWhen = 0;
    gcHeap->ddmHpsgWhat = 0;
    gcHeap->ddmNhsgWhen = 0;
    gcHeap->ddmNhsgWhat = 0;
    gDvm.gcHeap = gcHeap;

    /* Set up the lists we'll use for cleared reference objects.
     */
    gcHeap->clearedReferences = NULL;

    if (!dvmCardTableStartup(gDvm.heapMaximumSize, gDvm.heapGrowthLimit)) {
        LOGE_HEAP("card table startup failed.");
        return false;
    }

    return true;
}

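/*
 * Post-fork heap setup.  After the zygote process forks, the heap source
 * creates a new "active" heap for the child so that new allocations land
 * there and the pages inherited from the zygote can remain shared.  This
 * is a thin wrapper around that heap-source step.
 */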
bool dvmHeapStartupAfterZygote()
{
    return dvmHeapSourceStartupAfterZygote();
}

void dvmHeapShutdown()
{
//TODO: make sure we're locked
    if (gDvm.gcHeap != NULL) {
        dvmCardTableShutdown();
        /* Destroy the heap.  Any outstanding pointers will point to
         * unmapped memory (unless/until someone else maps it).  This
         * frees gDvm.gcHeap as a side-effect.
         */
        dvmHeapSourceShutdown(&gDvm.gcHeap);
    }
}

/*
 * Shut down any threads internal to the heap.
 */
void dvmHeapThreadShutdown()
{
    dvmHeapSourceThreadShutdown();
}

/*
 * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
 * we're going to have to wait on the mutex.
 */
bool dvmLockHeap()
{
    if (dvmTryLockMutex(&gDvm.gcHeapLock) != 0) {
        Thread *self;
        ThreadStatus oldStatus;

        self = dvmThreadSelf();
        oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmLockMutex(&gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }

    return true;
}

void dvmUnlockHeap()
{
    dvmUnlockMutex(&gDvm.gcHeapLock);
}

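/*
 * The heap lock protects the collector state and the heap source.  Code
 * that triggers a collection directly typically brackets it with the pair
 * above, e.g. (sketch):
 *
 *     dvmLockHeap();
 *     dvmCollectGarbageInternal(GC_EXPLICIT);
 *     dvmUnlockHeap();
 */
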
/* Do a full garbage collection, which may grow the
 * heap as a side-effect if the live set is large.
 */
static void gcForMalloc(bool clearSoftReferences)
{
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
    /* This may adjust the soft limit as a side-effect.
     */
    const GcSpec *spec = clearSoftReferences ? GC_BEFORE_OOM : GC_FOR_MALLOC;
    dvmCollectGarbageInternal(spec);
}

/* Try as hard as possible to allocate some memory.
 */
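/* The retry sequence below is, roughly: try a plain allocation; on failure,
 * wait for a running concurrent GC (or run a blocking GC) and retry; then
 * retry with heap growth allowed; then collect once more with
 * SoftReferences cleared and make a final growing attempt before giving up.
 */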
static void *tryMalloc(size_t size)
{
    void *ptr;

//TODO: figure out better heuristics
//    There will be a lot of churn if someone allocates a bunch of
//    big objects in a row, and we hit the frag case each time.
//    A full GC for each.
//    Maybe we grow the heap in bigger leaps
//    Maybe we skip the GC if the size is large and we did one recently
//      (number of allocations ago) (watch for thread effects)
//    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
//      (or, at least, there are only 0-5 objects swept each time)

    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /*
     * The allocation failed.  If the GC is running, block until it
     * completes and retry.
     */
    if (gDvm.gcHeap->gcRunning) {
        /*
         * The GC is concurrently tracing the heap.  Release the heap
         * lock, wait for the GC to complete, and retry the allocation.
         */
        dvmWaitForConcurrentGcToComplete();
    } else {
      /*
       * Try a foreground GC since a concurrent GC is not currently running.
       */
      gcForMalloc(false);
    }

    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /* Even that didn't work;  this is an exceptional state.
     * Try harder, growing the heap if necessary.
     */
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        size_t newHeapSize;

        newHeapSize = dvmHeapSourceGetIdealFootprint();
//TODO: may want to grow a little bit more so that the amount of free
//      space is equal to the old free space + the utilization slop for
//      the new allocation.
        LOGI_HEAP("Grow heap (frag case) to "
                "%zu.%03zuMB for %zu-byte allocation",
                FRACTIONAL_MB(newHeapSize), size);
        return ptr;
    }

    /* Most allocations should have succeeded by now, so the heap
     * is really full, really fragmented, or the requested size is
     * really big.  Do another GC, collecting SoftReferences this
     * time.  The VM spec requires that all SoftReferences have
     * been collected and cleared before throwing an OOME.
     */
//TODO: wait for the finalizers from the previous GC to finish
    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation",
            size);
    gcForMalloc(true);
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        return ptr;
    }
//TODO: maybe wait for finalizers and try one last time

    LOGE_HEAP("Out of memory on a %zd-byte allocation.", size);
//TODO: tell the HeapSource to dump its state
    dvmDumpThread(dvmThreadSelf(), false);

    return NULL;
}

/* Throw an OutOfMemoryError if there's a thread to attach it to.
 * Avoid recursing.
 *
 * The caller must not be holding the heap lock, or else the allocations
 * in dvmThrowException() will deadlock.
 */
static void throwOOME()
{
    Thread *self;

    if ((self = dvmThreadSelf()) != NULL) {
        /* If the current (failing) dvmMalloc() happened as part of thread
         * creation/attachment before the thread became part of the root set,
         * we can't rely on the thread-local trackedAlloc table, so
         * we can't keep track of a real allocated OOME object.  But, since
         * the thread is in the process of being created, it won't have
         * a useful stack anyway, so we may as well make things easier
         * by throwing the (stackless) pre-built OOME.
         */
        if (dvmIsOnThreadList(self) && !self->throwingOOME) {
            /* Let ourselves know that we tried to throw an OOM
             * error in the normal way in case we run out of
             * memory trying to allocate it inside dvmThrowException().
             */
            self->throwingOOME = true;

            /* Don't include a description string;
             * one fewer allocation.
             */
            dvmThrowOutOfMemoryError(NULL);
        } else {
            /*
             * This thread has already tried to throw an OutOfMemoryError,
             * which probably means that we're running out of memory
             * while recursively trying to throw.
             *
             * To avoid any more allocation attempts, "throw" a pre-built
             * OutOfMemoryError object (which won't have a useful stack trace).
             *
             * Note that since this call can't possibly allocate anything,
             * we don't care about the state of self->throwingOOME
             * (which will usually already be set).
             */
            dvmSetException(self, gDvm.outOfMemoryObj);
        }
        /* We're done with the possible recursion.
         */
        self->throwingOOME = false;
    }
}

/*
 * Allocate storage on the GC heap.  We guarantee 8-byte alignment.
 *
 * The new storage is zeroed out.
 *
 * Note that, in rare cases, this could get called while a GC is in
 * progress.  If a non-VM thread tries to attach itself through JNI,
 * it will need to allocate some objects.  If this becomes annoying to
 * deal with, we can block it at the source, but holding the allocation
 * mutex should be enough.
 *
 * In rare circumstances (JNI AttachCurrentThread) we can be called
 * from a non-VM thread.
 *
 * Use ALLOC_DONT_TRACK when we either don't want to track an allocation
 * (because it's being done for the interpreter "new" operation and will
 * be part of the root set immediately) or we can't (because this allocation
 * is for a brand new thread).
 *
 * Returns NULL and throws an exception on failure.
 *
 * TODO: don't do a GC if the debugger thinks all threads are suspended
 */
void* dvmMalloc(size_t size, int flags)
{
    void *ptr;

    dvmLockHeap();

    /* Try as hard as possible to allocate some memory.
     */
    ptr = tryMalloc(size);
    if (ptr != NULL) {
        /* We've got the memory.
         */
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.allocCount++;
            gDvm.allocProf.allocSize += size;
            if (self != NULL) {
                self->allocProf.allocCount++;
                self->allocProf.allocSize += size;
            }
        }
    } else {
        /* The allocation failed.
         */

        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.failedAllocCount++;
            gDvm.allocProf.failedAllocSize += size;
            if (self != NULL) {
                self->allocProf.failedAllocCount++;
                self->allocProf.failedAllocSize += size;
            }
        }
    }

    dvmUnlockHeap();

    if (ptr != NULL) {
        /*
         * If caller hasn't asked us not to track it, add it to the
         * internal tracking list.
         */
        if ((flags & ALLOC_DONT_TRACK) == 0) {
            dvmAddTrackedAlloc((Object*)ptr, NULL);
        }
    } else {
        /*
         * The allocation failed; throw an OutOfMemoryError.
         */
        throwOOME();
    }

    return ptr;
}

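/*
 * Typical use from the higher-level allocators is roughly (a sketch, not a
 * verbatim call site): allocate, initialize, then release the tracked
 * reference once the object is reachable some other way:
 *
 *     Object* obj = (Object*)dvmMalloc(totalSize, ALLOC_DEFAULT);
 *     if (obj != NULL) {
 *         // ... initialize the object ...
 *         dvmReleaseTrackedAlloc(obj, NULL);
 *     }
 */
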
/*
 * Returns true iff <obj> points to a valid allocated object.
 */
bool dvmIsValidObject(const Object* obj)
{
    /* Don't bother if it's NULL or not 8-byte aligned.
     */
    if (obj != NULL && ((uintptr_t)obj & (8-1)) == 0) {
        /* Even if the heap isn't locked, this shouldn't return
         * any false negatives.  The only mutation that could
         * be happening is allocation, which means that another
         * thread could be in the middle of a read-modify-write
         * to add a new bit for a new object.  However, that
         * RMW will have completed by the time any other thread
         * could possibly see the new pointer, so there is no
         * danger of dvmIsValidObject() being called on a valid
         * pointer whose bit isn't set.
         *
         * Freeing will only happen during the sweep phase, which
         * only happens while the heap is locked.
         */
        return dvmHeapSourceContains(obj);
    }
    return false;
}

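/*
 * Returns the number of bytes the heap source reserved for <obj>'s chunk;
 * this can be larger than the object itself.
 */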
size_t dvmObjectSizeInHeap(const Object *obj)
{
    return dvmHeapSourceChunkSize(obj);
}

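/*
 * Sanity-check the root set and the live bitmap.  Invoked before and/or
 * after a collection when gDvm.preVerify or gDvm.postVerify is enabled.
 */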
static void verifyRootsAndHeap()
{
    dvmVerifyRoots();
    dvmVerifyBitmap(dvmHeapSourceGetLiveBits());
}

/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
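/*
 * Rough phase structure for the concurrent case: suspend all threads, mark
 * the root set, clear the card table, resume threads and trace the heap
 * while mutators run (the write barrier dirties cards for objects they
 * modify), suspend again to re-mark the roots and re-scan objects on dirty
 * cards, process soft/weak/finalizer/phantom references, swap the mark and
 * live bitmaps, then sweep unmarked objects with the world resumed.  The
 * non-concurrent case does all of the marking and reference processing in
 * a single suspension.
 */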
void dvmCollectGarbageInternal(const GcSpec* spec)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    u4 gcEnd = 0;
    u4 rootStart = 0, rootEnd = 0;
    u4 dirtyStart = 0, dirtyEnd = 0;
    size_t numObjectsFreed, numBytesFreed;
    size_t currAllocated, currFootprint;
    size_t percentFree;
    int oldThreadPriority = INT_MAX;

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC");
        return;
    }

    // Trace the beginning of the top-level GC.
    if (spec == GC_FOR_MALLOC) {
        ATRACE_BEGIN("GC (alloc)");
    } else if (spec == GC_CONCURRENT) {
        ATRACE_BEGIN("GC (concurrent)");
    } else if (spec == GC_EXPLICIT) {
        ATRACE_BEGIN("GC (explicit)");
    } else if (spec == GC_BEFORE_OOM) {
        ATRACE_BEGIN("GC (before OOM)");
    } else {
        ATRACE_BEGIN("GC (unknown)");
    }

    gcHeap->gcRunning = true;

    rootStart = dvmGetRelativeTimeMsec();
    ATRACE_BEGIN("GC: Threads Suspended"); // Suspend A
    dvmSuspendAllThreads(SUSPEND_FOR_GC);

    /*
     * If we are not marking concurrently, raise the priority of the
     * thread performing the garbage collection.
     */
    if (!spec->isConcurrent) {
        oldThreadPriority = os_raiseThreadPriority();
    }
    if (gDvm.preVerify) {
        LOGV_HEAP("Verifying roots and heap before GC");
        verifyRootsAndHeap();
    }

    dvmMethodTraceGCBegin();

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep(spec->isPartial)) {
        ATRACE_END(); // Suspend A
        ATRACE_END(); // Top-level GC
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    assert(gcHeap->softReferences == NULL);
    assert(gcHeap->weakReferences == NULL);
    assert(gcHeap->finalizerReferences == NULL);
    assert(gcHeap->phantomReferences == NULL);
    assert(gcHeap->clearedReferences == NULL);

    if (spec->isConcurrent) {
        /*
         * Resume threads while tracing from the roots.  We unlock the
         * heap to allow mutator threads to allocate from free space.
         */
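        /*
         * Clearing the card table here means that only reference writes
         * performed after this point (recorded by the write barrier) need
         * to be re-scanned during the final remark pause below.
         */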
        dvmClearCardTable();
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        ATRACE_END(); // Suspend A
        rootEnd = dvmGetRelativeTimeMsec();
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();

    if (spec->isConcurrent) {
        /*
         * Re-acquire the heap lock and perform the final thread
         * suspension.
         */
        dirtyStart = dvmGetRelativeTimeMsec();
        dvmLockHeap();
        ATRACE_BEGIN("GC: Threads Suspended"); // Suspend B
        dvmSuspendAllThreads(SUSPEND_FOR_GC);
        /*
         * As no barrier intercepts root updates, we conservatively
         * assume all roots may be gray and re-mark them.
         */
        dvmHeapReMarkRootSet();
        /*
         * With the exception of reference objects and weak interned
         * strings, all gray objects should now be on dirty cards.
         */
        if (gDvm.verifyCardTable) {
            dvmVerifyCardTable();
        }
        /*
         * Recursively mark gray objects pointed to by the roots or by
         * heap objects dirtied during the concurrent mark.
         */
        dvmHeapReScanMarkedObjects();
    }

    /*
     * All strongly-reachable objects have now been marked.  Process
     * weakly-reachable objects discovered while tracing.
     */
    dvmHeapProcessReferences(&gcHeap->softReferences,
                             spec->doPreserve == false,
                             &gcHeap->weakReferences,
                             &gcHeap->finalizerReferences,
                             &gcHeap->phantomReferences);

#if defined(WITH_JIT)
    /*
     * Patching a chaining cell is very cheap as it only updates 4 words. It's
     * the overhead of stopping all threads and synchronizing the I/D cache
     * that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    LOGD_HEAP("Sweeping...");

    dvmHeapSweepSystemWeaks();

    /*
     * Live objects have a bit set in the mark bitmap; swap the mark
     * and live bitmaps.  The sweep can proceed concurrently, viewing
     * the new live bitmap as the old mark bitmap, and vice versa.
     */
    dvmHeapSourceSwapBitmaps();

    if (gDvm.postVerify) {
        LOGV_HEAP("Verifying roots and heap after GC");
        verifyRootsAndHeap();
    }

    if (spec->isConcurrent) {
        dvmUnlockHeap();
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        ATRACE_END(); // Suspend B
        dirtyEnd = dvmGetRelativeTimeMsec();
    }
    dvmHeapSweepUnmarkedObjects(spec->isPartial, spec->isConcurrent,
                                &numObjectsFreed, &numBytesFreed);
    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();
    if (spec->isConcurrent) {
        dvmLockHeap();
    }

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();

    currAllocated = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0);
    currFootprint = dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0);

    dvmMethodTraceGCEnd();
    LOGV_HEAP("GC finished");

    gcHeap->gcRunning = false;

    LOGV_HEAP("Resuming threads");

    if (spec->isConcurrent) {
        /*
         * Wake up any threads that blocked after a failed allocation
         * request.
         */
        dvmBroadcastCond(&gDvm.gcHeapCond);
    }

    if (!spec->isConcurrent) {
        dvmResumeAllThreads(SUSPEND_FOR_GC);
        ATRACE_END(); // Suspend A
        dirtyEnd = dvmGetRelativeTimeMsec();
        /*
         * Restore the original thread scheduling priority if it was
         * changed at the start of the current garbage collection.
         */
        if (oldThreadPriority != INT_MAX) {
            os_lowerThreadPriority(oldThreadPriority);
        }
    }

    /*
     * Move queue of pending references back into Java.
     */
    dvmEnqueueClearedReferences(&gDvm.gcHeap->clearedReferences);

    gcEnd = dvmGetRelativeTimeMsec();
    percentFree = 100 - (size_t)(100.0f * (float)currAllocated / currFootprint);
    if (!spec->isConcurrent) {
        u4 markSweepTime = dirtyEnd - rootStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             markSweepTime, gcTime);
    } else {
        u4 rootTime = rootEnd - rootStart;
        u4 dirtyTime = dirtyEnd - dirtyStart;
        u4 gcTime = gcEnd - rootStart;
        bool isSmall = numBytesFreed > 0 && numBytesFreed < 1024;
        ALOGD("%s freed %s%zdK, %d%% free %zdK/%zdK, paused %ums+%ums, total %ums",
             spec->reason,
             isSmall ? "<" : "",
             numBytesFreed ? MAX(numBytesFreed / 1024, 1) : 0,
             percentFree,
             currAllocated / 1024, currFootprint / 1024,
             rootTime, dirtyTime, gcTime);
    }
    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM");
        dvmDdmSendHeapSegments(false, true);
    }

    ATRACE_END(); // Top-level GC
}

/*
 * If the concurrent GC is running, wait for it to finish.  The caller
 * must hold the heap lock.
 *
 * Note: the second dvmChangeStatus() could stall if we were in RUNNING
 * on entry, and some other thread has asked us to suspend.  In that
 * case we will be suspended with the heap lock held, which can lead to
 * deadlock if the other thread tries to do something with the managed heap.
 * For example, the debugger might suspend us and then execute a method that
 * allocates memory.  We can avoid this situation by releasing the lock
 * before self-suspending.  (The developer can work around this specific
 * situation by single-stepping the VM.  Alternatively, we could disable
 * concurrent GC when the debugger is attached, but that might change
 * behavior more than is desirable.)
 *
 * This should not be a problem in production, because any GC-related
 * activity will grab the lock before issuing a suspend-all.  (We may briefly
 * suspend when the GC thread calls dvmUnlockHeap before dvmResumeAllThreads,
 * but there's no risk of deadlock.)
 */
bool dvmWaitForConcurrentGcToComplete()
{
    ATRACE_BEGIN("GC: Wait For Concurrent");
    bool waited = gDvm.gcHeap->gcRunning;
    Thread *self = dvmThreadSelf();
    assert(self != NULL);
    u4 start = dvmGetRelativeTimeMsec();
    while (gDvm.gcHeap->gcRunning) {
        ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }
    u4 end = dvmGetRelativeTimeMsec();
    if (end - start > 0) {
        ALOGD("WAIT_FOR_CONCURRENT_GC blocked %ums", end - start);
    }
    ATRACE_END();
    return waited;
}
    746