/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Garbage-collecting memory allocator.
 */
#include "Dalvik.h"
#include "alloc/HeapTable.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/DdmHeap.h"
#include "alloc/HeapSource.h"
#include "alloc/MarkSweep.h"

#include "utils/threads.h"      // need Android thread priorities
#define kInvalidPriority        10000

#include <cutils/sched_policy.h>

#include <sys/time.h>
#include <sys/resource.h>
#include <limits.h>
#include <errno.h>

#define kNonCollectableRefDefault   16
#define kFinalizableRefDefault      128

static const char* GcReasonStr[] = {
    [GC_FOR_MALLOC] = "GC_FOR_MALLOC",
    [GC_EXPLICIT] = "GC_EXPLICIT",
    [GC_EXTERNAL_ALLOC] = "GC_EXTERNAL_ALLOC",
    [GC_HPROF_DUMP_HEAP] = "GC_HPROF_DUMP_HEAP"
};

/*
 * Initialize the GC heap.
 *
 * Returns true if successful, false otherwise.
 */
bool dvmHeapStartup()
{
    GcHeap *gcHeap;

#if defined(WITH_ALLOC_LIMITS)
    gDvm.checkAllocLimits = false;
    gDvm.allocationLimit = -1;
#endif

    gcHeap = dvmHeapSourceStartup(gDvm.heapSizeStart, gDvm.heapSizeMax);
    if (gcHeap == NULL) {
        return false;
    }
    gcHeap->heapWorkerCurrentObject = NULL;
    gcHeap->heapWorkerCurrentMethod = NULL;
    gcHeap->heapWorkerInterpStartTime = 0LL;
    gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
    gcHeap->softReferenceHeapSizeThreshold = gDvm.heapSizeStart;
    gcHeap->ddmHpifWhen = 0;
    gcHeap->ddmHpsgWhen = 0;
    gcHeap->ddmHpsgWhat = 0;
    gcHeap->ddmNhsgWhen = 0;
    gcHeap->ddmNhsgWhat = 0;
#if WITH_HPROF
    gcHeap->hprofDumpOnGc = false;
    gcHeap->hprofContext = NULL;
#endif

    /* This needs to be set before we call dvmHeapInitHeapRefTable().
     */
    gDvm.gcHeap = gcHeap;

    /* Set up the table we'll use for ALLOC_NO_GC.
     */
    if (!dvmHeapInitHeapRefTable(&gcHeap->nonCollectableRefs,
                           kNonCollectableRefDefault))
    {
        LOGE_HEAP("Can't allocate ALLOC_NO_GC table\n");
        goto fail;
    }

    /* Set up the lists and lock we'll use for finalizable
     * and reference objects.
     */
    dvmInitMutex(&gDvm.heapWorkerListLock);
    gcHeap->finalizableRefs = NULL;
    gcHeap->pendingFinalizationRefs = NULL;
    gcHeap->referenceOperations = NULL;

    /* Initialize the HeapWorker locks and other state
     * that the GC uses.
     */
    dvmInitializeHeapWorkerState();

    return true;

fail:
    gDvm.gcHeap = NULL;
    dvmHeapSourceShutdown(gcHeap);
    return false;
}

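/* A minimal lifecycle sketch (illustrative comment, not part of the build),
 * assuming the usual VM bring-up order implied by the functions below:
 *
 *     if (!dvmHeapStartup())             // before the first dvmMalloc()
 *         return false;
 *     ...fork from the zygote, if applicable...
 *     if (!dvmHeapStartupAfterZygote())  // resets the inherited GC clock
 *         return false;
 *     ...run...
 *     dvmHeapShutdown();                 // frees the native-heap tables
 */
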
bool dvmHeapStartupAfterZygote()
{
    /* Update our idea of the last GC start time so that we
     * don't use the last time that Zygote happened to GC.
     */
    gDvm.gcHeap->gcStartTime = dvmGetRelativeTimeUsec();

    return dvmHeapSourceStartupAfterZygote();
}

void dvmHeapShutdown()
{
//TODO: make sure we're locked
    if (gDvm.gcHeap != NULL) {
        GcHeap *gcHeap;

        gcHeap = gDvm.gcHeap;
        gDvm.gcHeap = NULL;

        /* Tables are allocated on the native heap;
         * they need to be cleaned up explicitly.
         * The process may stick around, so we don't
         * want to leak any native memory.
         */
        dvmHeapFreeHeapRefTable(&gcHeap->nonCollectableRefs);

        dvmHeapFreeLargeTable(gcHeap->finalizableRefs);
        gcHeap->finalizableRefs = NULL;

        dvmHeapFreeLargeTable(gcHeap->pendingFinalizationRefs);
        gcHeap->pendingFinalizationRefs = NULL;

        dvmHeapFreeLargeTable(gcHeap->referenceOperations);
        gcHeap->referenceOperations = NULL;

        /* Destroy the heap.  Any outstanding pointers
         * will point to unmapped memory (unless/until
         * someone else maps it).  This frees gcHeap
         * as a side-effect.
         */
        dvmHeapSourceShutdown(gcHeap);
    }
}

/*
 * We've been asked to allocate something we can't, e.g. an array so
 * large that (length * elementWidth) is larger than 2^31.
 *
 * _The Java Programming Language_, 4th edition, says, "you can be sure
 * that all SoftReferences to softly reachable objects will be cleared
 * before an OutOfMemoryError is thrown."
 *
 * It's unclear whether that holds for all situations where an OOM can
 * be thrown, or just in the context of an allocation that fails due
 * to lack of heap space.  For simplicity we just throw the exception.
 *
 * (OOM due to actually running out of space is handled elsewhere.)
 */
void dvmThrowBadAllocException(const char* msg)
{
    dvmThrowException("Ljava/lang/OutOfMemoryError;", msg);
}

/*
 * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
 * we're going to have to wait on the mutex.
 */
bool dvmLockHeap()
{
    if (pthread_mutex_trylock(&gDvm.gcHeapLock) != 0) {
        Thread *self;
        ThreadStatus oldStatus;
        int cc;

        self = dvmThreadSelf();
        if (self != NULL) {
            oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        } else {
            LOGI("ODD: waiting on heap lock, no self\n");
            oldStatus = -1; // shut up gcc
        }

        cc = pthread_mutex_lock(&gDvm.gcHeapLock);
        assert(cc == 0);

        if (self != NULL) {
            dvmChangeStatus(self, oldStatus);
        }
    }

    return true;
}

void dvmUnlockHeap()
{
    dvmUnlockMutex(&gDvm.gcHeapLock);
}

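/* A sketch of the intended usage pattern (illustrative only); dvmMalloc()
 * below does the equivalent internally:
 *
 *     dvmLockHeap();     // may sit in THREAD_VMWAIT while blocked
 *     ...examine or mutate gDvm.gcHeap state...
 *     dvmUnlockHeap();
 */
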
/* Pop an object from the list of pending finalizations and
 * reference clears/enqueues, and return the object.
 * The caller must call dvmReleaseTrackedAlloc()
 * on the object when finished.
 *
 * Typically only called by the heap worker thread.
 */
Object *dvmGetNextHeapWorkerObject(HeapWorkerOperation *op)
{
    Object *obj;
    GcHeap *gcHeap = gDvm.gcHeap;

    assert(op != NULL);

    obj = NULL;

    dvmLockMutex(&gDvm.heapWorkerListLock);

    /* We must handle reference operations before finalizations.
     * If:
     *     a) Someone subclasses WeakReference and overrides clear()
     *     b) A reference of this type is the last reference to
     *        a finalizable object
     * then we need to guarantee that the overridden clear() is called
     * on the reference before finalize() is called on the referent.
     * Both of these operations will always be scheduled at the same
     * time, so handling reference operations first will guarantee
     * the required order.
     */
    obj = dvmHeapGetNextObjectFromLargeTable(&gcHeap->referenceOperations);
    if (obj != NULL) {
        uintptr_t workBits;

        workBits = (uintptr_t)obj & WORKER_ENQUEUE;
        assert(workBits != 0);
        obj = (Object *)((uintptr_t)obj & ~WORKER_ENQUEUE);

        *op = workBits;
    } else {
        obj = dvmHeapGetNextObjectFromLargeTable(
                &gcHeap->pendingFinalizationRefs);
        if (obj != NULL) {
            *op = WORKER_FINALIZE;
        }
    }

    if (obj != NULL) {
        /* Don't let the GC collect the object until the
         * worker thread is done with it.
         *
         * This call is safe;  it uses thread-local storage
         * and doesn't acquire any locks.
         */
        dvmAddTrackedAlloc(obj, NULL);
    }

    dvmUnlockMutex(&gDvm.heapWorkerListLock);

    return obj;
}

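/* A hypothetical consumption loop (illustrative only; the real consumer is
 * the HeapWorker thread, which handles more state than this):
 *
 *     HeapWorkerOperation op;
 *     Object *obj;
 *     while ((obj = dvmGetNextHeapWorkerObject(&op)) != NULL) {
 *         ...call clear()/enqueue() or finalize() depending on op...
 *         dvmReleaseTrackedAlloc(obj, NULL);  // required by the contract
 *     }
 */
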
/* Used for a heap size change hysteresis to avoid collecting
 * SoftReferences when the heap only grows by a small amount.
 */
#define SOFT_REFERENCE_GROWTH_SLACK (128 * 1024)

/* Whenever the effective heap size may have changed,
 * this function must be called.
 */
void dvmHeapSizeChanged()
{
    GcHeap *gcHeap = gDvm.gcHeap;
    size_t currentHeapSize;

    currentHeapSize = dvmHeapSourceGetIdealFootprint();

    /* See if the heap size has changed enough that we should care
     * about it.
     */
    if (currentHeapSize <= gcHeap->softReferenceHeapSizeThreshold -
            4 * SOFT_REFERENCE_GROWTH_SLACK)
    {
        /* The heap has shrunk enough that we'll use this as a new
         * threshold.  Since we're doing better on space, there's
         * no need to collect any SoftReferences.
         *
         * This is 4x the growth hysteresis because we don't want
         * to snap down so easily after a shrink.  If we just cleared
         * up a bunch of SoftReferences, we don't want to disallow
         * any new ones from being created.
         * TODO: determine if the 4x is important, needed, or even good
         */
        gcHeap->softReferenceHeapSizeThreshold = currentHeapSize;
        gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
    } else if (currentHeapSize >= gcHeap->softReferenceHeapSizeThreshold +
            SOFT_REFERENCE_GROWTH_SLACK)
    {
        /* The heap has grown enough to warrant collecting SoftReferences.
         */
        gcHeap->softReferenceHeapSizeThreshold = currentHeapSize;
        gcHeap->softReferenceCollectionState = SR_COLLECT_SOME;
    }
}

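/* Worked example of the hysteresis above, assuming a current threshold of
 * 4096KB and the 128KB slack defined above:
 *
 *     grow trigger:   currentHeapSize >= 4096KB + 128KB   = 4224KB
 *     shrink trigger: currentHeapSize <= 4096KB - 4*128KB = 3584KB
 *
 * Any size in between leaves the threshold and the SoftReference
 * collection state untouched.
 */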

/* Do a full garbage collection, which may grow the
 * heap as a side-effect if the live set is large.
 */
static void gcForMalloc(bool collectSoftReferences)
{
#ifdef WITH_PROFILER
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
#endif
    /* This may adjust the soft limit as a side-effect.
     */
    LOGD_HEAP("dvmMalloc initiating GC%s\n",
            collectSoftReferences ? "(collect SoftReferences)" : "");
    dvmCollectGarbageInternal(collectSoftReferences, GC_FOR_MALLOC);
}

/* Try as hard as possible to allocate some memory: a plain allocation
 * first, then a GC and a retry, then growing the heap, and finally a
 * GC that collects SoftReferences followed by one last grow-and-retry.
 */
static DvmHeapChunk *tryMalloc(size_t size)
{
    DvmHeapChunk *hc;

    /* Don't try too hard if there's no way the allocation is
     * going to succeed.  We have to collect SoftReferences before
     * throwing an OOME, though.
     */
    if (size >= gDvm.heapSizeMax) {
        LOGW_HEAP("dvmMalloc(%zu/0x%08zx): "
                "someone's allocating a huge buffer\n", size, size);
        hc = NULL;
        goto collect_soft_refs;
    }

//TODO: figure out better heuristics
//    There will be a lot of churn if someone allocates a bunch of
//    big objects in a row, and we hit the frag case each time.
//    A full GC for each.
//    Maybe we grow the heap in bigger leaps
//    Maybe we skip the GC if the size is large and we did one recently
//      (number of allocations ago) (watch for thread effects)
//    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
//      (or, at least, there are only 0-5 objects swept each time)

    hc = dvmHeapSourceAlloc(size + sizeof(DvmHeapChunk));
    if (hc != NULL) {
        return hc;
    }

    /* The allocation failed.  Free up some space by doing
     * a full garbage collection.  This may grow the heap
     * if the live set is sufficiently large.
     */
    gcForMalloc(false);
    hc = dvmHeapSourceAlloc(size + sizeof(DvmHeapChunk));
    if (hc != NULL) {
        return hc;
    }

    /* Even that didn't work;  this is an exceptional state.
     * Try harder, growing the heap if necessary.
     */
    hc = dvmHeapSourceAllocAndGrow(size + sizeof(DvmHeapChunk));
    dvmHeapSizeChanged();
    if (hc != NULL) {
        size_t newHeapSize;

        newHeapSize = dvmHeapSourceGetIdealFootprint();
//TODO: may want to grow a little bit more so that the amount of free
//      space is equal to the old free space + the utilization slop for
//      the new allocation.
        LOGI_HEAP("Grow heap (frag case) to "
                "%zu.%03zuMB for %zu-byte allocation\n",
                FRACTIONAL_MB(newHeapSize), size);
        return hc;
    }

    /* Most allocations should have succeeded by now, so the heap
     * is really full, really fragmented, or the requested size is
     * really big.  Do another GC, collecting SoftReferences this
     * time.  The VM spec requires that all SoftReferences have
     * been collected and cleared before throwing an OOME.
     */
//TODO: wait for the finalizers from the previous GC to finish
collect_soft_refs:
    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation\n",
            size);
    gcForMalloc(true);
    hc = dvmHeapSourceAllocAndGrow(size + sizeof(DvmHeapChunk));
    dvmHeapSizeChanged();
    if (hc != NULL) {
        return hc;
    }
//TODO: maybe wait for finalizers and try one last time
    LOGE_HEAP("Out of memory on a %zu-byte allocation.\n", size);
//TODO: tell the HeapSource to dump its state
    dvmDumpThread(dvmThreadSelf(), false);

    return NULL;
}

/* Throw an OutOfMemoryError if there's a thread to attach it to.
 * Avoid recursing.
 *
 * The caller must not be holding the heap lock, or else the allocations
 * in dvmThrowException() will deadlock.
 */
static void throwOOME()
{
    Thread *self;

    if ((self = dvmThreadSelf()) != NULL) {
        /* If the current (failing) dvmMalloc() happened as part of thread
         * creation/attachment before the thread became part of the root set,
         * we can't rely on the thread-local trackedAlloc table, so
         * we can't keep track of a real allocated OOME object.  But, since
         * the thread is in the process of being created, it won't have
         * a useful stack anyway, so we may as well make things easier
         * by throwing the (stackless) pre-built OOME.
         */
        if (dvmIsOnThreadList(self) && !self->throwingOOME) {
            /* Let ourselves know that we tried to throw an OOM
             * error in the normal way in case we run out of
             * memory trying to allocate it inside dvmThrowException().
             */
            self->throwingOOME = true;

            /* Don't include a description string;
             * one fewer allocation.
             */
            dvmThrowException("Ljava/lang/OutOfMemoryError;", NULL);
        } else {
            /*
             * This thread has already tried to throw an OutOfMemoryError,
             * which probably means that we're running out of memory
             * while recursively trying to throw.
             *
             * To avoid any more allocation attempts, "throw" a pre-built
             * OutOfMemoryError object (which won't have a useful stack trace).
             *
             * Note that since this call can't possibly allocate anything,
             * we don't care about the state of self->throwingOOME
             * (which will usually already be set).
             */
            dvmSetException(self, gDvm.outOfMemoryObj);
        }
        /* We're done with the possible recursion.
         */
        self->throwingOOME = false;
    }
}

/*
 * Allocate storage on the GC heap.  We guarantee 8-byte alignment.
 *
 * The new storage is zeroed out.
 *
 * Note that, in rare cases, this could get called while a GC is in
 * progress.  If a non-VM thread tries to attach itself through JNI,
 * it will need to allocate some objects.  If this becomes annoying to
 * deal with, we can block it at the source, but holding the allocation
 * mutex should be enough.
 *
 * In rare circumstances (JNI AttachCurrentThread) we can be called
 * from a non-VM thread.
 *
 * We implement ALLOC_NO_GC by maintaining an internal list of objects
 * that should not be collected.  This requires no actual flag storage in
 * the object itself, which is good, but makes flag queries expensive.
 *
 * Use ALLOC_DONT_TRACK when we either don't want to track an allocation
 * (because it's being done for the interpreter "new" operation and will
 * be part of the root set immediately) or we can't (because this allocation
 * is for a brand new thread).
 *
 * Returns NULL and throws an exception on failure.
 *
 * TODO: don't do a GC if the debugger thinks all threads are suspended
 */
void* dvmMalloc(size_t size, int flags)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    DvmHeapChunk *hc;
    void *ptr;

#if 0
    /* handy for spotting large allocations */
    if (size >= 100000) {
        LOGI("dvmMalloc(%d):\n", size);
        dvmDumpThread(dvmThreadSelf(), false);
    }
#endif

#if defined(WITH_ALLOC_LIMITS)
    /*
     * See if they've exceeded the allocation limit for this thread.
     *
     * A limit value of -1 means "no limit".
     *
     * This is enabled at compile time because it requires us to do a
     * TLS lookup for the Thread pointer.  This has enough of a performance
     * impact that we don't want to do it if we don't have to.  (Now that
     * we're using gDvm.checkAllocLimits we may want to reconsider this,
     * but it's probably still best to just compile the check out of
     * production code -- one less thing to hit on every allocation.)
     */
    if (gDvm.checkAllocLimits) {
        Thread* self = dvmThreadSelf();
        if (self != NULL) {
            int count = self->allocLimit;
            if (count > 0) {
                self->allocLimit--;
            } else if (count == 0) {
                /* fail! */
                assert(!gDvm.initializing);
                self->allocLimit = -1;
                dvmThrowException("Ldalvik/system/AllocationLimitError;",
                    "thread allocation limit exceeded");
                return NULL;
            }
        }
    }

    if (gDvm.allocationLimit >= 0) {
        assert(!gDvm.initializing);
        gDvm.allocationLimit = -1;
        dvmThrowException("Ldalvik/system/AllocationLimitError;",
            "global allocation limit exceeded");
        return NULL;
    }
#endif

    dvmLockHeap();

    /* Try as hard as possible to allocate some memory.
     */
    hc = tryMalloc(size);
    if (hc != NULL) {
alloc_succeeded:
        /* We've got the memory.
         */
        if ((flags & ALLOC_FINALIZABLE) != 0) {
            /* This object is an instance of a class that
             * overrides finalize().  Add it to the finalizable list.
             *
             * Note that until DVM_OBJECT_INIT() is called on this
             * object, its clazz will be NULL.  Since the object is
             * in this table, it will be scanned as part of the root
             * set.  scanObject() explicitly deals with the NULL clazz.
             */
            if (!dvmHeapAddRefToLargeTable(&gcHeap->finalizableRefs,
                                    (Object *)hc->data))
            {
                LOGE_HEAP("dvmMalloc(): no room for any more "
                        "finalizable objects\n");
                dvmAbort();
            }
        }

        ptr = hc->data;

        /* The caller may not want us to collect this object.
         * If not, throw it in the nonCollectableRefs table, which
         * will be added to the root set when we GC.
         *
         * Note that until DVM_OBJECT_INIT() is called on this
         * object, its clazz will be NULL.  Since the object is
         * in this table, it will be scanned as part of the root
         * set.  scanObject() explicitly deals with the NULL clazz.
         */
        if ((flags & ALLOC_NO_GC) != 0) {
            if (!dvmHeapAddToHeapRefTable(&gcHeap->nonCollectableRefs, ptr)) {
                LOGE_HEAP("dvmMalloc(): no room for any more "
                        "ALLOC_NO_GC objects: %zu\n",
                        dvmHeapNumHeapRefTableEntries(
                                &gcHeap->nonCollectableRefs));
                dvmAbort();
            }
        }

#ifdef WITH_PROFILER
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.allocCount++;
            gDvm.allocProf.allocSize += size;
            if (self != NULL) {
                self->allocProf.allocCount++;
                self->allocProf.allocSize += size;
            }
        }
#endif
    } else {
        /* The allocation failed.
         */
        ptr = NULL;

#ifdef WITH_PROFILER
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.failedAllocCount++;
            gDvm.allocProf.failedAllocSize += size;
            if (self != NULL) {
                self->allocProf.failedAllocCount++;
                self->allocProf.failedAllocSize += size;
            }
        }
#endif
    }

    dvmUnlockHeap();

    if (ptr != NULL) {
        /*
         * If this block is immediately GCable, and they haven't asked us not
         * to track it, add it to the internal tracking list.
         *
         * If there's no "self" yet, we can't track it.  Calls made before
         * the Thread exists should use ALLOC_NO_GC.
         */
        if ((flags & (ALLOC_DONT_TRACK | ALLOC_NO_GC)) == 0) {
            dvmAddTrackedAlloc(ptr, NULL);
        }
    } else {
        /*
         * The allocation failed; throw an OutOfMemoryError.
         */
        throwOOME();
    }

    return ptr;
}

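/* A hypothetical caller, sketching the contract described above.  The names
 * here are illustrative (ALLOC_DEFAULT is assumed to be the zero flag value;
 * real callers live in the object-allocation code):
 *
 *     Object *obj = (Object *)dvmMalloc(clazz->objectSize, ALLOC_DEFAULT);
 *     if (obj == NULL)
 *         return NULL;                    // an OOME has already been thrown
 *     DVM_OBJECT_INIT(obj, clazz);        // clazz is NULL until this runs
 *     ...publish obj somewhere reachable...
 *     dvmReleaseTrackedAlloc(obj, NULL);  // balance the internal tracking
 */
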
/*
 * Returns true iff <obj> points to a valid allocated object.
 */
bool dvmIsValidObject(const Object* obj)
{
    const DvmHeapChunk *hc;

    /* Don't bother if it's NULL or not 8-byte aligned.
     */
    hc = ptr2chunk(obj);
    if (obj != NULL && ((uintptr_t)hc & (8-1)) == 0) {
        /* Even if the heap isn't locked, this shouldn't return
         * any false negatives.  The only mutation that could
         * be happening is allocation, which means that another
         * thread could be in the middle of a read-modify-write
         * to add a new bit for a new object.  However, that
         * RMW will have completed by the time any other thread
         * could possibly see the new pointer, so there is no
         * danger of dvmIsValidObject() being called on a valid
         * pointer whose bit isn't set.
         *
         * Freeing will only happen during the sweep phase, which
         * only happens while the heap is locked.
         */
        return dvmHeapSourceContains(hc);
    }
    return false;
}

/*
 * Clear flags that were passed into dvmMalloc() et al.
 * e.g., ALLOC_NO_GC, ALLOC_DONT_TRACK.
 */
void dvmClearAllocFlags(Object *obj, int mask)
{
    if ((mask & ALLOC_NO_GC) != 0) {
        dvmLockHeap();
        if (dvmIsValidObject(obj)) {
            if (!dvmHeapRemoveFromHeapRefTable(&gDvm.gcHeap->nonCollectableRefs,
                                               obj))
            {
                LOGE_HEAP("dvmClearAllocFlags(): failed to remove "
                        "ALLOC_NO_GC bit from object 0x%08x\n", (uintptr_t)obj);
                dvmAbort();
            }
//TODO: shrink if the table is very empty
        }
        dvmUnlockHeap();
    }

    if ((mask & ALLOC_DONT_TRACK) != 0) {
        dvmReleaseTrackedAlloc(obj, NULL);
    }
}

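/* A sketch of the ALLOC_NO_GC lifecycle (illustrative only): allocate an
 * object the GC must not reclaim, then clear the flag once the object is
 * reachable through the ordinary root set:
 *
 *     Object *obj = (Object *)dvmMalloc(size, ALLOC_NO_GC);
 *     ...make obj reachable from a root, e.g. a class static...
 *     dvmClearAllocFlags(obj, ALLOC_NO_GC);  // drops the table entry
 */
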
size_t dvmObjectSizeInHeap(const Object *obj)
{
    return dvmHeapSourceChunkSize(ptr2chunk(obj)) - sizeof(DvmHeapChunk);
}

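/* Layout assumed by the arithmetic above: every allocation is a DvmHeapChunk
 * header followed directly by the object payload, so ptr2chunk() steps back
 * over the header and hc->data points at the object:
 *
 *     [ DvmHeapChunk | object payload ... ]
 *     ^ chunk          ^ obj == hc->data
 *
 * The usable object size is therefore chunkSize - sizeof(DvmHeapChunk).
 */
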
/*
 * Initiate garbage collection.
 *
 * NOTES:
 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
 *   be added to the thread list while we work.  The thread should NOT
 *   start executing, so this is only interesting when we start chasing
 *   thread stacks.  (Before we do so, grab the lock.)
 *
 * We are not allowed to GC when the debugger has suspended the VM, which
 * is awkward because debugger requests can cause allocations.  The easiest
 * way to enforce this is to refuse to GC on an allocation made by the
 * JDWP thread -- we have to expand the heap or fail.
 */
void dvmCollectGarbageInternal(bool collectSoftReferences, enum GcReason reason)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    Object *softReferences;
    Object *weakReferences;
    Object *phantomReferences;

    u8 now;
    s8 timeSinceLastGc;
    s8 gcElapsedTime;
    int numFreed;
    size_t sizeFreed;

#if DVM_TRACK_HEAP_MARKING
    /* Since weak and soft references are always cleared,
     * they don't require any marking.
     * (Soft are lumped into strong when they aren't cleared.)
     */
    size_t strongMarkCount = 0;
    size_t strongMarkSize = 0;
    size_t finalizeMarkCount = 0;
    size_t finalizeMarkSize = 0;
    size_t phantomMarkCount = 0;
    size_t phantomMarkSize = 0;
#endif

    /* The heap lock must be held.
     */

    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC\n");
        return;
    }
    gcHeap->gcRunning = true;
    now = dvmGetRelativeTimeUsec();
    if (gcHeap->gcStartTime != 0) {
        timeSinceLastGc = (now - gcHeap->gcStartTime) / 1000;
    } else {
        timeSinceLastGc = 0;
    }
    gcHeap->gcStartTime = now;

    LOGV_HEAP("%s starting -- suspending threads\n", GcReasonStr[reason]);

    dvmSuspendAllThreads(SUSPEND_FOR_GC);

    /* Get the priority (the "nice" value) of the current thread.  The
     * getpriority() call can legitimately return -1, so we have to
     * explicitly test errno.
     */
    errno = 0;
    int oldThreadPriority = kInvalidPriority;
    int priorityResult = getpriority(PRIO_PROCESS, 0);
    if (errno != 0) {
        LOGI_HEAP("getpriority(self) failed: %s\n", strerror(errno));
    } else if (priorityResult > ANDROID_PRIORITY_NORMAL) {
        /* Current value is numerically greater than "normal", which
         * in backward UNIX terms means lower priority.
         */

        if (priorityResult >= ANDROID_PRIORITY_BACKGROUND) {
            set_sched_policy(dvmGetSysThreadId(), SP_FOREGROUND);
        }

        if (setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL) != 0) {
            LOGI_HEAP("Unable to elevate priority from %d to %d\n",
                priorityResult, ANDROID_PRIORITY_NORMAL);
        } else {
            /* priority elevated; save value so we can restore it later */
            LOGD_HEAP("Elevating priority from %d to %d\n",
                priorityResult, ANDROID_PRIORITY_NORMAL);
            oldThreadPriority = priorityResult;
        }
    }

    /* Wait for the HeapWorker thread to block.
     * (It may also already be suspended in interp code,
     * in which case it's not holding heapWorkerLock.)
     */
    dvmLockMutex(&gDvm.heapWorkerLock);

    /* Make sure that the HeapWorker thread hasn't become
     * wedged inside interp code.  If it has, this call will
     * print a message and abort the VM.
     */
    dvmAssertHeapWorkerThreadRunning();

    /* Lock the pendingFinalizationRefs list.
     *
     * Acquire the lock after suspending so the finalizer
     * thread can't block in the RUNNING state while
     * we try to suspend.
     */
    dvmLockMutex(&gDvm.heapWorkerListLock);

#ifdef WITH_PROFILER
    dvmMethodTraceGCBegin();
#endif

#if WITH_HPROF

/* Set DUMP_HEAP_ON_DDMS_UPDATE to 1 to enable heap dumps
 * whenever DDMS requests a heap update (HPIF chunk).
 * The output files will appear in /data/misc, which must
 * already exist.
 * You must define "WITH_HPROF := true" in your buildspec.mk
 * and recompile libdvm for this to work.
 *
 * To enable stack traces for each allocation, define
 * "WITH_HPROF_STACK := true" in buildspec.mk.  This option slows down
 * allocations and also requires 8 additional bytes per object on the
 * GC heap.
 */
#define DUMP_HEAP_ON_DDMS_UPDATE 0
#if DUMP_HEAP_ON_DDMS_UPDATE
    gcHeap->hprofDumpOnGc |= (gcHeap->ddmHpifWhen != 0);
#endif

    if (gcHeap->hprofDumpOnGc) {
        char nameBuf[128];

        gcHeap->hprofResult = -1;

        if (gcHeap->hprofFileName == NULL) {
            /* no filename was provided; invent one */
            sprintf(nameBuf, "/data/misc/heap-dump-tm%d-pid%d.hprof",
                (int) time(NULL), (int) getpid());
            gcHeap->hprofFileName = nameBuf;
        }
        gcHeap->hprofContext = hprofStartup(gcHeap->hprofFileName,
                gcHeap->hprofDirectToDdms);
        if (gcHeap->hprofContext != NULL) {
            hprofStartHeapDump(gcHeap->hprofContext);
        }
        gcHeap->hprofDumpOnGc = false;
        gcHeap->hprofFileName = NULL;
    }
#endif

    if (timeSinceLastGc < 10000) {
        LOGD_HEAP("GC! (%dms since last GC)\n",
                (int)timeSinceLastGc);
    } else {
        LOGD_HEAP("GC! (%d sec since last GC)\n",
                (int)(timeSinceLastGc / 1000));
    }
#if DVM_TRACK_HEAP_MARKING
    gcHeap->markCount = 0;
    gcHeap->markSize = 0;
#endif

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep()) {
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting\n");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    gcHeap->softReferences = NULL;
    gcHeap->weakReferences = NULL;
    gcHeap->phantomReferences = NULL;

    /* Make sure that we don't hard-mark the referents of Reference
     * objects by default.
     */
    gcHeap->markAllReferents = false;

    /* Don't mark SoftReferences if our caller wants us to collect them.
     * This has to be set before calling dvmHeapScanMarkedObjects().
     */
    if (collectSoftReferences) {
        gcHeap->softReferenceCollectionState = SR_COLLECT_ALL;
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();
#if DVM_TRACK_HEAP_MARKING
    strongMarkCount = gcHeap->markCount;
    strongMarkSize = gcHeap->markSize;
    gcHeap->markCount = 0;
    gcHeap->markSize = 0;
#endif

    /* Latch these so that the other calls to dvmHeapScanMarkedObjects() don't
     * mess with them.
     */
    softReferences = gcHeap->softReferences;
    weakReferences = gcHeap->weakReferences;
    phantomReferences = gcHeap->phantomReferences;

    /* All strongly-reachable objects have now been marked.
     */
    if (gcHeap->softReferenceCollectionState != SR_COLLECT_NONE) {
        LOGD_HEAP("Handling soft references...");
        dvmHeapHandleReferences(softReferences, REF_SOFT);
        // markCount always zero

        /* Now that we've tried collecting SoftReferences,
         * fall back to not collecting them.  If the heap
         * grows, we will start collecting again.
         */
        gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
    } // else dvmHeapScanMarkedObjects() already marked the soft-reachable set
    LOGD_HEAP("Handling weak references...");
    dvmHeapHandleReferences(weakReferences, REF_WEAK);
    // markCount always zero

    /* Once all weak-reachable objects have been taken
     * care of, any remaining unmarked objects can be finalized.
     */
    LOGD_HEAP("Finding finalizations...");
    dvmHeapScheduleFinalizations();
#if DVM_TRACK_HEAP_MARKING
    finalizeMarkCount = gcHeap->markCount;
    finalizeMarkSize = gcHeap->markSize;
    gcHeap->markCount = 0;
    gcHeap->markSize = 0;
#endif

    /* Any remaining objects that are not pending finalization
     * could be phantom-reachable.  This will mark any phantom-reachable
     * objects, as well as enqueue their references.
     */
    LOGD_HEAP("Handling phantom references...");
    dvmHeapHandleReferences(phantomReferences, REF_PHANTOM);
#if DVM_TRACK_HEAP_MARKING
    phantomMarkCount = gcHeap->markCount;
    phantomMarkSize = gcHeap->markSize;
    gcHeap->markCount = 0;
    gcHeap->markSize = 0;
#endif

//TODO: take care of JNI weak global references

#if DVM_TRACK_HEAP_MARKING
    LOGI_HEAP("Marked objects: %zuB strong, %zuB final, %zuB phantom\n",
            strongMarkSize, finalizeMarkSize, phantomMarkSize);
#endif

#ifdef WITH_DEADLOCK_PREDICTION
    dvmDumpMonitorInfo("before sweep");
#endif
    LOGD_HEAP("Sweeping...");
    dvmHeapSweepUnmarkedObjects(&numFreed, &sizeFreed);
#ifdef WITH_DEADLOCK_PREDICTION
    dvmDumpMonitorInfo("after sweep");
#endif

    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();
    dvmHeapSizeChanged();

#if WITH_HPROF
    if (gcHeap->hprofContext != NULL) {
        hprofFinishHeapDump(gcHeap->hprofContext);
//TODO: write a HEAP_SUMMARY record
        if (hprofShutdown(gcHeap->hprofContext))
            gcHeap->hprofResult = 0;    /* indicate success */
        gcHeap->hprofContext = NULL;
    }
#endif

    /* Now that we've freed up the GC heap, return any large
     * free chunks back to the system.  They'll get paged back
     * in the next time they're used.  Don't do it immediately,
     * though;  if the process is still allocating a bunch of
     * memory, we'll be taking a ton of page faults that we don't
     * necessarily need to.
     *
     * Cancel any old scheduled trims, and schedule a new one.
     */
    dvmScheduleHeapSourceTrim(5);  // in seconds

#ifdef WITH_PROFILER
    dvmMethodTraceGCEnd();
#endif
    LOGV_HEAP("GC finished -- resuming threads\n");

    gcHeap->gcRunning = false;

    dvmUnlockMutex(&gDvm.heapWorkerListLock);
    dvmUnlockMutex(&gDvm.heapWorkerLock);

#if defined(WITH_JIT)
    extern void dvmCompilerPerformSafePointChecks(void);

    /*
     * Patching a chaining cell is very cheap as it only updates 4 words. It's
     * the overhead of stopping all threads and synchronizing the I/D cache
     * that makes it expensive.
     *
     * Therefore we batch those work orders in a queue and go through them
     * when threads are suspended for GC.
     */
    dvmCompilerPerformSafePointChecks();
#endif

    dvmResumeAllThreads(SUSPEND_FOR_GC);
    if (oldThreadPriority != kInvalidPriority) {
        if (setpriority(PRIO_PROCESS, 0, oldThreadPriority) != 0) {
            LOGW_HEAP("Unable to reset priority to %d: %s\n",
                oldThreadPriority, strerror(errno));
        } else {
            LOGD_HEAP("Reset priority to %d\n", oldThreadPriority);
        }

        if (oldThreadPriority >= ANDROID_PRIORITY_BACKGROUND) {
            set_sched_policy(dvmGetSysThreadId(), SP_BACKGROUND);
        }
    }
    gcElapsedTime = (dvmGetRelativeTimeUsec() - gcHeap->gcStartTime) / 1000;
    LOGD("%s freed %d objects / %zu bytes in %dms\n",
         GcReasonStr[reason], numFreed, sizeFreed, (int)gcElapsedTime);
    dvmLogGcStats(numFreed, sizeFreed, gcElapsedTime);

    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM\n");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM\n");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM\n");
        dvmDdmSendHeapSegments(false, true);
    }
}

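/* A sketch (illustrative only) of driving an explicit collection, honoring
 * the "heap lock must be held" requirement noted above:
 *
 *     dvmLockHeap();
 *     dvmCollectGarbageInternal(false, GC_EXPLICIT);
 *     dvmUnlockHeap();
 */
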
#if WITH_HPROF
/*
 * Perform garbage collection, writing heap information to the specified file.
 *
 * If "fileName" is NULL, a suitable name will be generated automatically.
 *
 * Returns 0 on success, or an error code on failure.
 */
int hprofDumpHeap(const char* fileName, bool directToDdms)
{
    int result;

    dvmLockMutex(&gDvm.gcHeapLock);

    gDvm.gcHeap->hprofDumpOnGc = true;
    gDvm.gcHeap->hprofFileName = fileName;
    gDvm.gcHeap->hprofDirectToDdms = directToDdms;
    dvmCollectGarbageInternal(false, GC_HPROF_DUMP_HEAP);
    result = gDvm.gcHeap->hprofResult;

    dvmUnlockMutex(&gDvm.gcHeapLock);

    return result;
}

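/* Example (illustrative only): dump the heap to an auto-named file under
 * /data/misc instead of streaming it to DDMS:
 *
 *     int err = hprofDumpHeap(NULL, false);
 *     if (err != 0)
 *         LOGW("heap dump failed (%d)\n", err);
 */
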
void dvmHeapSetHprofGcScanState(hprof_heap_tag_t state, u4 threadSerialNumber)
{
    if (gDvm.gcHeap->hprofContext != NULL) {
        hprofSetGcScanState(gDvm.gcHeap->hprofContext, state,
                threadSerialNumber);
    }
}
#endif