/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cutils/mspace.h>
#include <stdint.h>
#include <sys/mman.h>
#include <errno.h>

#define SIZE_MAX UINT_MAX  // TODO: get SIZE_MAX from stdint.h

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/HeapBitmap.h"
#include "alloc/HeapBitmapInlines.h"

// TODO: find a real header file for these.
extern "C" int dlmalloc_trim(size_t);
extern "C" void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);

static void snapIdealFootprint();
static void setIdealFootprint(size_t max);
static size_t getMaximumSize(const HeapSource *hs);
static void trimHeaps();

#define HEAP_UTILIZATION_MAX        1024
#define DEFAULT_HEAP_UTILIZATION    512     // Range 1..HEAP_UTILIZATION_MAX
#define HEAP_IDEAL_FREE             (2 * 1024 * 1024)
#define HEAP_MIN_FREE               (HEAP_IDEAL_FREE / 4)
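
/* Worked example of the sizing constants above (illustrative only):
 * DEFAULT_HEAP_UTILIZATION of 512 out of HEAP_UTILIZATION_MAX 1024 asks
 * for a heap that is about 50% live data after a GC.  With a 6 MiB live
 * set, the raw target would be 12 MiB, but the free-space caps clamp the
 * surplus to [HEAP_MIN_FREE, HEAP_IDEAL_FREE] = [512 KiB, 2 MiB], so the
 * target becomes 6 MiB + 2 MiB = 8 MiB.  See getUtilizationTarget() for
 * the exact computation.
 */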

/* How long to wait after a GC before performing a heap trim
 * operation to reclaim unused pages.
 */
#define HEAP_TRIM_IDLE_TIME_MS (5 * 1000)

/* Start a concurrent collection when free memory falls under this
 * many bytes.
 */
#define CONCURRENT_START (128 << 10)

/* The next GC will not be concurrent when free memory after a GC is
 * under this many bytes.
 */
#define CONCURRENT_MIN_FREE (CONCURRENT_START + (128 << 10))
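
/* Worked example of the concurrency thresholds (illustrative only):
 * CONCURRENT_START is 128 KiB, so a concurrent GC is requested once
 * allocation comes within roughly 128 KiB of the current allocation
 * limit.  CONCURRENT_MIN_FREE is 256 KiB: if less than that remains
 * after a GC, there is not enough headroom for the mutator to keep
 * running while the collector works, so the concurrent trigger is
 * disabled (see dvmHeapSourceGrowForUtilization()).
 */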

#define HS_BOILERPLATE() \
    do { \
        assert(gDvm.gcHeap != NULL); \
        assert(gDvm.gcHeap->heapSource != NULL); \
        assert(gHs == gDvm.gcHeap->heapSource); \
    } while (0)

struct Heap {
    /* The mspace to allocate from.
     */
    mspace msp;

    /* The largest size that this heap is allowed to grow to.
     */
    size_t maximumSize;

    /* Number of bytes allocated from this mspace for objects,
     * including any overhead.  This value is NOT exact, and
     * should only be used as an input for certain heuristics.
     */
    size_t bytesAllocated;

    /* Number of bytes allocated from this mspace at which a
     * concurrent garbage collection will be started.
     */
    size_t concurrentStartBytes;

    /* Number of objects currently allocated from this mspace.
     */
    size_t objectsAllocated;

    /*
     * The lowest address of this heap, inclusive.
     */
    char *base;

    /*
     * The highest address of this heap, exclusive.
     */
    char *limit;
};

struct HeapSource {
    /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
     */
    size_t targetUtilization;

    /* The starting heap size.
     */
    size_t startSize;

    /* The largest that the heap source as a whole is allowed to grow.
     */
    size_t maximumSize;

    /*
     * The largest size we permit the heap to grow.  This value allows
     * the user to limit heap growth below the maximum size.  This is a
     * workaround until we can dynamically set the maximum size.  This
     * value can range between the starting size and the maximum size
     * but should never be set below the current footprint of the heap.
     */
    size_t growthLimit;

    /* The desired max size of the heap source as a whole.
     */
    size_t idealSize;

    /* The maximum number of bytes allowed to be allocated from the
     * active heap before a GC is forced.  This is used to "shrink" the
     * heap in lieu of actual compaction.
     */
    size_t softLimit;

    /* The heaps; heaps[0] is always the active heap,
     * which new objects should be allocated from.
     */
    Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];

    /* The current number of heaps.
     */
    size_t numHeaps;

    /* True if zygote mode was active when the HeapSource was created.
     */
    bool sawZygote;

    /*
     * The base address of the virtual memory reservation.
     */
    char *heapBase;

    /*
     * The length in bytes of the virtual memory reservation.
     */
    size_t heapLength;

    /*
     * The live object bitmap.
     */
    HeapBitmap liveBits;

    /*
     * The mark bitmap.
     */
    HeapBitmap markBits;

    /*
     * State for the GC daemon.
     */
    bool hasGcThread;
    pthread_t gcThread;
    bool gcThreadShutdown;
    pthread_mutex_t gcThreadMutex;
    pthread_cond_t gcThreadCond;
    bool gcThreadTrimNeeded;
};

#define hs2heap(hs_) (&((hs_)->heaps[0]))

/*
 * Returns true iff a soft limit is in effect for the active heap.
 */
static bool isSoftLimited(const HeapSource *hs)
{
    /* softLimit will be either SIZE_MAX or the limit for the
     * active mspace.  idealSize can be greater than softLimit
     * if there is more than one heap.  If there is only one
     * heap, a non-SIZE_MAX softLimit should always be the same
     * as idealSize.
     */
    return hs->softLimit <= hs->idealSize;
}

/*
 * Returns approximately the maximum number of bytes allowed to be
 * allocated from the active heap before a GC is forced.
 */
static size_t getAllocLimit(const HeapSource *hs)
{
    if (isSoftLimited(hs)) {
        return hs->softLimit;
    } else {
        return mspace_max_allowed_footprint(hs2heap(hs)->msp);
    }
}

/*
 * Returns the current footprint of all heaps.  If includeActive
 * is false, don't count the heap at index 0.
 */
static size_t oldHeapOverhead(const HeapSource *hs, bool includeActive)
{
    size_t footprint = 0;
    size_t i;

    if (includeActive) {
        i = 0;
    } else {
        i = 1;
    }
    for (; i < hs->numHeaps; i++) {
//TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
        footprint += mspace_footprint(hs->heaps[i].msp);
    }
    return footprint;
}

/*
 * Returns the heap that <ptr> could have come from, or NULL
 * if it could not have come from any heap.
 */
static Heap *ptr2heap(const HeapSource *hs, const void *ptr)
{
    const size_t numHeaps = hs->numHeaps;

    if (ptr != NULL) {
        for (size_t i = 0; i < numHeaps; i++) {
            const Heap *const heap = &hs->heaps[i];

            if ((const char *)ptr >= heap->base && (const char *)ptr < heap->limit) {
                return (Heap *)heap;
            }
        }
    }
    return NULL;
}

/*
 * Functions to update heapSource->bytesAllocated when an object
 * is allocated or freed.  mspace_usable_size() will give
 * us a much more accurate picture of heap utilization than
 * the requested byte sizes would.
 *
 * These aren't exact, and should not be treated as such.
 */
static void countAllocation(Heap *heap, const void *ptr)
{
    assert(heap->bytesAllocated < mspace_footprint(heap->msp));

    heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
            HEAP_SOURCE_CHUNK_OVERHEAD;
    heap->objectsAllocated++;
    HeapSource* hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapSetObjectBit(&hs->liveBits, ptr);

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
}

static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
{
    size_t delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
    assert(delta > 0);
    if (delta < heap->bytesAllocated) {
        heap->bytesAllocated -= delta;
    } else {
        heap->bytesAllocated = 0;
    }
    HeapSource* hs = gDvm.gcHeap->heapSource;
    dvmHeapBitmapClearObjectBit(&hs->liveBits, ptr);
    if (heap->objectsAllocated > 0) {
        heap->objectsAllocated--;
    }
    *numBytes += delta;
}

static HeapSource *gHs = NULL;

static mspace createMspace(void *base, size_t startSize, size_t maximumSize)
{
    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     *
     * We start off reserving startSize / 2 bytes but
     * letting the heap grow to startSize.  This saves
     * memory in the case where a process uses even less
     * than the starting size.
     */
    LOGV_HEAP("Creating VM heap of size %zu", startSize);
    errno = 0;

    mspace msp = create_contiguous_mspace_with_base(startSize/2,
            maximumSize, /*locked=*/false, base);
    if (msp != NULL) {
        /* Don't let the heap grow past the starting size without
         * our intervention.
         */
        mspace_set_max_allowed_footprint(msp, startSize);
    } else {
        /* There's no guarantee that errno has meaning when the call
         * fails, but it often does.
         */
        LOGE_HEAP("Can't create VM heap of size (%zu,%zu): %s",
            startSize/2, maximumSize, strerror(errno));
    }

    return msp;
}
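
/* Worked example of the mspace sizing (illustrative only): with a 4 MiB
 * startSize, create_contiguous_mspace_with_base() commits 2 MiB up front
 * while the reservation can hold maximumSize bytes.  The max allowed
 * footprint is then pinned at 4 MiB, so the mspace may grow from 2 MiB
 * to 4 MiB on demand but no further until a later call such as
 * setSoftLimit() or heapAllocAndGrow() adjusts the cap.
 */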

/*
 * Add the initial heap.  Returns false if the initial heap was
 * already added to the heap source.
 */
static bool addInitialHeap(HeapSource *hs, mspace msp, size_t maximumSize)
{
    assert(hs != NULL);
    assert(msp != NULL);
    if (hs->numHeaps != 0) {
        return false;
    }
    hs->heaps[0].msp = msp;
    hs->heaps[0].maximumSize = maximumSize;
    hs->heaps[0].concurrentStartBytes = SIZE_MAX;
    hs->heaps[0].base = hs->heapBase;
    hs->heaps[0].limit = hs->heapBase + hs->heaps[0].maximumSize;
    hs->numHeaps = 1;
    return true;
}

/*
 * Adds an additional heap to the heap source.  Returns false if there
 * are too many heaps or insufficient free space to add another heap.
 */
static bool addNewHeap(HeapSource *hs)
{
    Heap heap;

    assert(hs != NULL);
    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
        ALOGE("Attempt to create too many heaps (%zd >= %zd)",
                hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
        dvmAbort();
        return false;
    }

    memset(&heap, 0, sizeof(heap));

    /*
     * Heap storage comes from a common virtual memory reservation.
     * The new heap will start on the page after the old heap.
     */
    void *sbrk0 = contiguous_mspace_sbrk0(hs->heaps[0].msp);
    char *base = (char *)ALIGN_UP_TO_PAGE_SIZE(sbrk0);
    size_t overhead = base - hs->heaps[0].base;
    assert(((size_t)hs->heaps[0].base & (SYSTEM_PAGE_SIZE - 1)) == 0);

    if (overhead + HEAP_MIN_FREE >= hs->maximumSize) {
        LOGE_HEAP("No room to create any more heaps "
                  "(%zd overhead, %zd max)",
                  overhead, hs->maximumSize);
        return false;
    }

    heap.maximumSize = hs->growthLimit - overhead;
    heap.concurrentStartBytes = HEAP_MIN_FREE - CONCURRENT_START;
    heap.base = base;
    heap.limit = heap.base + heap.maximumSize;
    heap.msp = createMspace(base, HEAP_MIN_FREE, hs->maximumSize - overhead);
    if (heap.msp == NULL) {
        return false;
    }

    /* Don't let the soon-to-be-old heap grow any further.
     */
    hs->heaps[0].maximumSize = overhead;
    hs->heaps[0].limit = base;
    mspace msp = hs->heaps[0].msp;
    mspace_set_max_allowed_footprint(msp, mspace_footprint(msp));

    /* Put the new heap in the list, at heaps[0].
     * Shift existing heaps down.
     */
    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
    hs->heaps[0] = heap;
    hs->numHeaps++;

    return true;
}
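
/* Sketch of the reservation layout around a split (illustrative only):
 *
 *   heapBase              sbrk0  base (page-aligned)     heapBase+length
 *      |== old heaps[0] =====|xxx|====== new heaps[0] =========|
 *
 * The old heap is frozen at its current footprint (its limit becomes
 * base), and the new heap at base becomes heaps[0], absorbing all future
 * allocations; in the zygote case this keeps post-fork writes off the
 * shared zygote pages.
 */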

/*
 * The garbage collection daemon.  Initiates a concurrent collection
 * when signaled.  Also periodically trims the heaps when a few seconds
 * have elapsed since the last concurrent GC.
 */
static void *gcDaemonThread(void* arg)
{
    dvmChangeStatus(NULL, THREAD_VMWAIT);
    dvmLockMutex(&gHs->gcThreadMutex);
    while (!gHs->gcThreadShutdown) {
        bool trim = false;
        if (gHs->gcThreadTrimNeeded) {
            int result = dvmRelativeCondWait(&gHs->gcThreadCond, &gHs->gcThreadMutex,
                    HEAP_TRIM_IDLE_TIME_MS, 0);
            if (result == ETIMEDOUT) {
                /* Timed out waiting for a GC request, schedule a heap trim. */
                trim = true;
            }
        } else {
            dvmWaitCond(&gHs->gcThreadCond, &gHs->gcThreadMutex);
        }

        dvmLockHeap();
        /*
         * Another thread may have started a concurrent garbage
         * collection before we were scheduled.  Check for this
         * condition before proceeding.
         */
        if (!gDvm.gcHeap->gcRunning) {
            dvmChangeStatus(NULL, THREAD_RUNNING);
            if (trim) {
                trimHeaps();
                gHs->gcThreadTrimNeeded = false;
            } else {
                dvmCollectGarbageInternal(GC_CONCURRENT);
                gHs->gcThreadTrimNeeded = true;
            }
            dvmChangeStatus(NULL, THREAD_VMWAIT);
        }
        dvmUnlockHeap();
    }
    dvmChangeStatus(NULL, THREAD_RUNNING);
    return NULL;
}

static bool gcDaemonStartup()
{
    dvmInitMutex(&gHs->gcThreadMutex);
    pthread_cond_init(&gHs->gcThreadCond, NULL);
    gHs->gcThreadShutdown = false;
    gHs->hasGcThread = dvmCreateInternalThread(&gHs->gcThread, "GC",
                                               gcDaemonThread, NULL);
    return gHs->hasGcThread;
}

static void gcDaemonShutdown()
{
    if (gHs->hasGcThread) {
        dvmLockMutex(&gHs->gcThreadMutex);
        gHs->gcThreadShutdown = true;
        dvmSignalCond(&gHs->gcThreadCond);
        dvmUnlockMutex(&gHs->gcThreadMutex);
        pthread_join(gHs->gcThread, NULL);
    }
}

/*
 * Create a stack big enough for the worst possible case, where the
 * heap is perfectly full of the smallest object.
 * TODO: be better about memory usage; use a smaller stack with
 *       overflow detection and recovery.
 */
static bool allocMarkStack(GcMarkStack *stack, size_t maximumSize)
{
    const char *name = "dalvik-mark-stack";
    void *addr;

    assert(stack != NULL);
    stack->length = maximumSize * sizeof(Object*) /
        (sizeof(Object) + HEAP_SOURCE_CHUNK_OVERHEAD);
    addr = dvmAllocRegion(stack->length, PROT_READ | PROT_WRITE, name);
    if (addr == NULL) {
        return false;
    }
    stack->base = (const Object **)addr;
    stack->limit = (const Object **)((char *)addr + stack->length);
    stack->top = NULL;
    madvise(stack->base, stack->length, MADV_DONTNEED);
    return true;
}
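
/* Worked example of the mark stack sizing (illustrative only; the real
 * sizes depend on the build): if sizeof(Object) plus
 * HEAP_SOURCE_CHUNK_OVERHEAD came to 16 bytes with 4-byte pointers, a
 * 16 MiB heap could hold at most 1M of the smallest objects, so the
 * stack needs 1M slots = 4 MiB, i.e. maximumSize * 4 / 16.  Because the
 * pages are immediately madvise()d away, the worst-case size costs
 * address space rather than physical memory until the stack is used.
 */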

static void freeMarkStack(GcMarkStack *stack)
{
    assert(stack != NULL);
    munmap(stack->base, stack->length);
    memset(stack, 0, sizeof(*stack));
}

/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap* dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
                             size_t growthLimit)
{
    GcHeap *gcHeap;
    HeapSource *hs;
    mspace msp;
    size_t length;
    void *base;

    assert(gHs == NULL);

    if (!(startSize <= growthLimit && growthLimit <= maximumSize)) {
        ALOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
             startSize, maximumSize, growthLimit);
        return NULL;
    }

    /*
     * Allocate a contiguous region of virtual memory to be subdivided
     * among the heaps managed by the garbage collector.
     */
    length = ALIGN_UP_TO_PAGE_SIZE(maximumSize);
    base = dvmAllocRegion(length, PROT_NONE, "dalvik-heap");
    if (base == NULL) {
        return NULL;
    }

    /* Create an unlocked dlmalloc mspace to use as
     * a heap source.
     */
    msp = createMspace(base, startSize, maximumSize);
    if (msp == NULL) {
        goto fail;
    }

    gcHeap = (GcHeap *)calloc(1, sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor");
        goto fail;
    }

    hs = (HeapSource *)calloc(1, sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source");
        free(gcHeap);
        goto fail;
    }

    hs->targetUtilization = DEFAULT_HEAP_UTILIZATION;
    hs->startSize = startSize;
    hs->maximumSize = maximumSize;
    hs->growthLimit = growthLimit;
    hs->idealSize = startSize;
    hs->softLimit = SIZE_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    hs->hasGcThread = false;
    hs->heapBase = (char *)base;
    hs->heapLength = length;
    if (!addInitialHeap(hs, msp, growthLimit)) {
        LOGE_HEAP("Can't add initial heap");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->liveBits, base, length, "dalvik-bitmap-1")) {
        LOGE_HEAP("Can't create liveBits");
        goto fail;
    }
    if (!dvmHeapBitmapInit(&hs->markBits, base, length, "dalvik-bitmap-2")) {
        LOGE_HEAP("Can't create markBits");
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    if (!allocMarkStack(&gcHeap->markContext.stack, hs->maximumSize)) {
        ALOGE("Can't create markStack");
        dvmHeapBitmapDelete(&hs->markBits);
        dvmHeapBitmapDelete(&hs->liveBits);
        goto fail;
    }
    gcHeap->markContext.bitmap = &hs->markBits;
    gcHeap->heapSource = hs;

    gHs = hs;
    return gcHeap;

fail:
    munmap(base, length);
    return NULL;
}

bool dvmHeapSourceStartupAfterZygote()
{
    return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true;
}

/*
 * This is called while in zygote mode, right before we fork() for the
 * first time.  We create a heap for all future zygote process allocations,
 * in an attempt to avoid touching pages in the zygote heap.  (This would
 * probably be unnecessary if we had a compacting GC -- the source of our
 * troubles is small allocations filling in the gaps from larger ones.)
 */
bool dvmHeapSourceStartupBeforeFork()
{
    HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"

    HS_BOILERPLATE();

    assert(gDvm.zygote);

    if (!gDvm.newZygoteHeapAllocated) {
        /* Create a new heap for post-fork zygote allocations.  We only
         * try once, even if it fails.
         */
        ALOGV("Splitting out new zygote heap");
        gDvm.newZygoteHeapAllocated = true;
        return addNewHeap(hs);
    }
    return true;
}

void dvmHeapSourceThreadShutdown()
{
    if (gDvm.gcHeap != NULL && gDvm.concurrentMarkSweep) {
        gcDaemonShutdown();
    }
}

/*
 * Tears down the entire GcHeap structure and all of the substructures
 * attached to it.  This call has the side effect of setting the given
 * gcHeap pointer and gHs to NULL.
 */
void dvmHeapSourceShutdown(GcHeap **gcHeap)
{
    assert(gcHeap != NULL);
    if (*gcHeap != NULL && (*gcHeap)->heapSource != NULL) {
        HeapSource *hs = (*gcHeap)->heapSource;
        dvmHeapBitmapDelete(&hs->liveBits);
        dvmHeapBitmapDelete(&hs->markBits);
        freeMarkStack(&(*gcHeap)->markContext.stack);
        munmap(hs->heapBase, hs->heapLength);
        free(hs);
        gHs = NULL;
        free(*gcHeap);
        *gcHeap = NULL;
    }
}

/*
 * Gets the beginning of the allocation for the HeapSource.
 */
void *dvmHeapSourceGetBase()
{
    return gHs->heapBase;
}

/*
 * Returns the requested value. If the per-heap stats are requested, fill
 * them as well.
 *
 * Caller must hold the heap lock.
 */
size_t dvmHeapSourceGetValue(HeapSourceValueSpec spec, size_t perHeapStats[],
                             size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t value = 0;
    size_t total = 0;

    HS_BOILERPLATE();

    assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *const heap = &hs->heaps[i];

        switch (spec) {
        case HS_FOOTPRINT:
            value = mspace_footprint(heap->msp);
            break;
        case HS_ALLOWED_FOOTPRINT:
            value = mspace_max_allowed_footprint(heap->msp);
            break;
        case HS_BYTES_ALLOCATED:
            value = heap->bytesAllocated;
            break;
        case HS_OBJECTS_ALLOCATED:
            value = heap->objectsAllocated;
            break;
        default:
            // quiet gcc
            break;
        }
        if (perHeapStats) {
            perHeapStats[i] = value;
        }
        total += value;
    }
    return total;
}
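
/* Example use (illustrative sketch, not called from this file): a caller
 * holding the heap lock can fetch both the total and the per-heap
 * breakdown in one call:
 *
 *   size_t perHeap[HEAP_SOURCE_MAX_HEAP_COUNT];
 *   size_t numHeaps = dvmHeapSourceGetNumHeaps();
 *   size_t total = dvmHeapSourceGetValue(HS_BYTES_ALLOCATED,
 *                                        perHeap, numHeaps);
 *
 * Passing NULL for perHeapStats skips the per-heap fill and returns
 * only the total.
 */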

void dvmHeapSourceGetRegions(uintptr_t *base, uintptr_t *max, size_t numHeaps)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    assert(numHeaps <= hs->numHeaps);
    for (size_t i = 0; i < numHeaps; ++i) {
        base[i] = (uintptr_t)hs->heaps[i].base;
        max[i] = MIN((uintptr_t)hs->heaps[i].limit - 1, hs->markBits.max);
    }
}

/*
 * Get the bitmap representing all live objects.
 */
HeapBitmap *dvmHeapSourceGetLiveBits()
{
    HS_BOILERPLATE();

    return &gHs->liveBits;
}

/*
 * Get the bitmap representing all marked objects.
 */
HeapBitmap *dvmHeapSourceGetMarkBits()
{
    HS_BOILERPLATE();

    return &gHs->markBits;
}

void dvmHeapSourceSwapBitmaps()
{
    HeapBitmap tmp = gHs->liveBits;
    gHs->liveBits = gHs->markBits;
    gHs->markBits = tmp;
}

void dvmHeapSourceZeroMarkBitmap()
{
    HS_BOILERPLATE();

    dvmHeapBitmapZero(&gHs->markBits);
}

void dvmMarkImmuneObjects(const char *immuneLimit)
{
    /*
     * Copy the contents of the live bit vector for the immune object
     * range into the mark bit vector.
     */
    /* The only values generated by dvmHeapSourceGetImmuneLimit() */
    assert(immuneLimit == gHs->heaps[0].base ||
           immuneLimit == NULL);
    assert(gHs->liveBits.base == gHs->markBits.base);
    assert(gHs->liveBits.bitsLen == gHs->markBits.bitsLen);
    /* heap[0] is never immune */
    assert(gHs->heaps[0].base >= immuneLimit);
    assert(gHs->heaps[0].limit > immuneLimit);

    for (size_t i = 1; i < gHs->numHeaps; ++i) {
        if (gHs->heaps[i].base < immuneLimit) {
            assert(gHs->heaps[i].limit <= immuneLimit);
            /* Compute the number of words to copy in the bitmap. */
            size_t index = HB_OFFSET_TO_INDEX(
                (uintptr_t)gHs->heaps[i].base - gHs->liveBits.base);
            /* Compute the starting offset in the live and mark bits. */
            char *src = (char *)(gHs->liveBits.bits + index);
            char *dst = (char *)(gHs->markBits.bits + index);
            /* Compute the number of bytes of the live bitmap to copy. */
            size_t length = HB_OFFSET_TO_BYTE_INDEX(
                gHs->heaps[i].limit - gHs->heaps[i].base);
            /* Do the copy. */
            memcpy(dst, src, length);
            /* Make sure max points to the address of the highest set bit. */
            if (gHs->markBits.max < (uintptr_t)gHs->heaps[i].limit) {
                gHs->markBits.max = (uintptr_t)gHs->heaps[i].limit;
            }
        }
    }
}

/*
 * Allocates <n> bytes of zeroed data.
 */
void* dvmHeapSourceAlloc(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    if (heap->bytesAllocated + n > hs->softLimit) {
        /*
         * This allocation would push us over the soft limit; act as
         * if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation",
                  FRACTIONAL_MB(hs->softLimit), n);
        return NULL;
    }
    void* ptr = mspace_calloc(heap->msp, 1, n);
    if (ptr == NULL) {
        return NULL;
    }
    countAllocation(heap, ptr);
    /*
     * Check to see if a concurrent GC should be initiated.
     */
    if (gDvm.gcHeap->gcRunning || !hs->hasGcThread) {
        /*
         * The garbage collector thread is already running or has yet
         * to be started.  Do nothing.
         */
        return ptr;
    }
    if (heap->bytesAllocated > heap->concurrentStartBytes) {
        /*
         * We have exceeded the allocation threshold.  Wake up the
         * garbage collector.
         */
        dvmSignalCond(&gHs->gcThreadCond);
    }
    return ptr;
}

/* Remove any hard limits, try to allocate, and shrink back down.
 * Last resort when trying to allocate an object.
 */
static void* heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
{
    /* Grow as much as possible, but don't let the real footprint
     * go over the absolute max.
     */
    size_t max = heap->maximumSize;

    mspace_set_max_allowed_footprint(heap->msp, max);
    void* ptr = dvmHeapSourceAlloc(n);

    /* Shrink back down as small as possible.  Our caller may
     * readjust max_allowed to a more appropriate value.
     */
    mspace_set_max_allowed_footprint(heap->msp,
                                     mspace_footprint(heap->msp));
    return ptr;
}

/*
 * Allocates <n> bytes of zeroed data, growing as much as possible
 * if necessary.
 */
void* dvmHeapSourceAllocAndGrow(size_t n)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);
    void* ptr = dvmHeapSourceAlloc(n);
    if (ptr != NULL) {
        return ptr;
    }

    size_t oldIdealSize = hs->idealSize;
    if (isSoftLimited(hs)) {
        /* We're soft-limited.  Try removing the soft limit to
         * see if we can allocate without actually growing.
         */
        hs->softLimit = SIZE_MAX;
        ptr = dvmHeapSourceAlloc(n);
        if (ptr != NULL) {
            /* Removing the soft limit worked; fix things up to
             * reflect the new effective ideal size.
             */
            snapIdealFootprint();
            return ptr;
        }
        // softLimit intentionally left at SIZE_MAX.
    }

    /* We're not soft-limited.  Grow the heap to satisfy the request.
     * If this call fails, no footprints will have changed.
     */
    ptr = heapAllocAndGrow(hs, heap, n);
    if (ptr != NULL) {
        /* The allocation succeeded.  Fix up the ideal size to
         * reflect any footprint modifications that had to happen.
         */
        snapIdealFootprint();
    } else {
        /* We just couldn't do it.  Restore the original ideal size,
         * fixing up softLimit if necessary.
         */
        setIdealFootprint(oldIdealSize);
    }
    return ptr;
}

/*
 * Frees the first numPtrs objects in the ptrs list and returns the
 * amount of reclaimed storage. The list must contain addresses all in
 * the same mspace, and must be in increasing order. This implies that
 * there are no duplicates, and no entries are NULL.
 */
size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
{
    HS_BOILERPLATE();

    if (numPtrs == 0) {
        return 0;
    }

    assert(ptrs != NULL);
    assert(*ptrs != NULL);
    Heap* heap = ptr2heap(gHs, *ptrs);
    size_t numBytes = 0;
    if (heap != NULL) {
        mspace msp = heap->msp;
        // Calling mspace_free on shared heaps disrupts sharing too
        // much. For heap[0] -- the 'active heap' -- we call
        // mspace_free, but on the other heaps we only do some
        // accounting.
        if (heap == gHs->heaps) {
            // mspace_merge_objects takes two allocated objects, and
            // if the second immediately follows the first, will merge
            // them, returning a larger object occupying the same
            // memory. This is a local operation, and doesn't require
            // dlmalloc to manipulate any freelists. It's pretty
            // inexpensive compared to free().

            // ptrs is an array of objects all in memory order, and if
            // client code has been allocating lots of short-lived
            // objects, this is likely to contain runs of objects all
            // now garbage, and thus highly amenable to this optimization.

            // Unroll the 0th iteration of the loop below: call
            // countFree() on ptrs[0] and initialize merged.
            assert(ptrs[0] != NULL);
            assert(ptr2heap(gHs, ptrs[0]) == heap);
            countFree(heap, ptrs[0], &numBytes);
            void *merged = ptrs[0];
            for (size_t i = 1; i < numPtrs; i++) {
                assert(merged != NULL);
                assert(ptrs[i] != NULL);
                assert((intptr_t)merged < (intptr_t)ptrs[i]);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
                // Try to merge. If it works, merged now includes the
                // memory of ptrs[i]. If it doesn't, free merged, and
                // see if ptrs[i] starts a new run of adjacent
                // objects to merge.
                if (mspace_merge_objects(msp, merged, ptrs[i]) == NULL) {
                    mspace_free(msp, merged);
                    merged = ptrs[i];
                }
            }
            assert(merged != NULL);
            mspace_free(msp, merged);
        } else {
            // This is not an 'active heap'. Only do the accounting.
            for (size_t i = 0; i < numPtrs; i++) {
                assert(ptrs[i] != NULL);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
            }
        }
    }
    return numBytes;
}
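
/* Worked example of the merge loop above (illustrative only): suppose
 * ptrs is {A, B, C, X} where A, B, and C are adjacent chunks and X lives
 * farther up the heap.  merged starts as A; merging B and then C
 * succeeds, so A grows to cover all three.  Merging X fails (it is not
 * adjacent), so the combined A+B+C chunk is freed with one mspace_free()
 * call and X starts a new run, freed after the loop.  Four garbage
 * objects cost two freelist operations instead of four.
 */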

/*
 * Returns true iff <ptr> is in the heap source.
 */
bool dvmHeapSourceContainsAddress(const void *ptr)
{
    HS_BOILERPLATE();

    return (dvmHeapBitmapCoversAddress(&gHs->liveBits, ptr));
}

/*
 * Returns true iff <ptr> was allocated from the heap source.
 */
bool dvmHeapSourceContains(const void *ptr)
{
    HS_BOILERPLATE();

    if (dvmHeapSourceContainsAddress(ptr)) {
        return dvmHeapBitmapIsObjectBitSet(&gHs->liveBits, ptr) != 0;
    }
    return false;
}

bool dvmIsZygoteObject(const Object* obj)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    if (dvmHeapSourceContains(obj) && hs->sawZygote) {
        Heap *heap = ptr2heap(hs, obj);
        if (heap != NULL) {
            /* If the object is not in the active heap, we assume that
             * it was allocated as part of zygote.
             */
            return heap != hs->heaps;
        }
    }
    /* The pointer is outside of any known heap, or we are not
     * running in zygote mode.
     */
    return false;
}

/*
 * Returns the number of usable bytes in an allocated chunk; the size
 * may be larger than the size passed to dvmHeapSourceAlloc().
 */
size_t dvmHeapSourceChunkSize(const void *ptr)
{
    HS_BOILERPLATE();

    Heap* heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        return mspace_usable_size(heap->msp, ptr);
    }
    return 0;
}

/*
 * Returns the number of bytes that the heap source has allocated
 * from the system using sbrk/mmap, etc.
 *
 * Caller must hold the heap lock.
 */
size_t dvmHeapSourceFootprint()
{
    HS_BOILERPLATE();

//TODO: include size of bitmaps?
    return oldHeapOverhead(gHs, true);
}

static size_t getMaximumSize(const HeapSource *hs)
{
    return hs->growthLimit;
}

/*
 * Returns the current maximum size of the heap source respecting any
 * growth limits.
 */
size_t dvmHeapSourceGetMaximumSize()
{
    HS_BOILERPLATE();
    return getMaximumSize(gHs);
}

/*
 * Removes any growth limits.  Allows the user to allocate up to the
 * maximum heap size.
 */
void dvmClearGrowthLimit()
{
    HS_BOILERPLATE();
    dvmLockHeap();
    dvmWaitForConcurrentGcToComplete();
    gDvm.gcHeap->cardTableLength = gDvm.gcHeap->cardTableMaxLength;
    gHs->growthLimit = gHs->maximumSize;
    size_t overhead = oldHeapOverhead(gHs, false);
    gHs->heaps[0].maximumSize = gHs->maximumSize - overhead;
    gHs->heaps[0].limit = gHs->heaps[0].base + gHs->heaps[0].maximumSize;
    dvmUnlockHeap();
}

/*
 * Return the real bytes used by old heaps plus the soft usage of the
 * current heap.  When a soft limit is in effect, this is effectively
 * what it's compared against (though, in practice, it only looks at
 * the current heap).
 */
static size_t getSoftFootprint(bool includeActive)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t ret = oldHeapOverhead(hs, false);
    if (includeActive) {
        ret += hs->heaps[0].bytesAllocated;
    }

    return ret;
}

/*
 * Gets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.
 */
size_t dvmHeapSourceGetIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->idealSize;
}

/*
 * Sets the soft limit, handling any necessary changes to the allowed
 * footprint of the active heap.
 */
static void setSoftLimit(HeapSource *hs, size_t softLimit)
{
    /* Compare against the actual footprint, rather than the
     * max_allowed, because the heap may not have grown all the
     * way to the allowed size yet.
     */
    mspace msp = hs->heaps[0].msp;
    size_t currentHeapSize = mspace_footprint(msp);
    if (softLimit < currentHeapSize) {
        /* Don't let the heap grow any more, and impose a soft limit.
         */
        mspace_set_max_allowed_footprint(msp, currentHeapSize);
        hs->softLimit = softLimit;
    } else {
        /* Let the heap grow to the requested max, and remove any
         * soft limit, if set.
         */
        mspace_set_max_allowed_footprint(msp, softLimit);
        hs->softLimit = SIZE_MAX;
    }
}
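
/* Worked example of the two cases above (illustrative only): if the
 * active mspace already occupies 4 MiB and the requested limit is 3 MiB,
 * the footprint cannot shrink in place, so the mspace is frozen at 4 MiB
 * and a 3 MiB soft limit makes the allocator behave as if the heap were
 * full (see the softLimit check in dvmHeapSourceAlloc()).  If the
 * request is 6 MiB instead, the mspace is simply allowed to grow to
 * 6 MiB and the soft limit is cleared to SIZE_MAX.
 */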

/*
 * Sets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.  Clamps to the appropriate maximum
 * value.
 */
static void setIdealFootprint(size_t max)
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t maximumSize = getMaximumSize(hs);
    if (max > maximumSize) {
        LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %zd.%03zdMB",
                FRACTIONAL_MB(max),
                FRACTIONAL_MB(maximumSize));
        max = maximumSize;
    }

    /* Convert max into a size that applies to the active heap.
     * Old heaps will count against the ideal size.
     */
    size_t overhead = getSoftFootprint(false);
    size_t activeMax;
    if (overhead < max) {
        activeMax = max - overhead;
    } else {
        activeMax = 0;
    }

    setSoftLimit(hs, activeMax);
    hs->idealSize = max;
}

/*
 * Make the ideal footprint equal to the current footprint.
 */
static void snapIdealFootprint()
{
    HS_BOILERPLATE();

    setIdealFootprint(getSoftFootprint(true));
}

/*
 * Gets the current ideal heap utilization, represented as a number
 * between zero and one.
 */
float dvmGetTargetHeapUtilization()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
}

/*
 * Sets the new ideal heap utilization, represented as a number
 * between zero and one.
 */
void dvmSetTargetHeapUtilization(float newTarget)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    /* Clamp it to a reasonable range.
     */
    // TODO: This may need some tuning.
    if (newTarget < 0.2) {
        newTarget = 0.2;
    } else if (newTarget > 0.8) {
        newTarget = 0.8;
    }

    hs->targetUtilization =
            (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
    ALOGV("Set heap target utilization to %zd/%d (%f)",
            hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
}

/*
 * Given the size of a live set, returns the ideal heap size given
 * the current target utilization and MIN/MAX values.
 *
 * targetUtilization is in the range 1..HEAP_UTILIZATION_MAX.
 */
static size_t getUtilizationTarget(size_t liveSize, size_t targetUtilization)
{
    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     */
    size_t targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX;

    /* Cap the amount of free space, though, so we don't end up
     * with, e.g., 8MB of free space when the live set size hits 8MB.
     */
    if (targetSize > liveSize + HEAP_IDEAL_FREE) {
        targetSize = liveSize + HEAP_IDEAL_FREE;
    } else if (targetSize < liveSize + HEAP_MIN_FREE) {
        targetSize = liveSize + HEAP_MIN_FREE;
    }
    return targetSize;
}
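
/* Worked example of getUtilizationTarget() (illustrative only): with the
 * default targetUtilization of 512 out of 1024, an 8 MiB live set gives
 * a raw target of (8 MiB / 512) * 1024 = 16 MiB.  That would leave 8 MiB
 * free, more than HEAP_IDEAL_FREE, so the result is capped at
 * 8 MiB + 2 MiB = 10 MiB.  Conversely, a 256 KiB live set gives a raw
 * target of 512 KiB, leaving less than HEAP_MIN_FREE, so it is raised
 * to 256 KiB + 512 KiB = 768 KiB.
 */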

/*
 * Given the current contents of the active heap, increase the allowed
 * heap footprint to match the target utilization ratio.  This
 * should only be called immediately after a full mark/sweep.
 */
void dvmHeapSourceGrowForUtilization()
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    Heap* heap = hs2heap(hs);

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Note that only the active heap plays any part in this.
     *
     * Avoid letting the old heaps influence the target free size,
     * because they may be full of objects that aren't actually
     * in the working set.  Just look at the allocated size of
     * the current heap.
     */
    size_t currentHeapUsed = heap->bytesAllocated;
    size_t targetHeapSize =
            getUtilizationTarget(currentHeapUsed, hs->targetUtilization);

    /* The ideal size includes the old heaps; add overhead so that
     * it can be immediately subtracted again in setIdealFootprint().
     * If the target heap size would exceed the max, setIdealFootprint()
     * will clamp it to a legal value.
     */
    size_t overhead = getSoftFootprint(false);
    setIdealFootprint(targetHeapSize + overhead);

    size_t freeBytes = getAllocLimit(hs);
    if (freeBytes < CONCURRENT_MIN_FREE) {
        /* Not enough free memory to allow a concurrent GC. */
        heap->concurrentStartBytes = SIZE_MAX;
    } else {
        heap->concurrentStartBytes = freeBytes - CONCURRENT_START;
    }
}
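
/* Worked example of the concurrent-GC trigger above (illustrative only):
 * if the post-GC allocation limit comes out at 10 MiB, then
 * concurrentStartBytes is 10 MiB - 128 KiB, and dvmHeapSourceAlloc()
 * wakes the GC daemon once bytesAllocated crosses that line, i.e. when
 * less than CONCURRENT_START bytes of headroom remain.  If the limit
 * were under CONCURRENT_MIN_FREE (256 KiB), the trigger is disabled by
 * setting it to SIZE_MAX.
 */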

/*
 * Return free pages to the system.
 * TODO: move this somewhere else, especially the native heap part.
 */
static void releasePagesInRange(void *start, void *end, void *nbytes)
{
    /* Linux requires that the madvise() start address is page-aligned.
     * We also align the end address.
     */
    start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
    end = (void *)((size_t)end & ~(SYSTEM_PAGE_SIZE - 1));
    if (start < end) {
        size_t length = (char *)end - (char *)start;
        madvise(start, length, MADV_DONTNEED);
        *(size_t *)nbytes += length;
    }
}
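
/* Worked example of the alignment math above (illustrative only,
 * assuming 4 KiB pages): a free chunk spanning [0x10234, 0x15800) is
 * trimmed to the whole pages it contains: start rounds up to 0x11000,
 * end rounds down to 0x15000, and madvise() releases the 0x4000 bytes
 * in between.  Partial pages at either end stay resident because
 * neighbouring live data may share them.
 */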

/*
 * Return unused memory to the system if possible.
 */
static void trimHeaps()
{
    HS_BOILERPLATE();

    HeapSource *hs = gHs;
    size_t heapBytes = 0;
    for (size_t i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];

        /* Return the wilderness chunk to the system.
         */
        mspace_trim(heap->msp, 0);

        /* Return any whole free pages to the system.
         */
        mspace_walk_free_pages(heap->msp, releasePagesInRange, &heapBytes);
    }

    /* Same for the native heap.
     */
    dlmalloc_trim(0);
    size_t nativeBytes = 0;
    dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes);

    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes",
            heapBytes, nativeBytes, heapBytes + nativeBytes);
}

/*
 * Walks over the heap source and passes every allocated and
 * free chunk to the callback.
 */
void dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
                                       const void *userptr, size_t userlen,
                                       void *arg),
                       void *arg)
{
    HS_BOILERPLATE();

    /* Walk the heaps from oldest to newest.
     */
//TODO: do this in address order
    HeapSource *hs = gHs;
    for (size_t i = hs->numHeaps; i > 0; --i) {
        mspace_walk_heap(hs->heaps[i-1].msp, callback, arg);
    }
}

/*
 * Gets the number of heaps available in the heap source.
 *
 * Caller must hold the heap lock, because gHs caches a field
 * in gDvm.gcHeap.
 */
size_t dvmHeapSourceGetNumHeaps()
{
    HS_BOILERPLATE();

    return gHs->numHeaps;
}

void *dvmHeapSourceGetImmuneLimit(bool isPartial)
{
    if (isPartial) {
        return hs2heap(gHs)->base;
    } else {
        return NULL;
    }
}
   1373