/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cutils/mspace.h>
#include <limits.h>     // for INT_MAX
#include <sys/mman.h>
#include <errno.h>

#include "Dalvik.h"
#include "alloc/Heap.h"
#include "alloc/HeapInternal.h"
#include "alloc/HeapSource.h"
#include "alloc/HeapBitmap.h"

// TODO: find a real header file for these.
extern int dlmalloc_trim(size_t);
extern void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);

static void snapIdealFootprint(void);
static void setIdealFootprint(size_t max);

#define ALIGN_UP_TO_PAGE_SIZE(p) \
    (((size_t)(p) + (SYSTEM_PAGE_SIZE - 1)) & ~(SYSTEM_PAGE_SIZE - 1))
#define ALIGN_DOWN_TO_PAGE_SIZE(p) \
    ((size_t)(p) & ~(SYSTEM_PAGE_SIZE - 1))
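
/* For illustration (assuming the common 4096-byte SYSTEM_PAGE_SIZE):
 *     ALIGN_UP_TO_PAGE_SIZE(5000)   == 8192
 *     ALIGN_UP_TO_PAGE_SIZE(4096)   == 4096
 *     ALIGN_DOWN_TO_PAGE_SIZE(5000) == 4096
 * Both macros rely on SYSTEM_PAGE_SIZE being a power of two, so the
 * mask ~(SYSTEM_PAGE_SIZE - 1) clears the low-order (offset) bits.
 */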

#define HEAP_UTILIZATION_MAX        1024
#define DEFAULT_HEAP_UTILIZATION    512     // Range 1..HEAP_UTILIZATION_MAX
#define HEAP_IDEAL_FREE             (2 * 1024 * 1024)
#define HEAP_MIN_FREE               (HEAP_IDEAL_FREE / 4)

#define HS_BOILERPLATE() \
    do { \
        assert(gDvm.gcHeap != NULL); \
        assert(gDvm.gcHeap->heapSource != NULL); \
        assert(gHs == gDvm.gcHeap->heapSource); \
    } while (0)

#define DEBUG_HEAP_SOURCE 0
#if DEBUG_HEAP_SOURCE
#define HSTRACE(...)  LOG(LOG_INFO, LOG_TAG "-hs", __VA_ARGS__)
#else
#define HSTRACE(...)  /**/
#endif

/*
=======================================================
=======================================================
=======================================================

How will this be used?
allocating/freeing: Heap.c just wants to say "alloc(n)" and get a ptr
    - if allocating in large doesn't work, try allocating from small
Heap.c will use HeapSource.h; HeapSource.c will do the right thing
    between small and large
    - some operations should be abstracted; put in a structure

How do we manage the size trade-offs?
- keep mspace max footprint clamped to actual footprint
- if small-alloc returns null, adjust large vs. small ratio
    - give small all available slack and retry
    - success or fail, snap back to actual footprint and give rest to large

managed as "small actual" + "large actual" + "delta to allowed total footprint"
- when allocating from one source or the other, give the delta to the
    active source, but snap back afterwards
- that may not work so great for a gc heap, because small will always consume.
    - but we need to use the memory, and the current max is the amount we
      need to fill before a GC.

Find a way to permanently steal pages from the middle of the heap
    - segment tricks?

Allocate String and char[] in a separate heap?

Maybe avoid growing small heap, even if there's slack?  Look at
live ratio of small heap after a gc; scale it based on that.

=======================================================
=======================================================
=======================================================
*/

typedef struct {
    /* The mspace to allocate from.
     */
    mspace msp;

    /* The bitmap that keeps track of where objects are in the heap.
     */
    HeapBitmap objectBitmap;

    /* The largest size that this heap is allowed to grow to.
     */
    size_t absoluteMaxSize;

    /* Number of bytes allocated from this mspace for objects,
     * including any overhead.  This value is NOT exact, and
     * should only be used as an input for certain heuristics.
     */
    size_t bytesAllocated;

    /* Number of objects currently allocated from this mspace.
     */
    size_t objectsAllocated;
} Heap;

struct HeapSource {
    /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
     */
    size_t targetUtilization;

    /* Requested minimum heap size, or zero if there is no minimum.
     */
    size_t minimumSize;

    /* The starting heap size.
     */
    size_t startSize;

    /* The largest size that the heap source as a whole is allowed
     * to grow to.
     */
    size_t absoluteMaxSize;

    /* The desired max size of the heap source as a whole.
     */
    size_t idealSize;

    /* The maximum number of bytes allowed to be allocated from the
     * active heap before a GC is forced.  This is used to "shrink" the
     * heap in lieu of actual compaction.
     */
    size_t softLimit;

    /* The heaps; heaps[0] is always the active heap,
     * which new objects should be allocated from.
     */
    Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];

    /* The current number of heaps.
     */
    size_t numHeaps;

    /* External allocation count.
     */
    size_t externalBytesAllocated;

    /* The maximum number of external bytes that may be allocated.
     */
    size_t externalLimit;

    /* True if zygote mode was active when the HeapSource was created.
     */
    bool sawZygote;
};

#define hs2heap(hs_) (&((hs_)->heaps[0]))

/*
 * Returns true iff a soft limit is in effect for the active heap.
 */
static inline bool
softLimited(const HeapSource *hs)
{
    /* softLimit will be either INT_MAX or the limit for the
     * active mspace.  idealSize can be greater than softLimit
     * if there is more than one heap.  If there is only one
     * heap, a non-INT_MAX softLimit should always be the same
     * as idealSize.
     */
    return hs->softLimit <= hs->idealSize;
}
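
/* A sketch of the soft-limit states (hypothetical sizes):
 *     softLimit == INT_MAX, idealSize == 4MB -> not soft-limited
 *     softLimit == 3MB,     idealSize == 4MB -> soft-limited (multi-heap;
 *                                               old heaps eat 1MB of ideal)
 *     softLimit == 4MB,     idealSize == 4MB -> soft-limited (single heap)
 */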

/*
 * Returns the current footprint of all heaps.  If includeActive
 * is false, don't count the heap at index 0.
 */
static inline size_t
oldHeapOverhead(const HeapSource *hs, bool includeActive)
{
    size_t footprint = 0;
    size_t i;

    for (i = includeActive ? 0 : 1; i < hs->numHeaps; i++) {
//TODO: include size of bitmaps?  If so, don't use bitsLen, listen to .max
        footprint += mspace_footprint(hs->heaps[i].msp);
    }
    return footprint;
}

/*
 * Returns the heap that <ptr> could have come from, or NULL
 * if it could not have come from any heap.
 */
static inline Heap *
ptr2heap(const HeapSource *hs, const void *ptr)
{
    const size_t numHeaps = hs->numHeaps;
    size_t i;

//TODO: unroll this to HEAP_SOURCE_MAX_HEAP_COUNT
    if (ptr != NULL) {
        for (i = 0; i < numHeaps; i++) {
            const Heap *const heap = &hs->heaps[i];

            if (dvmHeapBitmapMayContainObject(&heap->objectBitmap, ptr)) {
                return (Heap *)heap;
            }
        }
    }
    return NULL;
}

/*
 * Functions to update heapSource->bytesAllocated when an object
 * is allocated or freed.  mspace_usable_size() will give
 * us a much more accurate picture of heap utilization than
 * the requested byte sizes would.
 *
 * These aren't exact, and should not be treated as such.
 */
static inline void
countAllocation(Heap *heap, const void *ptr, bool isObj)
{
    assert(heap->bytesAllocated < mspace_footprint(heap->msp));

    heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
            HEAP_SOURCE_CHUNK_OVERHEAD;
    if (isObj) {
        heap->objectsAllocated++;
        dvmHeapBitmapSetObjectBit(&heap->objectBitmap, ptr);
    }

    assert(heap->bytesAllocated < mspace_footprint(heap->msp));
}
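
/* Note on the accounting above, for illustration: mspace_usable_size()
 * reports the size of the dlmalloc chunk backing an allocation, which is
 * the requested size rounded up to dlmalloc's chunk granularity (e.g. a
 * 10-byte request is typically backed by a somewhat larger chunk; the
 * exact rounding is a dlmalloc detail).  So bytesAllocated tracks chunk
 * sizes plus HEAP_SOURCE_CHUNK_OVERHEAD, not the sum of requested sizes.
 */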

static inline void
countFree(Heap *heap, const void *ptr, bool isObj)
{
    size_t delta;

    delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
    assert(delta > 0);
    if (delta < heap->bytesAllocated) {
        heap->bytesAllocated -= delta;
    } else {
        heap->bytesAllocated = 0;
    }
    if (isObj) {
        dvmHeapBitmapClearObjectBit(&heap->objectBitmap, ptr);
        if (heap->objectsAllocated > 0) {
            heap->objectsAllocated--;
        }
    }
}

static HeapSource *gHs = NULL;

static mspace
createMspace(size_t startSize, size_t absoluteMaxSize, size_t id)
{
    mspace msp;
    char name[PATH_MAX];

    /* If two ashmem regions have the same name, only one gets
     * the name when looking at the maps.
     */
    snprintf(name, sizeof(name)-1, "dalvik-heap%s/%zd",
        gDvm.zygote ? "/zygote" : "", id);
    name[sizeof(name)-1] = '\0';

    /* Create an unlocked dlmalloc mspace to use as
     * a small-object heap source.
     *
     * We start off reserving startSize/2 bytes but
     * letting the heap grow to startSize.  This saves
     * memory in the case where a process uses even less
     * than the starting size.
     */
    LOGV_HEAP("Creating VM heap of size %zd\n", startSize);
    errno = 0;
    msp = create_contiguous_mspace_with_name(startSize/2,
            absoluteMaxSize, /*locked=*/false, name);
    if (msp != NULL) {
        /* Don't let the heap grow past the starting size without
         * our intervention.
         */
        mspace_set_max_allowed_footprint(msp, startSize);
    } else {
        /* There's no guarantee that errno has meaning when the call
         * fails, but it often does.
         */
        LOGE_HEAP("Can't create VM heap of size (%zd,%zd) (errno=%d)\n",
            startSize/2, absoluteMaxSize, errno);
    }

    return msp;
}

static bool
addNewHeap(HeapSource *hs, mspace msp, size_t mspAbsoluteMaxSize)
{
    Heap heap;

    if (hs->numHeaps >= HEAP_SOURCE_MAX_HEAP_COUNT) {
        LOGE("Attempt to create too many heaps (%zd >= %zd)\n",
                hs->numHeaps, HEAP_SOURCE_MAX_HEAP_COUNT);
        dvmAbort();
        return false;
    }

    memset(&heap, 0, sizeof(heap));

    if (msp != NULL) {
        heap.msp = msp;
        heap.absoluteMaxSize = mspAbsoluteMaxSize;
    } else {
        size_t overhead;

        overhead = oldHeapOverhead(hs, true);
        if (overhead + HEAP_MIN_FREE >= hs->absoluteMaxSize) {
            LOGE_HEAP("No room to create any more heaps "
                    "(%zd overhead, %zd max)\n",
                    overhead, hs->absoluteMaxSize);
            return false;
        }
        heap.absoluteMaxSize = hs->absoluteMaxSize - overhead;
        heap.msp = createMspace(HEAP_MIN_FREE, heap.absoluteMaxSize,
                hs->numHeaps);
        if (heap.msp == NULL) {
            return false;
        }
    }
    if (!dvmHeapBitmapInit(&heap.objectBitmap,
                           (void *)ALIGN_DOWN_TO_PAGE_SIZE(heap.msp),
                           heap.absoluteMaxSize,
                           "objects"))
    {
        LOGE_HEAP("Can't create objectBitmap\n");
        goto fail;
    }

    /* Don't let the soon-to-be-old heap grow any further.
     */
    if (hs->numHeaps > 0) {
        mspace activeMsp = hs->heaps[0].msp;
        mspace_set_max_allowed_footprint(activeMsp,
                mspace_footprint(activeMsp));
    }

    /* Put the new heap in the list, at heaps[0].
     * Shift existing heaps down.
     */
    memmove(&hs->heaps[1], &hs->heaps[0], hs->numHeaps * sizeof(hs->heaps[0]));
    hs->heaps[0] = heap;
    hs->numHeaps++;

    return true;

fail:
    if (msp == NULL) {
        destroy_contiguous_mspace(heap.msp);
    }
    return false;
}

/*
 * Initializes the heap source; must be called before any other
 * dvmHeapSource*() functions.  Returns a GcHeap structure
 * allocated from the heap source.
 */
GcHeap *
dvmHeapSourceStartup(size_t startSize, size_t absoluteMaxSize)
{
    GcHeap *gcHeap;
    HeapSource *hs;
    mspace msp;

    assert(gHs == NULL);
    if (startSize > absoluteMaxSize) {
        LOGE("Bad heap parameters (start=%zd, max=%zd)\n",
           startSize, absoluteMaxSize);
        return NULL;
    }

    /* Create an unlocked dlmalloc mspace to use as
     * the small object heap source.
     */
    msp = createMspace(startSize, absoluteMaxSize, 0);
    if (msp == NULL) {
        return NULL;
    }

    /* Allocate a descriptor from the heap we just created.
     */
    gcHeap = mspace_malloc(msp, sizeof(*gcHeap));
    if (gcHeap == NULL) {
        LOGE_HEAP("Can't allocate heap descriptor\n");
        goto fail;
    }
    memset(gcHeap, 0, sizeof(*gcHeap));

    hs = mspace_malloc(msp, sizeof(*hs));
    if (hs == NULL) {
        LOGE_HEAP("Can't allocate heap source\n");
        goto fail;
    }
    memset(hs, 0, sizeof(*hs));

    hs->targetUtilization = DEFAULT_HEAP_UTILIZATION;
    hs->minimumSize = 0;
    hs->startSize = startSize;
    hs->absoluteMaxSize = absoluteMaxSize;
    hs->idealSize = startSize;
    hs->softLimit = INT_MAX;    // no soft limit at first
    hs->numHeaps = 0;
    hs->sawZygote = gDvm.zygote;
    if (!addNewHeap(hs, msp, absoluteMaxSize)) {
        LOGE_HEAP("Can't add initial heap\n");
        goto fail;
    }

    gcHeap->heapSource = hs;

    countAllocation(hs2heap(hs), gcHeap, false);
    countAllocation(hs2heap(hs), hs, false);

    gHs = hs;
    return gcHeap;

fail:
    destroy_contiguous_mspace(msp);
    return NULL;
}

/*
 * If the HeapSource was created while in zygote mode, this
 * will create a new heap for post-zygote allocations.
 * Having a separate heap should maximize the number of pages
 * that a given app_process shares with the zygote process.
 */
bool
dvmHeapSourceStartupAfterZygote()
{
    HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"

    HS_BOILERPLATE();

    assert(!gDvm.zygote);

    if (hs->sawZygote) {
        /* Create a new heap for post-zygote allocations.
         */
        return addNewHeap(hs, NULL, 0);
    }
    return true;
}

/*
 * This is called while in zygote mode, right before we fork() for the
 * first time.  We create a heap for all future zygote process allocations,
 * in an attempt to avoid touching pages in the zygote heap.  (This would
 * probably be unnecessary if we had a compacting GC -- the source of our
 * troubles is small allocations filling in the gaps from larger ones.)
 */
bool
dvmHeapSourceStartupBeforeFork()
{
    HeapSource *hs = gHs; // use a local to avoid the implicit "volatile"

    HS_BOILERPLATE();

    assert(gDvm.zygote);

    if (!gDvm.newZygoteHeapAllocated) {
        /* Create a new heap for post-fork zygote allocations.  We only
         * try once, even if it fails.
         */
        LOGV("Splitting out new zygote heap\n");
        gDvm.newZygoteHeapAllocated = true;
        return addNewHeap(hs, NULL, 0);
    }
    return true;
}

/*
 * Tears down the heap source and frees any resources associated with it.
 */
void
dvmHeapSourceShutdown(GcHeap *gcHeap)
{
    if (gcHeap != NULL && gcHeap->heapSource != NULL) {
        HeapSource *hs;
        size_t numHeaps;
        size_t i;

        hs = gcHeap->heapSource;
        gHs = NULL;

        /* Cache numHeaps because hs will be invalid after the last
         * heap is freed.
         */
        numHeaps = hs->numHeaps;

        for (i = 0; i < numHeaps; i++) {
            Heap *heap = &hs->heaps[i];

            dvmHeapBitmapDelete(&heap->objectBitmap);
            destroy_contiguous_mspace(heap->msp);
        }
        /* The last heap is the original one, which contains the
         * HeapSource object itself.
         */
    }
}

/*
 * Returns the requested value. If the per-heap stats are requested, fill
 * them as well.
 *
 * Caller must hold the heap lock.
 */
size_t
dvmHeapSourceGetValue(enum HeapSourceValueSpec spec, size_t perHeapStats[],
                      size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t value = 0;
    size_t total = 0;
    size_t i;

    HS_BOILERPLATE();

    switch (spec) {
    case HS_EXTERNAL_BYTES_ALLOCATED:
        return hs->externalBytesAllocated;
    case HS_EXTERNAL_LIMIT:
        return hs->externalLimit;
    default:
        // look at all heaps.
        ;
    }

    assert(arrayLen >= hs->numHeaps || perHeapStats == NULL);
    for (i = 0; i < hs->numHeaps; i++) {
        Heap *const heap = &hs->heaps[i];

        switch (spec) {
        case HS_FOOTPRINT:
            value = mspace_footprint(heap->msp);
            break;
        case HS_ALLOWED_FOOTPRINT:
            value = mspace_max_allowed_footprint(heap->msp);
            break;
        case HS_BYTES_ALLOCATED:
            value = heap->bytesAllocated;
            break;
        case HS_OBJECTS_ALLOCATED:
            value = heap->objectsAllocated;
            break;
        default:
            // quiet gcc
            break;
        }
        if (perHeapStats) {
            perHeapStats[i] = value;
        }
        total += value;
    }
    return total;
}

/*
 * Writes shallow copies of the currently-used bitmaps into outBitmaps,
 * returning the number of bitmaps written.  Returns 0 if the array was
 * not long enough or if there are no heaps, either of which is an error.
 */
size_t
dvmHeapSourceGetObjectBitmaps(HeapBitmap outBitmaps[], size_t maxBitmaps)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    assert(hs->numHeaps != 0);
    if (maxBitmaps >= hs->numHeaps) {
        size_t i;

        for (i = 0; i < hs->numHeaps; i++) {
            outBitmaps[i] = hs->heaps[i].objectBitmap;
        }
        return i;
    }
    return 0;
}

/*
 * Replaces the object location HeapBitmaps with the elements of
 * <objectBitmaps>.  The elements of <objectBitmaps> are overwritten
 * with shallow copies of the old bitmaps.
 *
 * Returns false if the number of bitmaps doesn't match the number
 * of heaps.
 */
bool
dvmHeapSourceReplaceObjectBitmaps(HeapBitmap objectBitmaps[], size_t nBitmaps)
{
    HeapSource *hs = gHs;
    size_t i;

    HS_BOILERPLATE();

    if (nBitmaps != hs->numHeaps) {
        return false;
    }

    for (i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];
        HeapBitmap swap;

        swap = heap->objectBitmap;
        heap->objectBitmap = objectBitmaps[i];
        objectBitmaps[i] = swap;
    }
    return true;
}

/*
 * Allocates <n> bytes of zeroed data.
 */
void *
dvmHeapSourceAlloc(size_t n)
{
    HeapSource *hs = gHs;
    Heap *heap;
    void *ptr;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    if (heap->bytesAllocated + n <= hs->softLimit) {
// TODO: allocate large blocks (>64k?) as separate mmap regions so that
//       they don't increase the high-water mark when they're freed.
// TODO: zero out large objects using madvise
        ptr = mspace_calloc(heap->msp, 1, n);
        if (ptr != NULL) {
            countAllocation(heap, ptr, true);
        }
    } else {
        /* This allocation would push us over the soft limit;
         * act as if the heap is full.
         */
        LOGV_HEAP("softLimit of %zd.%03zdMB hit for %zd-byte allocation\n",
                FRACTIONAL_MB(hs->softLimit), n);
        ptr = NULL;
    }
    return ptr;
}

/* Remove any hard limits, try to allocate, and shrink back down.
 * Last resort when trying to allocate an object.
 */
static void *
heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
{
    void *ptr;
    size_t max;

    /* Grow as much as possible, but don't let the real footprint
     * plus external allocations go over the absolute max.
     */
    max = heap->absoluteMaxSize;
    if (max > hs->externalBytesAllocated) {
        max -= hs->externalBytesAllocated;

        mspace_set_max_allowed_footprint(heap->msp, max);
        ptr = dvmHeapSourceAlloc(n);

        /* Shrink back down as small as possible.  Our caller may
         * readjust max_allowed to a more appropriate value.
         */
        mspace_set_max_allowed_footprint(heap->msp,
                mspace_footprint(heap->msp));
    } else {
        ptr = NULL;
    }

    return ptr;
}

/*
 * Allocates <n> bytes of zeroed data, growing as much as possible
 * if necessary.
 */
void *
dvmHeapSourceAllocAndGrow(size_t n)
{
    HeapSource *hs = gHs;
    Heap *heap;
    void *ptr;
    size_t oldIdealSize;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    ptr = dvmHeapSourceAlloc(n);
    if (ptr != NULL) {
        return ptr;
    }

    oldIdealSize = hs->idealSize;
    if (softLimited(hs)) {
        /* We're soft-limited.  Try removing the soft limit to
         * see if we can allocate without actually growing.
         */
        hs->softLimit = INT_MAX;
        ptr = dvmHeapSourceAlloc(n);
        if (ptr != NULL) {
            /* Removing the soft limit worked;  fix things up to
             * reflect the new effective ideal size.
             */
            snapIdealFootprint();
            return ptr;
        }
        // softLimit intentionally left at INT_MAX.
    }

    /* We're not soft-limited.  Grow the heap to satisfy the request.
     * If this call fails, no footprints will have changed.
     */
    ptr = heapAllocAndGrow(hs, heap, n);
    if (ptr != NULL) {
        /* The allocation succeeded.  Fix up the ideal size to
         * reflect any footprint modifications that had to happen.
         */
        snapIdealFootprint();
    } else {
        /* We just couldn't do it.  Restore the original ideal size,
         * fixing up softLimit if necessary.
         */
        setIdealFootprint(oldIdealSize);
    }
    return ptr;
}
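
/* In summary, the fallback ladder above is: (1) a plain dvmHeapSourceAlloc()
 * under the current limits; (2) if soft-limited, retry with the soft limit
 * lifted, which on success grows the effective ideal size; (3) otherwise,
 * grow the mspace toward its absolute max via heapAllocAndGrow().  The
 * footprint is re-snapped on success and restored on failure, so a failed
 * attempt leaves the limits where they started.
 */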

/*
 * Frees the memory pointed to by <ptr>, which may be NULL.
 */
void
dvmHeapSourceFree(void *ptr)
{
    Heap *heap;

    HS_BOILERPLATE();

    heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        countFree(heap, ptr, true);
        /* Only free objects that are in the active heap.
         * Touching old heaps would pull pages into this process.
         */
        if (heap == gHs->heaps) {
            mspace_free(heap->msp, ptr);
        }
    }
}

/*
 * Frees the first numPtrs objects in the ptrs list. The list must
 * contain addresses all in the same mspace, and must be in increasing
 * order. This implies that there are no duplicates, and no entries
 * are NULL.
 */
void
dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
{
    Heap *heap;

    HS_BOILERPLATE();

    if (numPtrs == 0) {
        return;
    }

    assert(ptrs != NULL);
    assert(*ptrs != NULL);
    heap = ptr2heap(gHs, *ptrs);
    if (heap != NULL) {
        mspace msp = heap->msp;
        // Calling mspace_free on shared heaps disrupts sharing too
        // much. For heap[0] -- the 'active heap' -- we call
        // mspace_free, but on the other heaps we only do some
        // accounting.
        if (heap == gHs->heaps) {
            // mspace_merge_objects takes two allocated objects, and
            // if the second immediately follows the first, will merge
            // them, returning a larger object occupying the same
            // memory. This is a local operation, and doesn't require
            // dlmalloc to manipulate any freelists. It's pretty
            // inexpensive compared to free().

            // ptrs is an array of objects all in memory order, and if
            // client code has been allocating lots of short-lived
            // objects, this is likely to contain runs of objects all
            // now garbage, and thus highly amenable to this optimization.

            // Unroll the 0th iteration around the loop below,
            // calling countFree() on ptrs[0] and initializing merged.
            assert(ptrs[0] != NULL);
            assert(ptr2heap(gHs, ptrs[0]) == heap);
            countFree(heap, ptrs[0], true);
            void *merged = ptrs[0];

            size_t i;
            for (i = 1; i < numPtrs; i++) {
                assert(merged != NULL);
                assert(ptrs[i] != NULL);
                assert((intptr_t)merged < (intptr_t)ptrs[i]);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], true);
                // Try to merge. If it works, merged now includes the
                // memory of ptrs[i]. If it doesn't, free merged, and
                // see if ptrs[i] starts a new run of adjacent
                // objects to merge.
                if (mspace_merge_objects(msp, merged, ptrs[i]) == NULL) {
                    mspace_free(msp, merged);
                    merged = ptrs[i];
                }
            }
            assert(merged != NULL);
            mspace_free(msp, merged);
        } else {
            // This is not an 'active heap'. Only do the accounting.
            size_t i;
            for (i = 0; i < numPtrs; i++) {
                assert(ptrs[i] != NULL);
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], true);
            }
        }
    }
}
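
/* A sketch of the merge run above (hypothetical addresses): if ptrs is
 * { 0x1000, 0x1020, 0x1040 } and each chunk immediately follows the
 * previous one, the loop calls
 *     mspace_merge_objects(msp, 0x1000, 0x1020)  // merged chunk at 0x1000
 *     mspace_merge_objects(msp, 0x1000, 0x1040)  // merged chunk at 0x1000
 * and then frees the single combined chunk once, instead of calling
 * mspace_free() three times.  If a gap breaks the run, the accumulated
 * chunk is freed and merging restarts at the next pointer.
 */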

/*
 * Returns true iff <ptr> was allocated from the heap source.
 */
bool
dvmHeapSourceContains(const void *ptr)
{
    Heap *heap;

    HS_BOILERPLATE();

    heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        return dvmHeapBitmapIsObjectBitSet(&heap->objectBitmap, ptr) != 0;
    }
    return false;
}

/*
 * Returns the value of the requested flag.
 */
bool
dvmHeapSourceGetPtrFlag(const void *ptr, enum HeapSourcePtrFlag flag)
{
    if (ptr == NULL) {
        return false;
    }

    if (flag == HS_CONTAINS) {
        return dvmHeapSourceContains(ptr);
    } else if (flag == HS_ALLOCATED_IN_ZYGOTE) {
        HeapSource *hs = gHs;

        HS_BOILERPLATE();

        if (hs->sawZygote) {
            Heap *heap;

            heap = ptr2heap(hs, ptr);
            if (heap != NULL) {
                /* If the object is not in the active heap, we assume that
                 * it was allocated as part of zygote.
                 */
                return heap != hs->heaps;
            }
        }
        /* The pointer is outside of any known heap, or we are not
         * running in zygote mode.
         */
        return false;
    }

    return false;
}

/*
 * Returns the number of usable bytes in an allocated chunk; the size
 * may be larger than the size passed to dvmHeapSourceAlloc().
 */
size_t
dvmHeapSourceChunkSize(const void *ptr)
{
    Heap *heap;

    HS_BOILERPLATE();

    heap = ptr2heap(gHs, ptr);
    if (heap != NULL) {
        return mspace_usable_size(heap->msp, ptr);
    }
    return 0;
}

/*
 * Returns the number of bytes that the heap source has allocated
 * from the system using sbrk/mmap, etc.
 *
 * Caller must hold the heap lock.
 */
size_t
dvmHeapSourceFootprint()
{
    HS_BOILERPLATE();

//TODO: include size of bitmaps?
    return oldHeapOverhead(gHs, true);
}

/*
 * Return the real bytes used by old heaps and external memory
 * plus the soft usage of the current heap.  When a soft limit
 * is in effect, this is effectively what it's compared against
 * (though, in practice, it only looks at the current heap).
 */
static size_t
getSoftFootprint(bool includeActive)
{
    HeapSource *hs = gHs;
    size_t ret;

    HS_BOILERPLATE();

    ret = oldHeapOverhead(hs, false) + hs->externalBytesAllocated;
    if (includeActive) {
        ret += hs->heaps[0].bytesAllocated;
    }

    return ret;
}

/*
 * Gets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.
 */
size_t
dvmHeapSourceGetIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->idealSize;
}

/*
 * Sets the soft limit, handling any necessary changes to the allowed
 * footprint of the active heap.
 */
static void
setSoftLimit(HeapSource *hs, size_t softLimit)
{
    /* Compare against the actual footprint, rather than the
     * max_allowed, because the heap may not have grown all the
     * way to the allowed size yet.
     */
    mspace msp = hs->heaps[0].msp;
    size_t currentHeapSize = mspace_footprint(msp);
    if (softLimit < currentHeapSize) {
        /* Don't let the heap grow any more, and impose a soft limit.
         */
        mspace_set_max_allowed_footprint(msp, currentHeapSize);
        hs->softLimit = softLimit;
    } else {
        /* Let the heap grow to the requested max, and remove any
         * soft limit, if set.
         */
        mspace_set_max_allowed_footprint(msp, softLimit);
        hs->softLimit = INT_MAX;
    }
}
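
/* Numeric sketch of the two branches (with a current footprint of 6MB):
 *     setSoftLimit(hs, 4MB) -> max_allowed pinned at 6MB, softLimit = 4MB;
 *         dvmHeapSourceAlloc() starts failing once bytesAllocated + n
 *         would pass 4MB.
 *     setSoftLimit(hs, 8MB) -> max_allowed raised to 8MB, softLimit =
 *         INT_MAX; no soft limit, and the mspace may grow to 8MB.
 */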

/*
 * Sets the maximum number of bytes that the heap source is allowed
 * to allocate from the system.  Clamps to the appropriate maximum
 * value.
 */
static void
setIdealFootprint(size_t max)
{
    HeapSource *hs = gHs;
#if DEBUG_HEAP_SOURCE
    HeapSource oldHs = *hs;
    mspace msp = hs->heaps[0].msp;
    size_t oldAllowedFootprint =
            mspace_max_allowed_footprint(msp);
#endif

    HS_BOILERPLATE();

    if (max > hs->absoluteMaxSize) {
        LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %zd.%03zdMB\n",
                FRACTIONAL_MB(max),
                FRACTIONAL_MB(hs->absoluteMaxSize));
        max = hs->absoluteMaxSize;
    } else if (max < hs->minimumSize) {
        max = hs->minimumSize;
    }

    /* Convert max into a size that applies to the active heap.
     * Old heaps and external allocations will count against the ideal size.
     */
    size_t overhead = getSoftFootprint(false);
    size_t activeMax;
    if (overhead < max) {
        activeMax = max - overhead;
    } else {
        activeMax = 0;
    }

    setSoftLimit(hs, activeMax);
    hs->idealSize = max;

    HSTRACE("IDEAL %zd->%zd (%d), soft %zd->%zd (%d), allowed %zd->%zd (%d), "
            "ext %zd\n",
            oldHs.idealSize, hs->idealSize, hs->idealSize - oldHs.idealSize,
            oldHs.softLimit, hs->softLimit, hs->softLimit - oldHs.softLimit,
            oldAllowedFootprint, mspace_max_allowed_footprint(msp),
            mspace_max_allowed_footprint(msp) - oldAllowedFootprint,
            hs->externalBytesAllocated);
}

/*
 * Make the ideal footprint equal to the current footprint.
 */
static void
snapIdealFootprint()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    setIdealFootprint(getSoftFootprint(true));
}

/*
 * Gets the current ideal heap utilization, represented as a number
 * between zero and one.
 */
float dvmGetTargetHeapUtilization()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return (float)hs->targetUtilization / (float)HEAP_UTILIZATION_MAX;
}

/*
 * Sets the new ideal heap utilization, represented as a number
 * between zero and one.
 */
void dvmSetTargetHeapUtilization(float newTarget)
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    /* Clamp it to a reasonable range.
     */
    // TODO: This may need some tuning.
    if (newTarget < 0.2) {
        newTarget = 0.2;
    } else if (newTarget > 0.8) {
        newTarget = 0.8;
    }

    hs->targetUtilization =
            (size_t)(newTarget * (float)HEAP_UTILIZATION_MAX);
    LOGV("Set heap target utilization to %zd/%d (%f)\n",
            hs->targetUtilization, HEAP_UTILIZATION_MAX, newTarget);
}
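
/* Example: dvmSetTargetHeapUtilization(0.5f) stores 512/1024, i.e. after
 * a GC the live set should ideally occupy half of the heap.  Values
 * outside [0.2, 0.8] are clamped, so passing 0.05f behaves like 0.2f.
 */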

/*
 * If set is true, sets the new minimum heap size to size; always
 * returns the current (or previous) size.  Setting size to zero
 * removes the current minimum constraint (if present).
 */
size_t
dvmMinimumHeapSize(size_t size, bool set)
{
    HeapSource *hs = gHs;
    size_t oldMinimumSize;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.  We also need the
     * lock for the call to setIdealFootprint().
     */
    dvmLockHeap();

    HS_BOILERPLATE();

    oldMinimumSize = hs->minimumSize;

    if (set) {
        /* Don't worry about external allocations right now.
         * setIdealFootprint() will take them into account when
         * minimumSize is used, and it's better to hold onto the
         * intended minimumSize than to clamp it arbitrarily based
         * on the current allocations.
         */
        if (size > hs->absoluteMaxSize) {
            size = hs->absoluteMaxSize;
        }
        hs->minimumSize = size;
        if (size > hs->idealSize) {
            /* Force a snap to the minimum value, which we just set
             * and which setIdealFootprint() will take into consideration.
             */
            setIdealFootprint(hs->idealSize);
        }
        /* Otherwise we'll just keep it in mind the next time
         * setIdealFootprint() is called.
         */
    }

    dvmUnlockHeap();

    return oldMinimumSize;
}

/*
 * Given the size of a live set, returns the ideal heap size for the
 * current target utilization and MIN/MAX values.
 *
 * targetUtilization is in the range 1..HEAP_UTILIZATION_MAX.
 */
static size_t
getUtilizationTarget(const HeapSource *hs,
        size_t liveSize, size_t targetUtilization)
{
    size_t targetSize;

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     */
    targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX;

    /* Cap the amount of free space, though, so we don't end up
     * with, e.g., 8MB of free space when the live set size hits 8MB.
     */
    if (targetSize > liveSize + HEAP_IDEAL_FREE) {
        targetSize = liveSize + HEAP_IDEAL_FREE;
    } else if (targetSize < liveSize + HEAP_MIN_FREE) {
        targetSize = liveSize + HEAP_MIN_FREE;
    }
    return targetSize;
}
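
/* Worked example, using the default targetUtilization of 512 (50%):
 *     liveSize = 4MB   -> (4MB / 512) * 1024 = 8MB of ideal heap, which
 *         would leave 4MB free; that exceeds HEAP_IDEAL_FREE (2MB), so
 *         the result is clamped down to 4MB + 2MB = 6MB.
 *     liveSize = 256KB -> (256KB / 512) * 1024 = 512KB, which would
 *         leave only 256KB free; that is under HEAP_MIN_FREE (512KB),
 *         so the result is raised to 256KB + 512KB = 768KB.
 */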

/*
 * Given the current contents of the active heap, increase the allowed
 * heap footprint to match the target utilization ratio.  This
 * should only be called immediately after a full mark/sweep.
 */
void dvmHeapSourceGrowForUtilization()
{
    HeapSource *hs = gHs;
    Heap *heap;
    size_t targetHeapSize;
    size_t currentHeapUsed;
    size_t oldIdealSize;
    size_t newHeapMax;
    size_t overhead;

    HS_BOILERPLATE();
    heap = hs2heap(hs);

    /* Use the current target utilization ratio to determine the
     * ideal heap size based on the size of the live set.
     * Note that only the active heap plays any part in this.
     *
     * Avoid letting the old heaps influence the target free size,
     * because they may be full of objects that aren't actually
     * in the working set.  Just look at the allocated size of
     * the current heap.
     */
    currentHeapUsed = heap->bytesAllocated;
#define LET_EXTERNAL_INFLUENCE_UTILIZATION 1
#if LET_EXTERNAL_INFLUENCE_UTILIZATION
    /* This is a hack to deal with the side-effects of moving
     * bitmap data out of the Dalvik heap.  Since the amount
     * of free space after a GC scales with the size of the
     * live set, many apps expected the large free space that
     * appeared along with megabytes' worth of bitmaps.  When
     * the bitmaps were removed, the free size shrank significantly,
     * and apps started GCing constantly.  This makes it so the
     * post-GC free space is the same size it would have been
     * if the bitmaps were still in the Dalvik heap.
     */
    currentHeapUsed += hs->externalBytesAllocated;
#endif
    targetHeapSize =
            getUtilizationTarget(hs, currentHeapUsed, hs->targetUtilization);
#if LET_EXTERNAL_INFLUENCE_UTILIZATION
    currentHeapUsed -= hs->externalBytesAllocated;
    targetHeapSize -= hs->externalBytesAllocated;
#endif

    /* The ideal size includes the old heaps; add overhead so that
     * it can be immediately subtracted again in setIdealFootprint().
     * If the target heap size would exceed the max, setIdealFootprint()
     * will clamp it to a legal value.
     */
    overhead = getSoftFootprint(false);
    oldIdealSize = hs->idealSize;
    setIdealFootprint(targetHeapSize + overhead);

    newHeapMax = mspace_max_allowed_footprint(heap->msp);
    if (softLimited(hs)) {
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB soft max "
                "(%zd.%03zdMB over, "
                "%zd.%03zdMB ext, "
                "%zd.%03zdMB real max)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(hs->softLimit),
                FRACTIONAL_MB(overhead),
                FRACTIONAL_MB(hs->externalBytesAllocated),
                FRACTIONAL_MB(newHeapMax));
    } else {
        LOGD_HEAP("GC old usage %zd.%zd%%; now "
                "%zd.%03zdMB used / %zd.%03zdMB real max "
                "(%zd.%03zdMB over, "
                "%zd.%03zdMB ext)\n",
                FRACTIONAL_PCT(currentHeapUsed, oldIdealSize),
                FRACTIONAL_MB(currentHeapUsed),
                FRACTIONAL_MB(newHeapMax),
                FRACTIONAL_MB(overhead),
                FRACTIONAL_MB(hs->externalBytesAllocated));
    }
}

/*
 * Return free pages to the system.
 * TODO: move this somewhere else, especially the native heap part.
 */

static void releasePagesInRange(void *start, void *end, void *nbytes)
{
    /* Linux requires that the madvise() start address is page-aligned.
     * We also align the end address.
     */
    start = (void *)ALIGN_UP_TO_PAGE_SIZE(start);
    end = (void *)ALIGN_DOWN_TO_PAGE_SIZE(end);
    if (start < end) {
        size_t length = (char *)end - (char *)start;
        madvise(start, length, MADV_DONTNEED);
        *(size_t *)nbytes += length;
    }
}
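
/* Sketch of a call (hypothetical addresses, 4096-byte pages): a free
 * chunk spanning [0x10100, 0x13F00) is narrowed to the whole pages it
 * contains, [0x11000, 0x13000), and those two pages are returned to the
 * kernel with MADV_DONTNEED; nbytes is bumped by 8192.  A free chunk
 * smaller than a page may contain no whole page and is left untouched.
 */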

/*
 * Return unused memory to the system if possible.
 */
void
dvmHeapSourceTrim(size_t bytesTrimmed[], size_t arrayLen)
{
    HeapSource *hs = gHs;
    size_t nativeBytes, heapBytes;
    size_t i;

    HS_BOILERPLATE();

    assert(arrayLen >= hs->numHeaps);

    heapBytes = 0;
    for (i = 0; i < hs->numHeaps; i++) {
        Heap *heap = &hs->heaps[i];

        /* Return the wilderness chunk to the system.
         */
        mspace_trim(heap->msp, 0);

        /* Return any whole free pages to the system.
         */
        bytesTrimmed[i] = 0;
        mspace_walk_free_pages(heap->msp, releasePagesInRange,
                               &bytesTrimmed[i]);
        heapBytes += bytesTrimmed[i];
    }

    /* Same for the native heap.
     */
    dlmalloc_trim(0);
    nativeBytes = 0;
    dlmalloc_walk_free_pages(releasePagesInRange, &nativeBytes);

    LOGD_HEAP("madvised %zd (GC) + %zd (native) = %zd total bytes\n",
            heapBytes, nativeBytes, heapBytes + nativeBytes);
}

/*
 * Walks over the heap source and passes every allocated and
 * free chunk to the callback.
 */
void
dvmHeapSourceWalk(void(*callback)(const void *chunkptr, size_t chunklen,
                                      const void *userptr, size_t userlen,
                                      void *arg),
                  void *arg)
{
    HeapSource *hs = gHs;
    size_t i;

    HS_BOILERPLATE();

    /* Walk the heaps from oldest to newest.
     */
//TODO: do this in address order
    for (i = hs->numHeaps; i > 0; --i) {
        mspace_walk_heap(hs->heaps[i-1].msp, callback, arg);
    }
}

/*
 * Gets the number of heaps available in the heap source.
 *
 * Caller must hold the heap lock, because gHs caches a field
 * in gDvm.gcHeap.
 */
size_t
dvmHeapSourceGetNumHeaps()
{
    HeapSource *hs = gHs;

    HS_BOILERPLATE();

    return hs->numHeaps;
}


/*
 * External allocation tracking
 *
 * In some situations, memory outside of the heap is tied to the
 * lifetime of objects in the heap.  Since that memory is kept alive
 * by heap objects, it should provide memory pressure that can influence
 * GCs.
 */


static bool
externalAllocPossible(const HeapSource *hs, size_t n)
{
    const Heap *heap;
    size_t currentHeapSize;

    /* Make sure that this allocation is even possible.
     * Don't let the external size plus the actual heap size
     * go over the absolute max.  This essentially treats
     * external allocations as part of the active heap.
     *
     * Note that this will fail "mysteriously" if there's
     * a small softLimit but a large heap footprint.
     */
    heap = hs2heap(hs);
    currentHeapSize = mspace_max_allowed_footprint(heap->msp);
    if (currentHeapSize + hs->externalBytesAllocated + n <=
            heap->absoluteMaxSize)
    {
        return true;
    }
    HSTRACE("externalAllocPossible(): "
            "footprint %zu + extAlloc %zu + n %zu >= max %zu (space for %zu)\n",
            currentHeapSize, hs->externalBytesAllocated, n,
            heap->absoluteMaxSize,
            heap->absoluteMaxSize -
                    (currentHeapSize + hs->externalBytesAllocated));
    return false;
}

#define EXTERNAL_TARGET_UTILIZATION 820  // 80%
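
/* With EXTERNAL_TARGET_UTILIZATION == 820, getUtilizationTarget() sizes
 * the external limit at roughly allocated * 1024 / 820, i.e. about
 * 1.25x the allocated bytes, subject to the usual HEAP_MIN_FREE /
 * HEAP_IDEAL_FREE clamps.  For example, 4MB of tracked external
 * allocations yields a limit of about 5MB, leaving ~1MB of external
 * "free space" that can be consumed without triggering a GC.
 */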

/*
 * Tries to update the internal count of externally-allocated memory.
 * If there's enough room for that memory, returns true.  If not, returns
 * false and does not update the count.
 *
 * The caller must ensure externalAllocPossible(hs, n) == true.
 */
static bool
externalAlloc(HeapSource *hs, size_t n, bool grow)
{
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    HSTRACE("externalAlloc(%zd%s)\n", n, grow ? ", grow" : "");
    assert(externalAllocPossible(hs, n));  // The caller must ensure this.

    /* External allocations have their own "free space" that they
     * can allocate from without causing a GC.
     */
    if (hs->externalBytesAllocated + n <= hs->externalLimit) {
        hs->externalBytesAllocated += n;
#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
        if (gDvm.allocProf.enabled) {
            Thread* self = dvmThreadSelf();
            gDvm.allocProf.externalAllocCount++;
            gDvm.allocProf.externalAllocSize += n;
            if (self != NULL) {
                self->allocProf.externalAllocCount++;
                self->allocProf.externalAllocSize += n;
            }
        }
#endif
        return true;
    }
    if (!grow) {
        return false;
    }

    /* GROW */
    hs->externalBytesAllocated += n;
    hs->externalLimit = getUtilizationTarget(hs,
            hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION);
    HSTRACE("EXTERNAL grow limit to %zd\n", hs->externalLimit);
    return true;
}

static void
gcForExternalAlloc(bool collectSoftReferences)
{
#ifdef WITH_PROFILER  // even if !PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
#endif
    dvmCollectGarbageInternal(collectSoftReferences, GC_EXTERNAL_ALLOC);
}

/*
 * Updates the internal count of externally-allocated memory.  If there's
 * enough room for that memory, returns true.  If not, returns false and
 * does not update the count.
 *
 * May cause a GC as a side-effect.
 */
bool
dvmTrackExternalAllocation(size_t n)
{
    HeapSource *hs = gHs;
    bool ret = false;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.
     */
    dvmLockHeap();

    HS_BOILERPLATE();
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    if (!externalAllocPossible(hs, n)) {
        LOGE_HEAP("%zd-byte external allocation "
                "too large for this process.\n", n);
        goto out;
    }

    /* Try "allocating" using the existing "free space".
     */
    HSTRACE("EXTERNAL alloc %zu (%zu < %zu)\n",
            n, hs->externalBytesAllocated, hs->externalLimit);
    if (externalAlloc(hs, n, false)) {
        ret = true;
        goto out;
    }

    /* The "allocation" failed.  Free up some space by doing
     * a full garbage collection.  This may grow the heap source
     * if the live set is sufficiently large.
     */
    HSTRACE("EXTERNAL alloc %zd: GC 1\n", n);
    gcForExternalAlloc(false);  // don't collect SoftReferences
    if (externalAlloc(hs, n, false)) {
        ret = true;
        goto out;
    }

    /* Even that didn't work;  this is an exceptional state.
     * Try harder, growing the heap source if necessary.
     */
    HSTRACE("EXTERNAL alloc %zd: frag\n", n);
    ret = externalAlloc(hs, n, true);
    dvmHeapSizeChanged();
    if (ret) {
        goto out;
    }

    /* We couldn't even grow enough to satisfy the request.
     * Try one last GC, collecting SoftReferences this time.
     */
    HSTRACE("EXTERNAL alloc %zd: GC 2\n", n);
    gcForExternalAlloc(true);  // collect SoftReferences
    ret = externalAlloc(hs, n, true);
    dvmHeapSizeChanged();
    if (!ret) {
        LOGE_HEAP("Out of external memory on a %zu-byte allocation.\n", n);
    }

#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.failedExternalAllocCount++;
        gDvm.allocProf.failedExternalAllocSize += n;
        if (self != NULL) {
            self->allocProf.failedExternalAllocCount++;
            self->allocProf.failedExternalAllocSize += n;
        }
    }
#endif

out:
    dvmUnlockHeap();

    return ret;
}

/*
 * Reduces the internal count of externally-allocated memory.
 */
void
dvmTrackExternalFree(size_t n)
{
    HeapSource *hs = gHs;
    size_t newExternalLimit;
    size_t oldExternalBytesAllocated;

    HSTRACE("EXTERNAL free %zu (%zu < %zu)\n",
            n, hs->externalBytesAllocated, hs->externalLimit);

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.
     */
    dvmLockHeap();

    HS_BOILERPLATE();
    assert(hs->externalLimit >= hs->externalBytesAllocated);

    oldExternalBytesAllocated = hs->externalBytesAllocated;
    if (n <= hs->externalBytesAllocated) {
        hs->externalBytesAllocated -= n;
    } else {
        n = hs->externalBytesAllocated;
        hs->externalBytesAllocated = 0;
    }

#if defined(WITH_PROFILER) && PROFILE_EXTERNAL_ALLOCATIONS
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.externalFreeCount++;
        gDvm.allocProf.externalFreeSize += n;
        if (self != NULL) {
            self->allocProf.externalFreeCount++;
            self->allocProf.externalFreeSize += n;
        }
    }
#endif

    /* Shrink as quickly as we can.
     */
    newExternalLimit = getUtilizationTarget(hs,
            hs->externalBytesAllocated, EXTERNAL_TARGET_UTILIZATION);
    if (newExternalLimit < oldExternalBytesAllocated) {
        /* Make sure that the remaining free space is at least
         * big enough to allocate something of the size that was
         * just freed.  This makes it more likely that
         *     externalFree(N); externalAlloc(N);
         * will work without causing a GC.
         */
        HSTRACE("EXTERNAL free preserved %zu extra free bytes\n",
                oldExternalBytesAllocated - newExternalLimit);
        newExternalLimit = oldExternalBytesAllocated;
    }
    if (newExternalLimit < hs->externalLimit) {
        hs->externalLimit = newExternalLimit;
    }

    dvmUnlockHeap();
}
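
/* Sketch of the shrink logic above (assuming no clamps kick in): with
 * 6MB allocated and a ~7.5MB limit, freeing 2MB leaves 4MB allocated.
 * The recomputed target (~5MB) is below the old 6MB allocation, so the
 * limit is held at 6MB instead; a follow-up externalAlloc(2MB) can then
 * succeed without forcing a GC.
 */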

/*
 * Returns the number of externally-allocated bytes being tracked by
 * dvmTrackExternalAllocation/Free().
 */
size_t
dvmGetExternalBytesAllocated()
{
    const HeapSource *hs = gHs;
    size_t ret;

    /* gHs caches an entry in gDvm.gcHeap;  we need to hold the
     * heap lock if we're going to look at it.
     */
    dvmLockHeap();
    HS_BOILERPLATE();
    ret = hs->externalBytesAllocated;
    dvmUnlockHeap();

    return ret;
}