
Lines Matching full:heap

25 #include "alloc/Heap.h"
45 /* Number of seconds to wait after a GC before performing a heap trim
67 struct Heap {
72 /* The largest size that this heap is allowed to grow to.
92 * The lowest address of this heap, inclusive.
97 * The highest address of this heap, exclusive.
103 /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX
107 /* The starting heap size.
111 * The largest size that the heap source as a whole is allowed to grow to.
116 * The largest size to which we permit the heap to grow. This value allows
117 * the user to limit the heap growth below the maximum size. This
121 * heap.
125 /* The desired max size of the heap source as a whole.
130 * active heap before a GC is forced. This is used to "shrink" the
131 * heap in lieu of actual compaction.
135 /* The heaps; heaps[0] is always the active heap,
138 Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];
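Collated, the matched declarations above suggest roughly the following layout. This is a sketch: field types, ordering, and any members the search did not match (msp, numHeaps, idealSize, concurrentStartBytes as a named field) are assumptions consistent with how the rest of the listing uses them.

    struct Heap {
        /* The mspace to allocate from. */
        mspace msp;
        /* The largest size that this heap is allowed to grow to. */
        size_t maximumSize;
        /* Number of bytes allocated from this mspace for objects. */
        size_t bytesAllocated;
        /* Number of objects currently allocated from this mspace. */
        size_t objectsAllocated;
        /* The lowest address of this heap, inclusive. */
        char *base;
        /* The highest address of this heap, exclusive. */
        char *limit;
        /* Bytes allocated at which a concurrent GC should start. */
        size_t concurrentStartBytes;
    };

    struct HeapSource {
        /* Target ideal heap utilization ratio; range 1..HEAP_UTILIZATION_MAX. */
        size_t targetUtilization;
        /* The starting heap size. */
        size_t startSize;
        /* The largest size that the heap source as a whole is allowed
         * to grow to. */
        size_t maximumSize;
        /* The user-settable cap on heap growth, below maximumSize. */
        size_t growthLimit;
        /* The desired max size of the heap source as a whole. */
        size_t idealSize;
        /* The maximum number of bytes allowed to be allocated from the
         * active heap before a GC is forced; used to "shrink" the heap
         * in lieu of actual compaction. */
        size_t softLimit;
        /* The heaps; heaps[0] is always the active heap, which new
         * objects should be allocated from. */
        Heap heaps[HEAP_SOURCE_MAX_HEAP_COUNT];
        /* The current number of heaps. */
        size_t numHeaps;
    };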
182 * Returns true iff a soft limit is in effect for the active heap.
188 * if there is more than one heap. If there is only one
189 * heap, a non-SIZE_MAX softLimit should always be the same
197 * allocated from the active heap before a GC is forced.
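The invariants described at lines 182-197 reduce to a pair of small accessors, roughly as below. The names and the exact fallback are assumptions consistent with the fragments; hs2heap() (seen at line 797) is taken to map the heap source to its active heap.

    /* Returns true iff a soft limit is in effect for the active heap. */
    static bool isSoftLimited(const HeapSource *hs)
    {
        /* softLimit is SIZE_MAX when no soft limit is in effect. */
        return hs->softLimit != SIZE_MAX;
    }

    /* Returns the number of bytes that may be allocated from the
     * active heap before a GC is forced. */
    static size_t getAllocLimit(const HeapSource *hs)
    {
        if (isSoftLimited(hs)) {
            return hs->softLimit;
        }
        return mspace_max_allowed_footprint(hs2heap(hs)->msp);
    }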
210 * is false, don't count the heap at index 0.
230 * Returns the heap that <ptr> could have come from, or NULL
231 * if it could not have come from any heap.
233 static Heap *ptr2heap(const HeapSource *hs, const void *ptr)
239 const Heap *const heap = &hs->heaps[i];
241 if ((const char *)ptr >= heap->base && (const char *)ptr < heap->limit) {
242 return (Heap *)heap;
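Filled out, the lookup at lines 233-242 is approximately the following; only the loop bounds and null check are assumed, the rest is the matched lines verbatim.

    /* Returns the heap that <ptr> could have come from, or NULL
     * if it could not have come from any heap. */
    static Heap *ptr2heap(const HeapSource *hs, const void *ptr)
    {
        const size_t numHeaps = hs->numHeaps;
        if (ptr != NULL) {
            for (size_t i = 0; i < numHeaps; i++) {
                const Heap *const heap = &hs->heaps[i];
                if ((const char *)ptr >= heap->base &&
                        (const char *)ptr < heap->limit) {
                    return (Heap *)heap;
                }
            }
        }
        return NULL;
    }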
252 * us a much more accurate picture of heap utilization than
257 static void countAllocation(Heap *heap, const void *ptr)
259 assert(heap->bytesAllocated < mspace_footprint(heap->msp));
261 heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
263 heap->objectsAllocated++;
267 assert(heap->bytesAllocated < mspace_footprint(heap->msp));
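Completed, the accounting helper at lines 257-267 looks roughly like this; any live-bitmap bookkeeping the real function performs between the asserts is elided.

    static void countAllocation(Heap *heap, const void *ptr)
    {
        assert(heap->bytesAllocated < mspace_footprint(heap->msp));

        /* Count usable size plus per-chunk overhead, which gives a much
         * more accurate picture of heap utilization than object size
         * alone. */
        heap->bytesAllocated += mspace_usable_size(heap->msp, ptr) +
                HEAP_SOURCE_CHUNK_OVERHEAD;
        heap->objectsAllocated++;

        assert(heap->bytesAllocated < mspace_footprint(heap->msp));
    }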
270 static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
272 size_t delta = mspace_usable_size(heap->msp, ptr) + HEAP_SOURCE_CHUNK_OVERHEAD;
274 if (delta < heap->bytesAllocated) {
275 heap->bytesAllocated -= delta;
277 heap->bytesAllocated = 0;
281 if (heap->objectsAllocated > 0) {
282 heap->objectsAllocated--;
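The matching free-side helper (lines 270-282), with the underflow guards shown above and the freed-byte total reported back to the caller:

    static void countFree(Heap *heap, const void *ptr, size_t *numBytes)
    {
        size_t delta = mspace_usable_size(heap->msp, ptr) +
                HEAP_SOURCE_CHUNK_OVERHEAD;
        if (delta < heap->bytesAllocated) {
            heap->bytesAllocated -= delta;
        } else {
            heap->bytesAllocated = 0;
        }
        if (heap->objectsAllocated > 0) {
            heap->objectsAllocated--;
        }
        *numBytes += delta;
    }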
292 * a heap source.
295 * letting the heap grow to startSize. This saves
299 LOGV_HEAP("Creating VM heap of size %zu", startSize);
305 /* Don't let the heap grow past the starting size without
313 LOGE_HEAP("Can't create VM heap of size (%zu,%zu): %s",
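A sketch of createMspace consistent with lines 292-313: carve an mspace out of the reserved region and cap its footprint at startSize until the GC raises it. The use of dlmalloc's create_mspace_with_base() and the errno-based error reporting are assumptions.

    static mspace createMspace(void *base, size_t startSize, size_t maximumSize)
    {
        LOGV_HEAP("Creating VM heap of size %zu", startSize);
        errno = 0;

        mspace msp = create_mspace_with_base(base, maximumSize, /*locked=*/true);
        if (msp != NULL) {
            /* Don't let the heap grow past the starting size without
             * our intervention. */
            mspace_set_max_allowed_footprint(msp, startSize);
        } else {
            LOGE_HEAP("Can't create VM heap of size (%zu,%zu): %s",
                    startSize, maximumSize, strerror(errno));
        }
        return msp;
    }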
321 * Add the initial heap. Returns false if the initial heap was
322 * already added to the heap source.
341 * Adds an additional heap to the heap source. Returns false if there
342 * are too many heaps or insufficient free space to add another heap.
346 Heap heap;
356 memset(&heap, 0, sizeof(heap));
359 * Heap storage comes from a common virtual memory reservation.
360 * The new heap will start on the page after the old heap.
374 heap.maximumSize = hs->growthLimit - overhead;
375 heap.concurrentStartBytes = HEAP_MIN_FREE - CONCURRENT_START;
376 heap.base = base;
377 heap.limit = heap.base + heap.maximumSize;
378 heap.msp = createMspace(base, HEAP_MIN_FREE, hs->maximumSize - overhead);
379 if (heap.msp == NULL) {
383 /* Don't let the soon-to-be-old heap grow any further.
390 /* Put the new heap in the list, at heaps[0].
394 hs->heaps[0] = heap;
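Assembled from lines 341-394, addNewHeap carves the new heap out of the common virtual memory reservation, freezes the old active heap, and installs the new one at heaps[0]. The base/overhead computation and the memmove-based shift are assumptions; the assignments in the middle are the matched lines.

    static bool addNewHeap(HeapSource *hs)
    {
        Heap heap;
        memset(&heap, 0, sizeof(heap));

        /* Heap storage comes from a common virtual memory reservation.
         * The new heap will start on the page after the old heap. */
        char *base = hs->heaps[0].limit;             /* assumed */
        size_t overhead = base - hs->heaps[0].base;  /* assumed */

        heap.maximumSize = hs->growthLimit - overhead;
        heap.concurrentStartBytes = HEAP_MIN_FREE - CONCURRENT_START;
        heap.base = base;
        heap.limit = heap.base + heap.maximumSize;
        heap.msp = createMspace(base, HEAP_MIN_FREE, hs->maximumSize - overhead);
        if (heap.msp == NULL) {
            return false;
        }

        /* Don't let the soon-to-be-old heap grow any further. */
        mspace_set_max_allowed_footprint(hs->heaps[0].msp,
                mspace_footprint(hs->heaps[0].msp));

        /* Put the new heap in the list, at heaps[0]; shift the rest down. */
        memmove(&hs->heaps[1], &hs->heaps[0],
                hs->numHeaps * sizeof(hs->heaps[0]));
        hs->heaps[0] = heap;
        hs->numHeaps++;
        return true;
    }

Freezing the old heap's footprint is what makes heaps[0] the only heap that ever grows; older heaps become read-mostly regions shared across zygote children.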
415 /* Timed out waiting for a GC request, schedule a heap trim. */
468 * heap is perfectly full of the smallest object.
499 * Initializes the heap source; must be called before any other
501 * allocated from the heap source.
515 LOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
525 base = dvmAllocRegion(length, PROT_NONE, "dalvik-heap");
531 * a heap source.
540 LOGE_HEAP("Can't allocate heap descriptor");
547 LOGE_HEAP("Can't allocate heap source");
565 LOGE_HEAP("Can't add initial heap");
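The startup path (lines 499-565) validates the size parameters, reserves the whole possible heap as one inaccessible mapping, and builds the descriptors inside it. A sketch; the error-path details, helper signatures, and field assignments beyond the matched log lines are assumptions.

    GcHeap *dvmHeapSourceStartup(size_t startSize, size_t maximumSize,
                                 size_t growthLimit)
    {
        if (startSize > maximumSize || growthLimit > maximumSize) {
            LOGE("Bad heap size parameters (start=%zd, max=%zd, limit=%zd)",
                 startSize, maximumSize, growthLimit);
            return NULL;
        }

        /* Reserve the entire possible heap up front; pages only become
         * usable as the mspaces grow into them. */
        size_t length = maximumSize;   /* assumed page-aligned */
        char *base = dvmAllocRegion(length, PROT_NONE, "dalvik-heap");
        if (base == NULL) {
            return NULL;
        }

        mspace msp = createMspace(base, startSize, maximumSize);
        if (msp == NULL) {
            munmap(base, length);
            return NULL;
        }

        GcHeap *gcHeap = (GcHeap *)calloc(1, sizeof(*gcHeap));
        if (gcHeap == NULL) {
            LOGE_HEAP("Can't allocate heap descriptor");
            munmap(base, length);
            return NULL;
        }
        HeapSource *hs = (HeapSource *)calloc(1, sizeof(*hs));
        if (hs == NULL) {
            LOGE_HEAP("Can't allocate heap source");
            free(gcHeap);
            munmap(base, length);
            return NULL;
        }
        hs->startSize = startSize;
        hs->maximumSize = maximumSize;
        hs->growthLimit = growthLimit;
        hs->softLimit = SIZE_MAX;

        if (!addInitialHeap(hs, msp, growthLimit)) {
            LOGE_HEAP("Can't add initial heap");
            free(hs);
            free(gcHeap);
            munmap(base, length);
            return NULL;
        }
        gcHeap->heapSource = hs;
        gHs = hs;
        return gcHeap;
    }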
601 * first time. We create a heap for all future zygote process allocations,
602 * in an attempt to avoid touching pages in the zygote heap. (This would
615 /* Create a new heap for post-fork zygote allocations. We only
618 LOGV("Splitting out new zygote heap");
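The pre-fork hook implied by lines 601-618, sketched; the once-only guard flag is hypothetical (the real code keeps it elsewhere), the rest follows the fragments.

    bool dvmHeapSourceStartupBeforeFork()
    {
        HeapSource *hs = gHs;
        if (!hs->sawZygoteFork) {   /* hypothetical once-only flag */
            /* Create a new heap for post-fork zygote allocations. We
             * only do this once, no matter how many times the zygote
             * process forks, to avoid touching pages in the zygote heap. */
            LOGV("Splitting out new zygote heap");
            hs->sawZygoteFork = true;
            return addNewHeap(hs);
        }
        return true;
    }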
662 * Returns the requested value. If the per-heap stats are requested, fill
665 * Caller must hold the heap lock.
678 Heap *const heap = &hs->heaps[i];
682 value = mspace_footprint(heap->msp);
685 value = mspace_max_allowed_footprint(heap->msp);
688 value = heap->bytesAllocated;
691 value = heap->objectsAllocated;
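The per-heap stat reads at lines 678-691 suggest a dispatch of the following shape; the HS_* spec names and the enum type are assumptions, the four accessor expressions are the matched lines.

    static size_t getValue(Heap *const heap, HeapSourceValueSpec spec)
    {
        switch (spec) {
        case HS_FOOTPRINT:
            return mspace_footprint(heap->msp);
        case HS_ALLOWED_FOOTPRINT:
            return mspace_max_allowed_footprint(heap->msp);
        case HS_BYTES_ALLOCATED:
            return heap->bytesAllocated;
        case HS_OBJECTS_ALLOCATED:
            return heap->objectsAllocated;
        default:
            return 0;
        }
    }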
763 /* heap[0] is never immune */
797 Heap* heap = hs2heap(hs);
798 if (heap->bytesAllocated + n > hs->softLimit) {
801 * if the heap is full.
807 void* ptr = mspace_calloc(heap->msp, 1, n);
811 countAllocation(heap, ptr);
822 if (heap->bytesAllocated > heap->concurrentStartBytes) {
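The allocation fast path (lines 797-822), sketched: fail at the soft limit as if the heap were full, allocate zeroed storage, and request a concurrent collection once past the watermark. The GC wakeup helper is hypothetical.

    void *dvmHeapSourceAlloc(size_t n)
    {
        HeapSource *hs = gHs;
        Heap *heap = hs2heap(hs);

        if (heap->bytesAllocated + n > hs->softLimit) {
            /* This allocation would push us over the soft limit; act as
             * if the heap is full. */
            return NULL;
        }
        void *ptr = mspace_calloc(heap->msp, 1, n);
        if (ptr == NULL) {
            return NULL;
        }
        countAllocation(heap, ptr);
        if (heap->bytesAllocated > heap->concurrentStartBytes) {
            /* Over the watermark: kick off a concurrent GC now instead
             * of waiting for the heap to fill completely. */
            requestConcurrentGc();   /* hypothetical wakeup */
        }
        return ptr;
    }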
835 static void* heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
840 size_t max = heap->maximumSize;
842 mspace_set_max_allowed_footprint(heap->msp, max);
848 mspace_set_max_allowed_footprint(heap->msp,
849 mspace_footprint(heap->msp));
862 Heap* heap = hs2heap(hs);
885 /* We're not soft-limited. Grow the heap to satisfy the request.
888 ptr = heapAllocAndGrow(hs, heap, n);
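heapAllocAndGrow (lines 835-849), completed from the fragments: temporarily lift the footprint cap to the heap's maximum, retry the allocation, then clamp the cap back down to whatever the mspace actually grew to. The caller at line 888 may readjust max_allowed to a more appropriate value afterward.

    static void *heapAllocAndGrow(HeapSource *hs, Heap *heap, size_t n)
    {
        /* Grow as much as possible, but don't let the real footprint
         * go over the absolute max. */
        size_t max = heap->maximumSize;
        mspace_set_max_allowed_footprint(heap->msp, max);

        void *ptr = dvmHeapSourceAlloc(n);

        /* Shrink back down as small as possible. */
        mspace_set_max_allowed_footprint(heap->msp,
                mspace_footprint(heap->msp));
        return ptr;
    }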
919 Heap* heap = ptr2heap(gHs, *ptrs);
921 if (heap != NULL) {
922 mspace msp = heap->msp;
924 // much. For heap[0] -- the 'active heap' -- we call
927 if (heap == gHs->heaps) {
943 assert(ptr2heap(gHs, ptrs[0]) == heap);
944 countFree(heap, ptrs[0], &numBytes);
950 assert(ptr2heap(gHs, ptrs[i]) == heap);
951 countFree(heap, ptrs[i], &numBytes);
964 // This is not an 'active heap'. Only do the accounting.
967 assert(ptr2heap(gHs, ptrs[i]) == heap);
968 countFree(heap, ptrs[i], &numBytes);
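Reassembled from lines 919-968: for heap[0], the 'active heap', adjacent chunks are merged before freeing so dlmalloc coalesces whole runs at once; for older heaps only the accounting is updated. The merge-failure convention (a NULL return for non-adjacent chunks) is an assumption about the mspace_merge_objects extension the fragments reference.

    size_t dvmHeapSourceFreeList(size_t numPtrs, void **ptrs)
    {
        size_t numBytes = 0;
        Heap *heap = ptr2heap(gHs, *ptrs);
        if (heap == NULL) {
            return 0;
        }
        mspace msp = heap->msp;
        if (heap == gHs->heaps) {
            /* Active heap: merge adjacent chunks and free each merged
             * run with a single mspace_free() call. */
            void *merged = ptrs[0];
            countFree(heap, ptrs[0], &numBytes);
            for (size_t i = 1; i < numPtrs; i++) {
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
                if (mspace_merge_objects(msp, merged, ptrs[i]) == NULL) {
                    /* Not adjacent: free the run so far and start a
                     * new one at ptrs[i]. */
                    mspace_free(msp, merged);
                    merged = ptrs[i];
                }
            }
            mspace_free(msp, merged);
        } else {
            /* This is not an 'active heap'; only do the accounting. */
            for (size_t i = 0; i < numPtrs; i++) {
                assert(ptr2heap(gHs, ptrs[i]) == heap);
                countFree(heap, ptrs[i], &numBytes);
            }
        }
        return numBytes;
    }

Batching frees this way matters during sweep, when the collector hands back long runs of dead objects that were allocated contiguously.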
976 * Returns true iff <ptr> is in the heap source.
986 * Returns true iff <ptr> was allocated from the heap source.
1005 Heap *heap = ptr2heap(hs, obj);
1006 if (heap != NULL) {
1007 /* If the object is not in the active heap, we assume that
1010 return heap != hs->heaps;
1013 /* The pointer is outside of any known heap, or we are not
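The zygote-object test at lines 1005-1013, completed; the function name is assumed, the body follows the fragments.

    bool dvmIsZygoteObject(const Object *obj)
    {
        HeapSource *hs = gHs;
        Heap *heap = ptr2heap(hs, obj);
        if (heap != NULL) {
            /* If the object is not in the active heap, we assume that
             * it was allocated as part of zygote. */
            return heap != hs->heaps;
        }
        /* The pointer is outside of any known heap, or we are not
         * running in zygote mode. */
        return false;
    }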
1027 Heap* heap = ptr2heap(gHs, ptr);
1028 if (heap != NULL) {
1029 return mspace_usable_size(heap->msp, ptr);
1035 * Returns the number of bytes that the heap source has allocated
1038 * Caller must hold the heap lock.
1054 * Returns the current maximum size of the heap source respecting any
1065 * maximum heap size.
1082 * current heap. When a soft limit is in effect, this is effectively
1084 * the current heap).
1100 * Gets the maximum number of bytes that the heap source is allowed
1114 * footprint of the active heap.
1119 * max_allowed, because the heap may not have grown all the
1125 /* Don't let the heap grow any more, and impose a soft limit.
1130 /* Let the heap grow to the requested max, and remove any
1139 * Sets the maximum number of bytes that the heap source is allowed
1150 LOGI_HEAP("Clamp target GC heap from %zd.%03zdMB to %u.%03uMB",
1156 /* Convert max into a size that applies to the active heap.
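The two branches quoted at lines 1125-1130 belong to the soft-limit setter, roughly as follows; setIdealFootprint() (line 1139 onward) is then a thin wrapper that converts the requested max into a size that applies to the active heap before calling it.

    static void setSoftLimit(HeapSource *hs, size_t softLimit)
    {
        /* Compare against the actual footprint, rather than the
         * max_allowed, because the heap may not have grown all the
         * way to the allowed size yet. */
        mspace msp = hs->heaps[0].msp;
        size_t currentHeapSize = mspace_footprint(msp);
        if (softLimit < currentHeapSize) {
            /* Don't let the heap grow any more, and impose a soft limit. */
            mspace_set_max_allowed_footprint(msp, currentHeapSize);
            hs->softLimit = softLimit;
        } else {
            /* Let the heap grow to the requested max, and remove any
             * soft limit, if set. */
            mspace_set_max_allowed_footprint(msp, softLimit);
            hs->softLimit = SIZE_MAX;
        }
    }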
1182 * Gets the current ideal heap utilization, represented as a number
1195 * Sets the new ideal heap utilization, represented as a number
1215 LOGV("Set heap target utilization to %zd/%d (%f)",
1220 * Given the size of a live set, returns the ideal heap size given
1228 * ideal heap size based on the size of the live set.
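The comment at lines 1220-1228 describes a pure function of the live-set size. A sketch; HEAP_IDEAL_FREE and the exact clamping bounds are assumptions, and targetUtilization is fixed-point out of HEAP_UTILIZATION_MAX per the field comment at line 103.

    /* Given the size of a live set, returns the ideal heap size given
     * the current target utilization ratio. */
    static size_t getUtilizationTarget(size_t liveSize, size_t targetUtilization)
    {
        /* Scale the live size by the inverse of the target ratio. */
        size_t targetSize = (liveSize / targetUtilization) * HEAP_UTILIZATION_MAX;

        /* Clamp the headroom to the [HEAP_MIN_FREE, HEAP_IDEAL_FREE]
         * range so small and huge live sets both behave sensibly. */
        if (targetSize > liveSize + HEAP_IDEAL_FREE) {
            targetSize = liveSize + HEAP_IDEAL_FREE;
        } else if (targetSize < liveSize + HEAP_MIN_FREE) {
            targetSize = liveSize + HEAP_MIN_FREE;
        }
        return targetSize;
    }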
1244 * Given the current contents of the active heap, increase the allowed
1245 * heap footprint to match the target utilization ratio. This
1253 Heap* heap = hs2heap(hs);
1256 * ideal heap size based on the size of the live set.
1257 * Note that only the active heap plays any part in this.
1262 * the current heap.
1264 size_t currentHeapUsed = heap->bytesAllocated;
1270 * If the target heap size would exceed the max, setIdealFootprint()
1279 heap->concurrentStartBytes = SIZE_MAX;
1281 heap->concurrentStartBytes = freeBytes - CONCURRENT_START;
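Putting lines 1244-1281 together: after a GC, resize the allowed footprint toward the target utilization and re-arm the concurrent-GC watermark. The old-heap overhead helper and the targetUtilization field name are assumptions; getAllocLimit is the accessor sketched earlier.

    void dvmHeapSourceGrowForUtilization()
    {
        HeapSource *hs = gHs;
        Heap *heap = hs2heap(hs);

        /* Use the current target utilization ratio to determine the
         * ideal heap size based on the size of the live set. Note that
         * only the active heap plays any part in this: old heaps may be
         * full of objects that are no longer in the working set. */
        size_t currentHeapUsed = heap->bytesAllocated;
        size_t targetHeapSize =
                getUtilizationTarget(currentHeapUsed, hs->targetUtilization);

        /* If the target heap size would exceed the max, setIdealFootprint()
         * will clamp it to a legal value. */
        setIdealFootprint(targetHeapSize + oldHeapOverhead(hs));  /* helper assumed */

        /* Arm the concurrent GC to start when fewer than CONCURRENT_START
         * bytes remain below the new allocation limit. */
        size_t freeBytes = getAllocLimit(hs);
        if (freeBytes < CONCURRENT_START) {
            heap->concurrentStartBytes = SIZE_MAX;
        } else {
            heap->concurrentStartBytes = freeBytes - CONCURRENT_START;
        }
    }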
1287 * TODO: move this somewhere else, especially the native heap part.
1313 Heap *heap = &hs->heaps[i];
1317 mspace_trim(heap->msp, 0);
1321 mspace_walk_free_pages(heap->msp, releasePagesInRange, &heapBytes);
1324 /* Same for the native heap.
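The trim pass at lines 1287-1324 returns unused pages in each mspace to the system, then does the same for the native heap. A sketch, assuming <sys/mman.h> and that releasePagesInRange madvise()s each run of free pages; dlmalloc_trim() for the native heap is likewise an assumption.

    static void releasePagesInRange(void *start, void *end, void *numBytes)
    {
        size_t length = (char *)end - (char *)start;
        if (madvise(start, length, MADV_DONTNEED) == 0) {
            *(size_t *)numBytes += length;
        }
    }

    static void trimHeaps(HeapSource *hs)
    {
        size_t heapBytes = 0;
        for (size_t i = 0; i < hs->numHeaps; i++) {
            Heap *heap = &hs->heaps[i];

            /* Return the wilderness chunk to the system. */
            mspace_trim(heap->msp, 0);

            /* Then release any whole free pages in the interior. */
            mspace_walk_free_pages(heap->msp, releasePagesInRange, &heapBytes);
        }

        /* Same for the native heap. */
        dlmalloc_trim(0);
    }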
1335 * Walks over the heap source and passes every allocated and
1355 * Gets the number of heaps available in the heap source.
1357 * Caller must hold the heap lock, because gHs caches a field
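The file closes with the heap walker and this accessor; a minimal sketch of the latter, grounded in the comments above:

    /* Gets the number of heaps available in the heap source. Caller
     * must hold the heap lock, because gHs caches a field of gDvm. */
    size_t dvmHeapSourceGetNumHeaps()
    {
        return gHs->numHeaps;
    }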