
Lines Matching defs:space (lines in heap.cc where the symbol 'space' is defined)

242 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
244 if (space != NEW_SPACE || FLAG_gc_global) {
262 // Is there enough space left in OLD to guarantee that a scavenge can
267 // allocator has not yet allocated from the OS and assigned to any space,
268 // and does not count available bytes already in the old space or code
269 // space. Undercounting is safe---we may get an unrequested full GC when
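
The cluster at 242-269 is the collector-selection path: anything but a new-space request escalates to a full collection, and the old-space headroom check may deliberately undercount. A minimal sketch of that decision; the enumerator order mirrors heap.h and the flag is reduced to a plain bool, both assumptions here:

    // Sketch only; gc_global stands in for FLAG_gc_global.
    enum AllocationSpace { NEW_SPACE, OLD_POINTER_SPACE, OLD_DATA_SPACE,
                           CODE_SPACE, MAP_SPACE, CELL_SPACE, LO_SPACE };
    enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };

    GarbageCollector SelectGarbageCollectorSketch(AllocationSpace space,
                                                  bool gc_global) {
      // Any request outside new space, or an explicit global-GC flag,
      // forces a full mark-compact collection.
      if (space != NEW_SPACE || gc_global) return MARK_COMPACTOR;
      // Otherwise a scavenge suffices, provided old space keeps enough
      // headroom for promoted objects; undercounting that headroom only
      // risks an unrequested full GC, never a failed scavenge.
      return SCAVENGER;
    }
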
320 PrintF("New space, used: %8" V8_PTR_PREFIX "d"
330 PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
336 PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
342 PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
348 PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
354 PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
409 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
410 total += space->SizeOfObjects();
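
Lines 409-410 use the next()-until-NULL iterator protocol that recurs throughout the file (AllSpaces here, PagedSpaces and OldSpaces elsewhere). A self-contained analog of the idiom, with a stand-in iterator over a NULL-terminated array:

    #include <cstddef>

    // Stand-ins for the Space/AllSpaces pair; next() yields each space
    // once and then NULL, exactly as the loop at 409 expects.
    struct Space {
      size_t size_of_objects;
      size_t SizeOfObjects() const { return size_of_objects; }
    };
    struct AllSpaces {
      Space** cursor;  // NULL-terminated array
      Space* next() { return *cursor ? *cursor++ : NULL; }
    };

    size_t SizeOfObjectsSketch(AllSpaces spaces) {
      size_t total = 0;
      for (Space* space = spaces.next(); space != NULL; space = spaces.next())
        total += space->SizeOfObjects();
      return total;
    }
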
448 // Since we are ignoring the return value, the exact choice of space does
458 // Since we are ignoring the return value, the exact choice of space does
481 bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
598 // We add a slack-factor of 2 in order to have space for a series of
601 // The ReserveSpace method on the large object space checks how much
617 // Committing memory to from space failed.
620 for (PagedSpace* space = spaces.next();
621 space != NULL;
622 space = spaces.next()) {
623 space->RelinkPageListInChunkOrder(true);
629 // Committing memory to from space failed again.
631 V8::FatalProcessOutOfMemory("Committing semi space failed.");
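
Lines 598-631 are the space-reservation path: the request carries a slack factor of two, and a failed attempt to commit from-space gets exactly one retry after relinking the paged spaces' page lists into chunk order. A condensed sketch of that control flow; both helpers are stand-ins for Heap-internal steps:

    #include <cstdio>
    #include <cstdlib>

    bool CommitFromSpace();                // stand-in: may fail
    void RelinkPagedSpacesInChunkOrder();  // stand-in: defragments pages

    void EnsureFromSpaceCommittedSketch() {
      if (CommitFromSpace()) return;
      // Committing memory to from space failed: relink pages into chunk
      // order so committed memory coalesces, then retry once.
      RelinkPagedSpacesInChunkOrder();
      if (!CommitFromSpace()) {
        // Failed again; mirror V8::FatalProcessOutOfMemory and abort.
        std::fprintf(stderr, "Committing semi space failed.\n");
        std::abort();
      }
    }
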
673 static void VerifyPageWatermarkValidity(PagedSpace* space,
675 PageIterator it(space, PageIterator::PAGES_IN_USE);
758 // space for the mutation speed.
896 // Visitor class to verify pointers in code or data space do not point into
897 // new space.
911 // Verify that there are no pointers to new space in spaces where we
930 // Grow the size of new space if there is room to grow and enough
972 // Flip the semispaces. After flipping, to space is empty, from space has
978 // to space or promoted to the old generation. For to-space
979 // objects, we treat the bottom of the to space as a queue. Newly
985 // We treat the top of the to space as a queue of addresses of
990 // There is guaranteed to be enough room at the top of the to space
992 // frees up its size in bytes from the top of the new space, and
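
Lines 972-992 describe the heart of the scavenger: after the semispaces flip, to-space starts empty and doubles as the breadth-first work queue of Cheney's algorithm, so copying an object and enqueueing it for scanning are the same bump of the allocation top. A toy, self-contained sketch of that queue discipline; the object layout is invented and forwarding pointers are only described in comments:

    #include <cstddef>
    #include <cstring>

    // Toy object header: just a byte count. Real V8 objects carry a map
    // pointer and are walked by per-type visitors.
    struct ToyObject { size_t size; };

    // 'scan' is the queue head, 'alloc' the tail; the region between
    // them holds copied objects whose pointers are not yet visited.
    char* ScavengeSketch(char* to_space, char** roots, int root_count) {
      char* scan = to_space;
      char* alloc = to_space;
      for (int i = 0; i < root_count; i++) {
        size_t n = reinterpret_cast<ToyObject*>(roots[i])->size;
        std::memcpy(alloc, roots[i], n);  // evacuate into to-space
        roots[i] = alloc;
        alloc += n;                       // enqueue = bump the top
      }
      while (scan < alloc) {
        // A real collector visits the pointers inside *scan here,
        // copying each unmoved target once and leaving a forwarding
        // address behind so sharing survives.
        scan += reinterpret_cast<ToyObject*>(scan)->size;
      }
      return alloc;  // new top of to-space
    }
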
1090 // String is still in new space. Update the table entry.
1223 // to new space.
1230 // Take another spin if there are now unswept objects in new space
1884 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
1888 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
1903 // allocation in new space.
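
Lines 1884-1903 show the allocation idiom the whole file leans on: pick the first-attempt space from the pretenure flag, pass a tenured retry space to AllocateRaw, and unwrap the MaybeObject, propagating failures unchanged. A self-contained analog; the MaybeObject type here is a simplified stand-in, not V8's tagged-pointer version:

    // Simplified stand-in for the MaybeObject protocol: ToObject()
    // reports success, and callers return the failure marker as-is.
    struct Object { int payload; };
    struct MaybeObject {
      Object* value;  // NULL encodes a retry/failure marker
      bool ToObject(Object** out) { *out = value; return value != 0; }
    };

    MaybeObject AllocateRawStandIn(int size);  // assumed: {NULL} on failure

    MaybeObject AllocateNumberSketch(int size) {
      Object* result;
      { MaybeObject maybe_result = AllocateRawStandIn(size);
        if (!maybe_result.ToObject(&result)) return maybe_result;
      }
      // ... initialize *result before handing it out ...
      MaybeObject ok = { result };
      return ok;
    }
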
2329 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2331 { MaybeObject* maybe_result = Allocate(proxy_map(), space);
2707 AllocationSpace space =
2710 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2738 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2741 space,
2773 // are allocated in large object space.
2894 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
2898 // space when new space is full and the object is not a large object.
2900 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
2903 AllocateRaw(map->instance_size(), space, retry_space);
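
Lines 2894-2903 refine the retry choice: a first attempt in new space retries in the tenured space matching the map's instance type (via TargetSpaceId), while attempts elsewhere retry in place. A sketch of just that selection; the type-to-space mapping below is simplified to two cases:

    enum AllocationSpace { NEW_SPACE, OLD_POINTER_SPACE, OLD_DATA_SPACE };
    enum InstanceType { HEAP_NUMBER_TYPE, JS_OBJECT_TYPE };

    AllocationSpace TargetSpaceIdSketch(InstanceType type) {
      // Pointer-free objects tenure into old data space, objects with
      // pointer fields into old pointer space (simplified mapping).
      return type == HEAP_NUMBER_TYPE ? OLD_DATA_SPACE : OLD_POINTER_SPACE;
    }

    AllocationSpace RetrySpaceSketch(AllocationSpace space, InstanceType type) {
      return (space != NEW_SPACE) ? space : TargetSpaceIdSketch(type);
    }
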
2956 AllocationSpace space =
2959 { MaybeObject* maybe_result = Allocate(function_map, space);
3002 // fields that point to new space so it's safe to skip the write
3167 AllocationSpace space =
3169 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
3171 { MaybeObject* maybe_obj = Allocate(map, space);
3291 // functions which may leave us with an object in old space.
3310 // Since we know the clone is allocated in new space, we can copy
3443 // If the string is in new space it cannot be used as a symbol.
3541 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3544 if (space == NEW_SPACE) {
3546 // Allocate in large object space, retry space will be ignored.
3547 space = LO_SPACE;
3549 // Allocate in new space, retry in large object space.
3552 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3553 space = LO_SPACE;
3556 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3576 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3579 if (space == NEW_SPACE) {
3581 // Allocate in large object space, retry space will be ignored.
3582 space = LO_SPACE;
3584 // Allocate in new space, retry in large object space.
3587 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3588 space = LO_SPACE;
3591 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3682 AllocationSpace space =
3685 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
3686 // Too big for new space.
3687 space = LO_SPACE;
3688 } else if (space == OLD_POINTER_SPACE &&
3690 // Too big for old pointer space.
3691 space = LO_SPACE;
3697 return AllocateRaw(size, space, retry_space);
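
The string allocators at 3541-3591 and the fixed-array path at 3682-3697 share one decision table: an oversized new-space candidate goes straight to large object space (so the retry argument is ignored), a merely paged-space-oversized one starts in new space but retries in LO_SPACE, and an oversized tenured request is redirected outright. A sketch of that shared logic; the two limits are invented placeholders for Heap's real constants:

    enum AllocationSpace { NEW_SPACE, OLD_DATA_SPACE, LO_SPACE };
    enum PretenureFlag { NOT_TENURED, TENURED };

    static const int kMaxNewSpaceObject   = 512 * 1024;  // placeholder value
    static const int kMaxPagedSpaceObject = 8 * 1024;    // placeholder value

    void SelectSpacesSketch(int size, PretenureFlag pretenure,
                            AllocationSpace* space,
                            AllocationSpace* retry_space) {
      *space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
      *retry_space = OLD_DATA_SPACE;
      if (*space == NEW_SPACE) {
        if (size > kMaxNewSpaceObject) {
          *space = LO_SPACE;        // too big for new space; retry ignored
        } else if (size > kMaxPagedSpaceObject) {
          *retry_space = LO_SPACE;  // fits new space now, not a page later
        }
      } else if (size > kMaxPagedSpaceObject) {
        *space = LO_SPACE;          // too big for old data space
      }
    }
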
3834 AllocationSpace space =
3837 { MaybeObject* maybe_result = Allocate(map, space);
3921 // conditionally uncommit from space.
3934 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3935 space->Print();
3942 // We do not look for code in new space, map space, or old space. If code
3970 PrintF("To space : ");
3972 PrintF("Old pointer space : ");
3974 PrintF("Old data space : ");
3976 PrintF("Code space : ");
3978 PrintF("Map space : ");
3980 PrintF("Cell space : ");
3982 PrintF("Large object space : ");
4007 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4008 return InSpace(value->address(), space);
4012 bool Heap::InSpace(Address addr, AllocationSpace space) {
4016 switch (space) {
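
Lines 4007-4016 layer the two InSpace overloads: the HeapObject form unwraps to an address, and the address form switches on the space tag and asks that space for containment. A sketch of the dispatch with stand-in predicates (three cases shown; the rest follow the same shape):

    typedef unsigned char* Address;

    struct SpaceStandIn { bool Contains(Address a); };  // stand-in predicate
    extern SpaceStandIn new_space, old_pointer_space, code_space;

    enum AllocationSpace { NEW_SPACE, OLD_POINTER_SPACE, CODE_SPACE };

    bool InSpaceSketch(Address addr, AllocationSpace space) {
      switch (space) {
        case NEW_SPACE:         return new_space.Contains(addr);
        case OLD_POINTER_SPACE: return old_pointer_space.Contains(addr);
        case CODE_SPACE:        return code_space.Contains(addr);
      }
      return false;
    }
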
4043 PagedSpace* space,
4045 PageIterator it(space, PageIterator::PAGES_IN_USE);
4061 static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
4062 LargeObjectIterator it(space);
4412 PagedSpace* space,
4417 PageIterator it(space, PageIterator::PAGES_IN_USE);
4437 ASSERT(space == old_pointer_space_ ||
4438 (space == map_space_ &&
4547 // space. We therefore cannot use a larger max semispace size
4553 // If we are not using snapshots we reserve space for the actual
4569 // The new space size must be a power of two to support single-bit testing
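
Line 4569's constraint pays off in the containment test: with a power-of-two, size-aligned semispace, membership is a single mask-and-compare instead of two bounds checks. A standalone illustration (region parameters invented):

    #include <cstdint>

    // Assumes size is a power of two and start is size-aligned, as line
    // 4569 requires of the new space.
    bool InRegionSketch(uintptr_t addr, uintptr_t start, uintptr_t size) {
      uintptr_t mask = ~(size - 1);
      return (addr & mask) == start;  // one test, no upper-bound compare
    }
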
4821 // Configuration is based on the flags new-space-size (really the semispace
4822 // size) and old-space
4841 // space. The chunk is double the size of the requested reserved
4842 // new space size to ensure that we can find a pair of semispaces that
4859 // Initialize old pointer space.
4868 // Initialize old data space.
4877 // Initialize the code space, set its maximum capacity to the old
4880 // virtual address space, so that they can call each other with near calls.
4892 // Initialize map space.
4901 // Initialize global property cell space.
4906 // The large object code space may contain code or data. We set the memory
5020 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
5021 space->Shrink();
5030 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
5031 space->Protect();
5039 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
5040 space->Unprotect();
5108 Space* AllSpaces::next() {
5193 // Move to the next space
5200 // Return iterator for the new current space.
5205 // Create an iterator for the space to iterate.
5272 // For code space, using FreeListNode::IsFreeListNode is OK.
5423 // Done with the last space.
5629 for (OldSpace* space = spaces.next();
5630 space != NULL;
5631 space = spaces.next()) {
5632 holes_size += space->Waste() + space->AvailableFree();
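
The final cluster (5629-5632) totals fragmentation over the old spaces: Waste() appears to count bytes lost inside allocated pages and AvailableFree() the bytes sitting on the free lists, so their sum is the heap's hole total in bytes. A sketch with stand-in accessors, since the iterator idiom itself already appears above:

    struct OldSpaceStandIn {
      long waste, free_list_bytes;
      long Waste() const { return waste; }
      long AvailableFree() const { return free_list_bytes; }
    };

    long CountHolesSketch(const OldSpaceStandIn* spaces, int count) {
      long holes_size = 0;
      for (int i = 0; i < count; i++)
        holes_size += spaces[i].Waste() + spaces[i].AvailableFree();
      return holes_size;
    }
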