
Lines matching refs: size_in_bytes

806 HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
827 return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
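The match at 827 ends the mark-compact slow path: once a usable page is found, allocation reduces to bumping a pointer through a linear region. Below is a minimal standalone sketch of that step, with an assumed AllocationInfo layout (the real struct is not shown in these matches):

    #include <cstdint>

    struct AllocationInfo {
      uint8_t* top;    // next free byte in the current linear region
      uint8_t* limit;  // one past the end of the region
    };

    // Bump-pointer allocation: carve size_in_bytes off the front of the
    // region, or return nullptr so the caller can take a slower path.
    static uint8_t* AllocateLinearly(AllocationInfo* info, int size_in_bytes) {
      if (info->limit - info->top < size_in_bytes) return nullptr;
      uint8_t* result = info->top;
      info->top += size_in_bytes;
      return result;
    }

The fast path is the same bump with no bounds check beyond the limit compare, which is why every slow-path variant in the matches below ends by falling back into AllocateLinearly.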
1541 void FreeListNode::set_size(int size_in_bytes) {
1542 ASSERT(size_in_bytes > 0);
1543 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1552 if (size_in_bytes > ByteArray::kAlignedSize) {
1556 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
1557 } else if (size_in_bytes == kPointerSize) {
1559 } else if (size_in_bytes == 2 * kPointerSize) {
1564 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
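Lines 1541-1564 show how a freed block is disguised as an ordinary heap object so heap iteration can step over it: blocks bigger than a byte-array header become byte arrays, while one- and two-word blocks get dedicated fillers. A sketch of just that dispatch; the header-size constant is an assumption, not the real ByteArray::kAlignedSize:

    #include <cassert>

    const int kPointerSize = sizeof(void*);
    const int kByteArrayAlignedSize = 2 * kPointerSize;  // assumed header size

    enum FillerKind { kByteArrayFiller, kOneWordFiller, kTwoWordFiller };

    // Mirror of the set_size() dispatch: choose how to disguise a free
    // block so a heap walker still sees a valid object with a size.
    FillerKind FillerFor(int size_in_bytes) {
      assert(size_in_bytes > 0);
      assert(size_in_bytes % kPointerSize == 0);
      if (size_in_bytes > kByteArrayAlignedSize) return kByteArrayFiller;
      if (size_in_bytes == kPointerSize) return kOneWordFiller;
      return kTwoWordFiller;  // the remaining case: 2 * kPointerSize
    }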
1621 int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
1623 MemoryAllocator::ZapBlock(start, size_in_bytes);
1626 node->set_size(size_in_bytes);
1632 return size_in_bytes;
1638 if (size_in_bytes < kMinBlockSize) {
1639 return size_in_bytes;
1643 int index = size_in_bytes >> kPointerSizeLog2;
1646 available_ += size_in_bytes;
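Free() (1621-1646) zaps the block, stamps a free-list node header on it, and then either reports the bytes as wasted (blocks below kMinBlockSize are not worth linking) or threads the node onto a bucket indexed by size_in_bytes >> kPointerSizeLog2. A standalone sketch of that bookkeeping, with a guessed kMinBlockSize; the zap step from 1623 is omitted here:

    #include <cstddef>
    #include <vector>

    const int kPointerSize = sizeof(void*);
    const int kPointerSizeLog2 = (kPointerSize == 8) ? 3 : 2;
    const int kMinBlockSize = 2 * kPointerSize;  // assumed threshold

    struct FreeNode {
      FreeNode* next;
      int size;
    };

    struct SimpleFreeList {
      std::vector<FreeNode*> buckets;  // bucket i holds blocks of i words
      int available = 0;

      // Returns the number of wasted bytes, as OldSpaceFreeList::Free does:
      // blocks too small to carry a node header are reported back as waste.
      int Free(void* start, int size_in_bytes) {
        if (size_in_bytes < kMinBlockSize) return size_in_bytes;
        FreeNode* node = static_cast<FreeNode*>(start);
        node->size = size_in_bytes;
        size_t index = static_cast<size_t>(size_in_bytes) >> kPointerSizeLog2;
        if (buckets.size() <= index) buckets.resize(index + 1, nullptr);
        node->next = buckets[index];
        buckets[index] = node;
        available += size_in_bytes;
        return 0;
      }
    };

Keying buckets by word count makes exact-fit lookup O(1), which matters because old-space objects are pointer-aligned and most requests repeat a small set of sizes.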
1652 Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
1653 ASSERT(0 < size_in_bytes);
1654 ASSERT(size_in_bytes <= kMaxBlockSize);
1655 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1658 int index = size_in_bytes >> kPointerSizeLog2;
1664 available_ -= size_in_bytes;
1676 return Failure::RetryAfterGC(size_in_bytes, owner_);
1684 size_in_bytes);
1710 available_ -= size_in_bytes + rem_bytes;
1720 available_ -= size_in_bytes;
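Allocate() (1652-1720) tries the exact-size bucket first, then falls back to the first larger block it can find and splits off the remainder; the accounting at 1710 versus 1720 is the difference between a remainder too small to re-link (counted as waste) and one returned to the list. Continuing the SimpleFreeList sketch above:

    // Exact fit first, then first fit with a split; returns nullptr when
    // nothing fits, which is where the real code fails with RetryAfterGC.
    void* Allocate(SimpleFreeList* list, int size_in_bytes, int* wasted_bytes) {
      *wasted_bytes = 0;
      size_t index = static_cast<size_t>(size_in_bytes) >> kPointerSizeLog2;
      if (index < list->buckets.size() && list->buckets[index] != nullptr) {
        FreeNode* node = list->buckets[index];
        list->buckets[index] = node->next;
        list->available -= size_in_bytes;
        return node;
      }
      for (size_t i = index + 1; i < list->buckets.size(); i++) {
        FreeNode* node = list->buckets[i];
        if (node == nullptr) continue;
        list->buckets[i] = node->next;
        list->available -= node->size;
        int rem_bytes = node->size - size_in_bytes;
        if (rem_bytes > 0) {
          // Free() re-links large remainders (adding them back to
          // available) and reports small ones back as waste.
          *wasted_bytes =
              list->Free(reinterpret_cast<char*>(node) + size_in_bytes,
                         rem_bytes);
        }
        return node;
      }
      return nullptr;  // caller must expand the space or retry after GC
    }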
1891 HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
1894 // should succeed (size_in_bytes should not be greater than a page's
1898 return AllocateInNextPage(current_page, size_in_bytes);
1905 Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
1908 accounting_stats_.AllocateBytes(size_in_bytes);
1923 return AllocateInNextPage(current_page, size_in_bytes);
1959 int size_in_bytes) {
1963 return AllocateLinearly(&allocation_info_, size_in_bytes);
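Taken together, 1891-1963 outline the old-space slow path: linear allocation has failed, so the space moves to an existing next page if there is one, otherwise consults the free list, and otherwise tries to grow. The ordering below is inferred from the matched lines (1898, 1905, 1923), and the callbacks stand in for the real page and space machinery:

    #include <functional>

    // Fallback order for the old-space slow path; each step may fail and
    // hand over to the next. Returning nullptr means "retry after GC".
    void* SlowAllocateRaw(int size_in_bytes,
                          bool has_next_page,
                          const std::function<void*(int)>& allocate_in_next_page,
                          const std::function<void*(int)>& free_list_allocate,
                          const std::function<bool()>& expand) {
      if (has_next_page) return allocate_in_next_page(size_in_bytes);
      if (void* result = free_list_allocate(size_in_bytes)) return result;
      if (expand()) return allocate_in_next_page(size_in_bytes);
      return nullptr;
    }

Per the comment at 1894, the next-page step is expected to succeed because size_in_bytes can never exceed a page's object area.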
2317 HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
2318 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2324 return AllocateInNextPage(current_page, size_in_bytes);
2333 accounting_stats_.AllocateBytes(size_in_bytes);
2348 return AllocateInNextPage(current_page, size_in_bytes);
2360 int size_in_bytes) {
2363 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2366 return AllocateLinearly(&allocation_info_, size_in_bytes);
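FixedSpace (2317-2366) holds objects of a single size, so the asserts at 2318 and 2363 pin every request to object_size_in_bytes_, and the free list needs no size buckets at all. A sketch of that simplification, modeling the list as a plain stack of equal-sized cells (the real node layout is an assumption):

    #include <cassert>

    // In a fixed-size space every block is the same size, so a free cell
    // only needs to hold the link to the next one.
    struct FixedFreeList {
      int object_size_in_bytes;
      void** head = nullptr;

      void* Allocate(int size_in_bytes) {
        assert(size_in_bytes == object_size_in_bytes);
        if (head == nullptr) return nullptr;  // fall through to a new page
        void** cell = head;
        head = static_cast<void**>(*cell);
        return cell;
      }

      void Free(void* start) {
        void** cell = static_cast<void**>(start);
        *cell = head;
        head = cell;
      }
    };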
2495 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
2498 size_t requested = ChunkSizeFor(size_in_bytes);
2513 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
2516 size_in_bytes += (Page::kPageSize - os_alignment);
2517 return size_in_bytes + Page::kObjectStartOffset;
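ChunkSizeFor (2513-2517) pads the request so a page-aligned object area of the needed size fits whatever base address the OS hands back, then adds room for the page header. A worked version with assumed values (8 KB pages, 4 KB OS allocation alignment; kObjectStartOffset is a stand-in, not the real constant):

    const int kPageSize = 8 * 1024;      // assumed page size
    const int kOSAlignment = 4 * 1024;   // assumed OS::AllocateAlignment()
    const int kObjectStartOffset = 256;  // assumed page-header size

    int ChunkSizeFor(int size_in_bytes) {
      // If the OS aligns more coarsely than a page, the chunk base can sit
      // up to (kPageSize - kOSAlignment) short of a page boundary; pad for
      // that worst case so the object area still fits after rounding up.
      if (kOSAlignment < kPageSize) size_in_bytes += (kPageSize - kOSAlignment);
      return size_in_bytes + kObjectStartOffset;
    }

With these assumed constants, a 100000-byte request becomes 100000 + 4096 + 256 = 104352 bytes.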
2619 Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
2620 ASSERT(0 < size_in_bytes);
2621 return AllocateRawInternal(size_in_bytes,
2622 size_in_bytes,
2627 Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
2628 ASSERT(0 < size_in_bytes);
2629 int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
2630 return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
2631 size_in_bytes,
2636 Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
2637 ASSERT(0 < size_in_bytes);
2638 return AllocateRawInternal(size_in_bytes,
2639 size_in_bytes,
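The three entry points at 2619-2639 funnel into AllocateRawInternal with a requested size (possibly padded) and the object size proper; only fixed arrays request extra space, for an out-of-object remembered set (2629-2630). A sketch of that funneling; the internal helper, the executability flag, and the remembered-set formula are assumptions shaped by the call sites:

    #include <cassert>
    #include <cstdlib>

    const int kPointerSize = sizeof(void*);
    const int kBitsPerByte = 8;

    // Assumed: roughly one remembered-set bit per pointer slot.
    static int ExtraRSetBytesFor(int object_size) {
      return (object_size / kPointerSize) / kBitsPerByte;
    }

    // Stand-in for mapping a dedicated large-object chunk.
    static void* AllocateRawInternal(int requested_size, int object_size,
                                     bool executable) {
      (void)object_size;
      (void)executable;
      return std::malloc(requested_size);
    }

    void* AllocateRawCode(int size_in_bytes) {
      assert(size_in_bytes > 0);
      return AllocateRawInternal(size_in_bytes, size_in_bytes, true);
    }

    void* AllocateRawFixedArray(int size_in_bytes) {
      assert(size_in_bytes > 0);
      // Fixed arrays carry their remembered set after the object proper,
      // so the chunk is requested larger than the object itself.
      int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
      return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
                                 size_in_bytes, false);
    }

    void* AllocateRaw(int size_in_bytes) {
      assert(size_in_bytes > 0);
      return AllocateRawInternal(size_in_bytes, size_in_bytes, false);
    }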