// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdlib.h>

#include "src/base/platform/platform.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/utils-inl.h"

namespace v8 {
namespace internal {

#if 0
static void VerifyRegionMarking(Address page_start) {
#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
  Page* p = Page::FromAddress(page_start);

  p->SetRegionMarks(Page::kAllRegionsCleanMarks);

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    Page::FromAddress(addr)->MarkRegionDirty(addr);
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
  }
#endif
}
#endif


// TODO(gc) you can no longer allocate pages like this. Details are hidden.
#if 0
TEST(Page) {
  byte* mem = NewArray<byte>(2*Page::kPageSize);
  CHECK(mem != NULL);

  Address start = reinterpret_cast<Address>(mem);
  Address page_start = RoundUp(start, Page::kPageSize);

  Page* p = Page::FromAddress(page_start);
  // Initialized Page has heap pointer, normally set by memory_allocator.
  p->heap_ = CcTest::heap();
  CHECK(p->address() == page_start);
  CHECK(p->is_valid());

  p->opaque_header = 0;
  p->SetIsLargeObjectPage(false);
  CHECK(!p->next_page()->is_valid());

  CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);
  CHECK(p->ObjectAreaEnd() == page_start + Page::kPageSize);

  CHECK(p->Offset(page_start + Page::kObjectStartOffset) ==
        Page::kObjectStartOffset);
  CHECK(p->Offset(page_start + Page::kPageSize) == Page::kPageSize);

  CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
  CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());

  // test region marking
  VerifyRegionMarking(page_start);

  DeleteArray(mem);
}
#endif


// Temporarily sets a given allocator in an isolate.
class TestMemoryAllocatorScope {
 public:
  TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
      : isolate_(isolate),
        old_allocator_(isolate->memory_allocator_) {
    isolate->memory_allocator_ = allocator;
  }

  ~TestMemoryAllocatorScope() {
    isolate_->memory_allocator_ = old_allocator_;
  }

 private:
  Isolate* isolate_;
  MemoryAllocator* old_allocator_;

  DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};


// Temporarily sets a given code range in an isolate.
class TestCodeRangeScope {
 public:
  TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
      : isolate_(isolate),
        old_code_range_(isolate->code_range_) {
    isolate->code_range_ = code_range;
  }

  ~TestCodeRangeScope() {
    isolate_->code_range_ = old_code_range_;
  }

 private:
  Isolate* isolate_;
  CodeRange* old_code_range_;

  DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
};

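// Allocates a single MemoryChunk with the given reserve/commit sizes, checks
// its size and area invariants, re-commits the area to a second size, and
// frees the chunk again.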
static void VerifyMemoryChunk(Isolate* isolate,
                              Heap* heap,
                              CodeRange* code_range,
                              size_t reserve_area_size,
                              size_t commit_area_size,
                              size_t second_commit_area_size,
                              Executability executable) {
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  TestCodeRangeScope test_code_range_scope(isolate, code_range);

  size_t header_size = (executable == EXECUTABLE)
                       ? MemoryAllocator::CodePageGuardStartOffset()
                       : MemoryChunk::kObjectStartOffset;
  size_t guard_size = (executable == EXECUTABLE)
                       ? MemoryAllocator::CodePageGuardSize()
                       : 0;

  MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
                                                              commit_area_size,
                                                              executable,
                                                              NULL);
  size_t alignment = code_range != NULL && code_range->valid()
                         ? MemoryChunk::kAlignment
                         : base::OS::CommitPageSize();
  size_t reserved_size =
      (executable == EXECUTABLE)
          ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
                    alignment)
          : RoundUp(header_size + reserve_area_size,
                    base::OS::CommitPageSize());
  CHECK(memory_chunk->size() == reserved_size);
  CHECK(memory_chunk->area_start() < memory_chunk->address() +
                                     memory_chunk->size());
  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
                                    memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);

  Address area_start = memory_chunk->area_start();

  memory_chunk->CommitArea(second_commit_area_size);
  CHECK(area_start == memory_chunk->area_start());
  CHECK(memory_chunk->area_start() < memory_chunk->address() +
                                     memory_chunk->size());
  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
                                    memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
      second_commit_area_size);

  memory_allocator->Free(memory_chunk);
  memory_allocator->TearDown();
  delete memory_allocator;
}

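// Regression test: after allocating most of a small CodeRange, a second
// request that no longer fits must fail cleanly (return NULL) rather than
// succeed or crash.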
TEST(Regress3540) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  const int pageSize = Page::kPageSize;
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(
      memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  CodeRange* code_range = new CodeRange(isolate);
  const size_t code_range_size = 4 * pageSize;
  if (!code_range->SetUp(
          code_range_size +
          RoundUp(v8::base::OS::CommitPageSize() * kReservedCodeRangePages,
                  MemoryChunk::kAlignment) +
          v8::internal::MemoryAllocator::CodePageAreaSize())) {
    return;
  }

  Address address;
  size_t size;
  size_t request_size = code_range_size - 2 * pageSize;
  address = code_range->AllocateRawMemory(
      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
      &size);
  CHECK(address != NULL);

  Address null_address;
  size_t null_size;
  request_size = code_range_size - pageSize;
  null_address = code_range->AllocateRawMemory(
      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
      &null_size);
  CHECK(null_address == NULL);

  code_range->FreeRawMemory(address, size);
  delete code_range;
  memory_allocator->TearDown();
  delete memory_allocator;
}

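// Deterministic pseudo-random generator (fixed seed, no reseeding) used to
// pick commit area sizes below 1 MB for the MemoryChunk test.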
static unsigned int Pseudorandom() {
  static uint32_t lo = 2345;
  lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
  return lo & 0xFFFFF;
}

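// Exercises chunk allocation and re-commit with pseudo-random commit sizes,
// both with and without a dedicated CodeRange, for executable and
// non-executable chunks.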
TEST(MemoryChunk) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  size_t reserve_area_size = 1 * MB;
  size_t initial_commit_area_size, second_commit_area_size;

  for (int i = 0; i < 100; i++) {
    initial_commit_area_size = Pseudorandom();
    second_commit_area_size = Pseudorandom();

    // With CodeRange.
    CodeRange* code_range = new CodeRange(isolate);
    const size_t code_range_size = 32 * MB;
    if (!code_range->SetUp(code_range_size)) return;

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
    delete code_range;

    // Without CodeRange.
    code_range = NULL;
    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
  }
}

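// Allocates two pages for a faked old space, links them into the space's page
// list, and checks page validity, ownership, and iteration.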
TEST(MemoryAllocator) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator != nullptr);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  {
    int total_pages = 0;
    OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
    Page* first_page = memory_allocator->AllocatePage(
        faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);

    first_page->InsertAfter(faked_space.anchor()->prev_page());
    CHECK(first_page->is_valid());
    CHECK(first_page->next_page() == faked_space.anchor());
    total_pages++;

    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
      CHECK(p->owner() == &faked_space);
    }

    // Allocate a second page and insert it into the same faked space.
    Page* other = memory_allocator->AllocatePage(faked_space.AreaSize(),
                                                 &faked_space, NOT_EXECUTABLE);
    CHECK(other->is_valid());
    total_pages++;
    other->InsertAfter(first_page);
    int page_count = 0;
    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
      CHECK(p->owner() == &faked_space);
      page_count++;
    }
    CHECK(total_pages == page_count);

    Page* second_page = first_page->next_page();
    CHECK(second_page->is_valid());

    // OldSpace's destructor will tear down the space and free up all pages.
  }
  memory_allocator->TearDown();
  delete memory_allocator;
}

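// Sets up a NewSpace and fills it with maximally sized unaligned allocations
// until less than one max regular heap object fits.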
TEST(NewSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  NewSpace new_space(heap);

  CHECK(new_space.SetUp(CcTest::heap()->ReservedSemiSpaceSize(),
                        CcTest::heap()->ReservedSemiSpaceSize()));
  CHECK(new_space.HasBeenSetUp());

  while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
    Object* obj =
        new_space.AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
            .ToObjectChecked();
    CHECK(new_space.Contains(HeapObject::cast(obj)));
  }

  new_space.TearDown();
  memory_allocator->TearDown();
  delete memory_allocator;
}

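// Sets up an OldSpace and keeps allocating max-size regular objects until the
// space reports no available bytes.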
TEST(OldSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  OldSpace* s = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(s != NULL);

  CHECK(s->SetUp());

  while (s->Available() > 0) {
    s->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize).ToObjectChecked();
  }

  delete s;
  memory_allocator->TearDown();
  delete memory_allocator;
}

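// Allocates a fixed number of max-size objects into a CompactionSpace, checks
// how many pages it grows to, and verifies that merging moves all pages back
// into the owning old space.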
TEST(CompactionSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator != nullptr);
  CHECK(
      memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  CompactionSpace* compaction_space =
      new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(compaction_space != NULL);
  CHECK(compaction_space->SetUp());

  OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(old_space != NULL);
  CHECK(old_space->SetUp());

  // Cannot loop until "Available()" since we initially have 0 bytes available
  // and would thus neither grow, nor be able to allocate an object.
  const int kNumObjects = 100;
  const int kNumObjectsPerPage =
      compaction_space->AreaSize() / Page::kMaxRegularHeapObjectSize;
  const int kExpectedPages =
      (kNumObjects + kNumObjectsPerPage - 1) / kNumObjectsPerPage;
  for (int i = 0; i < kNumObjects; i++) {
    compaction_space->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
        .ToObjectChecked();
  }
  int pages_in_old_space = old_space->CountTotalPages();
  int pages_in_compaction_space = compaction_space->CountTotalPages();
  CHECK_EQ(pages_in_compaction_space, kExpectedPages);
  CHECK_LE(pages_in_old_space, 1);

  old_space->MergeCompactionSpace(compaction_space);
  CHECK_EQ(old_space->CountTotalPages(),
           pages_in_old_space + pages_in_compaction_space);

  delete compaction_space;
  delete old_space;

  memory_allocator->TearDown();
  delete memory_allocator;
}

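// Checks that a compaction space first consumes capacity handed over from the
// old space before acquiring pages of its own; see the step-by-step comment
// inside the test.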
TEST(CompactionSpaceUsingExternalMemory) {
  const int kObjectSize = 512;

  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* allocator = new MemoryAllocator(isolate);
  CHECK(allocator != nullptr);
  CHECK(allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, allocator);

  CompactionSpaceCollection* collection = new CompactionSpaceCollection(heap);
  CompactionSpace* compaction_space = collection->Get(OLD_SPACE);
  CHECK(compaction_space != NULL);
  CHECK(compaction_space->SetUp());

  OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(old_space != NULL);
  CHECK(old_space->SetUp());

  // The linear allocation area already counts as used bytes, making
  // exact testing impossible.
  heap->DisableInlineAllocation();

  // Test:
  // * Allocate a backing store in old_space.
  // * Compute the number num_rest_objects of kObjectSize objects that fit
  //   into the rest of the available memory.
  // * Add the rest of the available memory to the compaction space.
  // * Allocate num_rest_objects objects in the compaction space.
  // * Allocate one object more.
  // * Merge the compaction space and compare the expected number of pages.

  // Allocate a single object in old_space to initialize a backing page.
  old_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
  // Compute the number of objects that fit into the rest in old_space.
  intptr_t rest = static_cast<int>(old_space->Available());
  CHECK_GT(rest, 0);
  intptr_t num_rest_objects = rest / kObjectSize;
  // After allocating num_rest_objects in compaction_space we allocate a bit
  // more.
  const intptr_t kAdditionalCompactionMemory = kObjectSize;
  // We expect a single old_space page.
  const intptr_t kExpectedInitialOldSpacePages = 1;
  // We expect a single additional page in compaction space because we mostly
  // use external memory.
  const intptr_t kExpectedCompactionPages = 1;
  // We expect two pages to be reachable from old_space in the end.
  const intptr_t kExpectedOldSpacePagesAfterMerge = 2;

  CHECK_EQ(old_space->CountTotalPages(), kExpectedInitialOldSpacePages);
  CHECK_EQ(compaction_space->CountTotalPages(), 0);
  CHECK_EQ(compaction_space->Capacity(), 0);
  // Make the rest of memory available for compaction.
  old_space->DivideUponCompactionSpaces(&collection, 1, rest);
  CHECK_EQ(compaction_space->CountTotalPages(), 0);
  CHECK_EQ(compaction_space->Capacity(), rest);
  while (num_rest_objects-- > 0) {
    compaction_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
  }
  // We only used external memory so far.
  CHECK_EQ(compaction_space->CountTotalPages(), 0);
  // Additional allocation.
  compaction_space->AllocateRawUnaligned(kAdditionalCompactionMemory)
      .ToObjectChecked();
  // Now the compaction space should have acquired a page as well.
  CHECK_EQ(compaction_space->CountTotalPages(), kExpectedCompactionPages);

  old_space->MergeCompactionSpace(compaction_space);
  CHECK_EQ(old_space->CountTotalPages(), kExpectedOldSpacePagesAfterMerge);

  delete collection;
  delete old_space;

  allocator->TearDown();
  delete allocator;
}

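// Helpers used by the CompactionSpaceDivide* tests below: they create and
// destroy sets of CompactionSpaceCollections, allocate into them, sum their
// accounting stats, and merge them back into a paged space.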
CompactionSpaceCollection** HeapTester::InitializeCompactionSpaces(
    Heap* heap, int num_spaces) {
  CompactionSpaceCollection** spaces =
      new CompactionSpaceCollection*[num_spaces];
  for (int i = 0; i < num_spaces; i++) {
    spaces[i] = new CompactionSpaceCollection(heap);
  }
  return spaces;
}


void HeapTester::DestroyCompactionSpaces(CompactionSpaceCollection** spaces,
                                         int num_spaces) {
  for (int i = 0; i < num_spaces; i++) {
    delete spaces[i];
  }
  delete[] spaces;
}


void HeapTester::MergeCompactionSpaces(PagedSpace* space,
                                       CompactionSpaceCollection** spaces,
                                       int num_spaces) {
  AllocationSpace id = space->identity();
  for (int i = 0; i < num_spaces; i++) {
    space->MergeCompactionSpace(spaces[i]->Get(id));
    CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Size(), 0);
    CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Capacity(), 0);
    CHECK_EQ(spaces[i]->Get(id)->Waste(), 0);
  }
}


void HeapTester::AllocateInCompactionSpaces(CompactionSpaceCollection** spaces,
                                            AllocationSpace id, int num_spaces,
                                            int num_objects, int object_size) {
  for (int i = 0; i < num_spaces; i++) {
    for (int j = 0; j < num_objects; j++) {
      spaces[i]->Get(id)->AllocateRawUnaligned(object_size).ToObjectChecked();
    }
    spaces[i]->Get(id)->EmptyAllocationInfo();
    CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Size(),
             num_objects * object_size);
    CHECK_GE(spaces[i]->Get(id)->accounting_stats_.Capacity(),
             spaces[i]->Get(id)->accounting_stats_.Size());
  }
}


void HeapTester::CompactionStats(CompactionSpaceCollection** spaces,
                                 AllocationSpace id, int num_spaces,
                                 intptr_t* capacity, intptr_t* size) {
  *capacity = 0;
  *size = 0;
  for (int i = 0; i < num_spaces; i++) {
    *capacity += spaces[i]->Get(id)->accounting_stats_.Capacity();
    *size += spaces[i]->Get(id)->accounting_stats_.Size();
  }
}

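// Driver for the CompactionSpaceDivide* tests: divides the free capacity of
// an old space among several compaction spaces, allocates into each of them,
// merges everything back, and checks the accounting stats at every step.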
void HeapTester::TestCompactionSpaceDivide(int num_additional_objects,
                                           int object_size,
                                           int num_compaction_spaces,
                                           int additional_capacity_in_bytes) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(old_space != nullptr);
  CHECK(old_space->SetUp());
  old_space->AllocateRawUnaligned(object_size).ToObjectChecked();
  old_space->EmptyAllocationInfo();

  intptr_t rest_capacity = old_space->accounting_stats_.Capacity() -
                           old_space->accounting_stats_.Size();
  intptr_t capacity_for_compaction_space =
      rest_capacity / num_compaction_spaces;
  int num_objects_in_compaction_space =
      static_cast<int>(capacity_for_compaction_space) / object_size +
      num_additional_objects;
  CHECK_GT(num_objects_in_compaction_space, 0);
  intptr_t initial_old_space_capacity = old_space->accounting_stats_.Capacity();

  CompactionSpaceCollection** spaces =
      InitializeCompactionSpaces(heap, num_compaction_spaces);
  old_space->DivideUponCompactionSpaces(spaces, num_compaction_spaces,
                                        capacity_for_compaction_space);

  intptr_t compaction_capacity = 0;
  intptr_t compaction_size = 0;
  CompactionStats(spaces, OLD_SPACE, num_compaction_spaces,
                  &compaction_capacity, &compaction_size);

  intptr_t old_space_capacity = old_space->accounting_stats_.Capacity();
  intptr_t old_space_size = old_space->accounting_stats_.Size();
  // Compaction space memory is subtracted from the original space's capacity.
  CHECK_EQ(old_space_capacity,
           initial_old_space_capacity - compaction_capacity);
  CHECK_EQ(compaction_size, 0);

  AllocateInCompactionSpaces(spaces, OLD_SPACE, num_compaction_spaces,
                             num_objects_in_compaction_space, object_size);

  // Old space size and capacity should be the same as after dividing.
  CHECK_EQ(old_space->accounting_stats_.Size(), old_space_size);
  CHECK_EQ(old_space->accounting_stats_.Capacity(), old_space_capacity);

  CompactionStats(spaces, OLD_SPACE, num_compaction_spaces,
                  &compaction_capacity, &compaction_size);
  MergeCompactionSpaces(old_space, spaces, num_compaction_spaces);

  CHECK_EQ(old_space->accounting_stats_.Capacity(),
           old_space_capacity + compaction_capacity);
  CHECK_EQ(old_space->accounting_stats_.Size(),
           old_space_size + compaction_size);
  // We check against the expected end capacity.
  CHECK_EQ(old_space->accounting_stats_.Capacity(),
           initial_old_space_capacity + additional_capacity_in_bytes);

  DestroyCompactionSpaces(spaces, num_compaction_spaces);
  delete old_space;
}


HEAP_TEST(CompactionSpaceDivideSinglePage) {
  const int kObjectSize = KB;
  const int kCompactionSpaces = 4;
  // Since the bound for objects is tight and the dividing is best effort, we
  // subtract some objects to make sure we still fit in the initial page.
  // A CHECK makes sure that the overall number of allocated objects stays
  // > 0.
  const int kAdditionalObjects = -10;
  const int kAdditionalCapacityRequired = 0;
  TestCompactionSpaceDivide(kAdditionalObjects, kObjectSize, kCompactionSpaces,
                            kAdditionalCapacityRequired);
}


HEAP_TEST(CompactionSpaceDivideMultiplePages) {
  const int kObjectSize = KB;
  const int kCompactionSpaces = 4;
  // Allocate half a page of objects to ensure that we need one more page per
  // compaction space.
  const int kAdditionalObjects = (Page::kPageSize / kObjectSize / 2);
  const int kAdditionalCapacityRequired =
      Page::kAllocatableMemory * kCompactionSpaces;
  TestCompactionSpaceDivide(kAdditionalObjects, kObjectSize, kCompactionSpaces,
                            kAdditionalCapacityRequired);
}

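// Allocates page-sized objects in the large object space until allocation
// fails, checking containment and FindObject along the way.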
TEST(LargeObjectSpace) {
  v8::V8::Initialize();

  LargeObjectSpace* lo = CcTest::heap()->lo_space();
  CHECK(lo != NULL);

  int lo_size = Page::kPageSize;

  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE).ToObjectChecked();
  CHECK(obj->IsHeapObject());

  HeapObject* ho = HeapObject::cast(obj);

  CHECK(lo->Contains(HeapObject::cast(obj)));

  CHECK(lo->FindObject(ho->address()) == obj);

  CHECK(lo->Contains(ho));

  while (true) {
    intptr_t available = lo->Available();
    { AllocationResult allocation = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
      if (allocation.IsRetry()) break;
    }
    // The available value is conservative such that it may report
    // zero prior to heap exhaustion.
    CHECK(lo->Available() < available || available == 0);
  }

  CHECK(!lo->IsEmpty());

  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE).IsRetry());
}

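// Verifies that a snapshot-based VM boots (and runs the empty script) with a
// single page per paged space and no large objects.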
TEST(SizeOfFirstPageIsLargeEnough) {
  if (i::FLAG_always_opt) return;
  // Bootstrapping without a snapshot causes more allocations.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  if (!isolate->snapshot_available()) return;
  if (Snapshot::EmbedsScript(isolate)) return;

  // If this test fails due to enabling experimental natives that are not part
  // of the snapshot, we may need to adjust CalculateFirstPageSizes.

  // Freshly initialized VM gets by with one page per space.
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // Executing the empty script gets by with one page per space.
  HandleScope scope(isolate);
  CompileRun("/*empty*/");
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // No large objects required to perform the above steps.
  CHECK(isolate->heap()->lo_space()->IsEmpty());
}

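// Checks that the new space grows beyond its initial single page once that
// page is full and a further allocation is requested.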
UNINITIALIZED_TEST(NewSpaceGrowsToTargetCapacity) {
  FLAG_target_semi_space_size = 2 * (Page::kPageSize / MB);
  if (FLAG_optimize_for_size) return;

  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    NewSpace* new_space = i_isolate->heap()->new_space();

    // This test doesn't work if we start with a non-default new space
    // configuration.
    if (new_space->InitialTotalCapacity() == Page::kPageSize) {
      CHECK_EQ(new_space->CommittedMemory(), new_space->InitialTotalCapacity());

      // Fill up the first (and only) page of the semi space.
      FillCurrentPage(new_space);

      // Try to allocate out of the new space. A new page should be added and
      // the allocation should succeed.
      v8::internal::AllocationResult allocation =
          new_space->AllocateRawUnaligned(80);
      CHECK(!allocation.IsRetry());
      CHECK_EQ(new_space->CommittedMemory(), 2 * Page::kPageSize);

      // Turn the allocation into a proper object so isolate teardown won't
      // crash.
      HeapObject* free_space = NULL;
      CHECK(allocation.To(&free_space));
      new_space->heap()->CreateFillerObjectAt(free_space->address(), 80);
    }
  }
  isolate->Dispose();
}

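// Allocates |size| bytes unaligned in the new space and turns the result into
// a filler object so the heap stays iterable.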
static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
  AllocationResult allocation = space->AllocateRawUnaligned(size);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  space->heap()->CreateFillerObjectAt(filler->address(), size);
  return filler;
}

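// Inline allocation observer that simply counts how often Step() is invoked.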
class Observer : public InlineAllocationObserver {
 public:
  explicit Observer(intptr_t step_size)
      : InlineAllocationObserver(step_size), count_(0) {}

  void Step(int bytes_allocated, Address, size_t) override { count_++; }

  int count() const { return count_; }

 private:
  int count_;
};

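// Verifies that inline allocation observers are notified once their step size
// worth of bytes has been allocated, that multiple observers and removal work,
// and that notifications are suppressed while observers are paused.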
UNINITIALIZED_TEST(InlineAllocationObserver) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    NewSpace* new_space = i_isolate->heap()->new_space();

    Observer observer1(128);
    new_space->AddInlineAllocationObserver(&observer1);

    // The observer should not get notified if we have allocated less than
    // 128 bytes.
    AllocateUnaligned(new_space, 64);
    CHECK_EQ(observer1.count(), 0);

    // The observer should get called when we have allocated exactly 128 bytes.
    AllocateUnaligned(new_space, 64);
    CHECK_EQ(observer1.count(), 1);

    // Another >128 bytes should get another notification.
    AllocateUnaligned(new_space, 136);
    CHECK_EQ(observer1.count(), 2);

    // Allocating a large object should get only one notification.
    AllocateUnaligned(new_space, 1024);
    CHECK_EQ(observer1.count(), 3);

    // Allocating another 2048 bytes in small objects should get 16
    // notifications.
    for (int i = 0; i < 64; ++i) {
      AllocateUnaligned(new_space, 32);
    }
    CHECK_EQ(observer1.count(), 19);

    // Multiple observers should work.
    Observer observer2(96);
    new_space->AddInlineAllocationObserver(&observer2);

    AllocateUnaligned(new_space, 2048);
    CHECK_EQ(observer1.count(), 20);
    CHECK_EQ(observer2.count(), 1);

    AllocateUnaligned(new_space, 104);
    CHECK_EQ(observer1.count(), 20);
    CHECK_EQ(observer2.count(), 2);

    // Callback should stop getting called after an observer is removed.
    new_space->RemoveInlineAllocationObserver(&observer1);

    AllocateUnaligned(new_space, 384);
    CHECK_EQ(observer1.count(), 20);  // no more notifications.
    CHECK_EQ(observer2.count(), 3);   // this one is still active.

    // Ensure that PauseInlineAllocationObserversScope works correctly.
    AllocateUnaligned(new_space, 48);
    CHECK_EQ(observer2.count(), 3);
    {
      PauseInlineAllocationObserversScope pause_observers(new_space);
      CHECK_EQ(observer2.count(), 3);
      AllocateUnaligned(new_space, 384);
      CHECK_EQ(observer2.count(), 3);
    }
    CHECK_EQ(observer2.count(), 3);
    // Coupled with the 48 bytes allocated before the pause, another 48 bytes
    // allocated here should trigger a notification.
    AllocateUnaligned(new_space, 48);
    CHECK_EQ(observer2.count(), 4);

    new_space->RemoveInlineAllocationObserver(&observer2);
    AllocateUnaligned(new_space, 384);
    CHECK_EQ(observer1.count(), 20);
    CHECK_EQ(observer2.count(), 4);
  }
  isolate->Dispose();
}

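// Two observers with different step sizes (512 and 576 bytes) attached to the
// same space must each be notified at their own cadence: 512 allocations of
// 32 bytes yield 16384 bytes, i.e. 32 steps of 512 and 28 full steps of 576.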
UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    NewSpace* new_space = i_isolate->heap()->new_space();

    Observer observer1(512);
    new_space->AddInlineAllocationObserver(&observer1);
    Observer observer2(576);
    new_space->AddInlineAllocationObserver(&observer2);

    for (int i = 0; i < 512; ++i) {
      AllocateUnaligned(new_space, 32);
    }

    new_space->RemoveInlineAllocationObserver(&observer1);
    new_space->RemoveInlineAllocationObserver(&observer2);

    CHECK_EQ(observer1.count(), 32);
    CHECK_EQ(observer2.count(), 28);
  }
  isolate->Dispose();
}

}  // namespace internal
}  // namespace v8