// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdlib.h>

#include "src/base/platform/platform.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"

namespace v8 {
namespace internal {

#if 0
static void VerifyRegionMarking(Address page_start) {
#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
  Page* p = Page::FromAddress(page_start);

  p->SetRegionMarks(Page::kAllRegionsCleanMarks);

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    Page::FromAddress(addr)->MarkRegionDirty(addr);
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
  }
#endif
}
#endif


// TODO(gc) you can no longer allocate pages like this. Details are hidden.
#if 0
TEST(Page) {
  byte* mem = NewArray<byte>(2*Page::kPageSize);
  CHECK(mem != NULL);

  Address start = reinterpret_cast<Address>(mem);
  Address page_start = RoundUp(start, Page::kPageSize);

  Page* p = Page::FromAddress(page_start);
  // Initialized Page has heap pointer, normally set by memory_allocator.
  p->heap_ = CcTest::heap();
  CHECK(p->address() == page_start);
  CHECK(p->is_valid());

  p->opaque_header = 0;
  p->SetIsLargeObjectPage(false);
  CHECK(!p->next_page()->is_valid());

  CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);
  CHECK(p->ObjectAreaEnd() == page_start + Page::kPageSize);

  CHECK(p->Offset(page_start + Page::kObjectStartOffset) ==
        Page::kObjectStartOffset);
  CHECK(p->Offset(page_start + Page::kPageSize) == Page::kPageSize);

  CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
  CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());

  // test region marking
  VerifyRegionMarking(page_start);

  DeleteArray(mem);
}
#endif


// Temporarily sets a given allocator in an isolate.
class TestMemoryAllocatorScope {
 public:
  TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
      : isolate_(isolate), old_allocator_(isolate->heap()->memory_allocator()) {
    isolate->heap()->memory_allocator_ = allocator;
  }

  ~TestMemoryAllocatorScope() {
    isolate_->heap()->memory_allocator_ = old_allocator_;
  }

 private:
  Isolate* isolate_;
  MemoryAllocator* old_allocator_;

  DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};


// Temporarily sets a given code range in an isolate.
class TestCodeRangeScope {
 public:
  TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
      : isolate_(isolate),
        old_code_range_(isolate->heap()->memory_allocator()->code_range()) {
    isolate->heap()->memory_allocator()->code_range_ = code_range;
  }

  ~TestCodeRangeScope() {
    isolate_->heap()->memory_allocator()->code_range_ = old_code_range_;
  }

 private:
  Isolate* isolate_;
  CodeRange* old_code_range_;

  DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
};


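// Allocates a chunk with the given reserved and committed area sizes, checks
// the resulting chunk layout, re-commits the area to |second_commit_area_size|
// and verifies the bounds again, then frees the chunk.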
static void VerifyMemoryChunk(Isolate* isolate,
                              Heap* heap,
                              CodeRange* code_range,
                              size_t reserve_area_size,
                              size_t commit_area_size,
                              size_t second_commit_area_size,
                              Executability executable) {
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  {
    TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
    TestCodeRangeScope test_code_range_scope(isolate, code_range);

    size_t header_size = (executable == EXECUTABLE)
                             ? MemoryAllocator::CodePageGuardStartOffset()
                             : MemoryChunk::kObjectStartOffset;
    size_t guard_size =
        (executable == EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;

    MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
        reserve_area_size, commit_area_size, executable, NULL);
    size_t alignment = code_range != NULL && code_range->valid()
                           ? MemoryChunk::kAlignment
                           : base::OS::CommitPageSize();
    size_t reserved_size =
        ((executable == EXECUTABLE))
            ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
                      alignment)
            : RoundUp(header_size + reserve_area_size,
                      base::OS::CommitPageSize());
    CHECK(memory_chunk->size() == reserved_size);
    CHECK(memory_chunk->area_start() <
          memory_chunk->address() + memory_chunk->size());
    CHECK(memory_chunk->area_end() <=
          memory_chunk->address() + memory_chunk->size());
    CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);

    Address area_start = memory_chunk->area_start();

    memory_chunk->CommitArea(second_commit_area_size);
    CHECK(area_start == memory_chunk->area_start());
    CHECK(memory_chunk->area_start() <
          memory_chunk->address() + memory_chunk->size());
    CHECK(memory_chunk->area_end() <=
          memory_chunk->address() + memory_chunk->size());
    CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
          second_commit_area_size);

    memory_allocator->Free<MemoryAllocator::kFull>(memory_chunk);
  }
  memory_allocator->TearDown();
  delete memory_allocator;
}


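// Regression test: after a first allocation has consumed nearly the whole
// CodeRange, a second AllocateRawMemory request of the same size must return
// null rather than succeed.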
TEST(Regress3540) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  CodeRange* code_range = new CodeRange(isolate);
  size_t code_range_size =
      kMinimumCodeRangeSize > 0 ? kMinimumCodeRangeSize : 3 * Page::kPageSize;
  if (!code_range->SetUp(code_range_size)) {
    return;
  }

  Address address;
  size_t size;
  size_t request_size = code_range_size - Page::kPageSize;
  address = code_range->AllocateRawMemory(
      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
      &size);
  CHECK_NOT_NULL(address);

  Address null_address;
  size_t null_size;
  request_size = code_range_size - Page::kPageSize;
  null_address = code_range->AllocateRawMemory(
      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
      &null_size);
  CHECK_NULL(null_address);

  code_range->FreeRawMemory(address, size);
  delete code_range;
  memory_allocator->TearDown();
  delete memory_allocator;
}


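// Tiny deterministic pseudo-random generator used to vary the commit sizes
// below; returns values in the range [0, 0xFFFFF].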
static unsigned int Pseudorandom() {
  static uint32_t lo = 2345;
  lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
  return lo & 0xFFFFF;
}


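// Exercises chunk allocation with pseudo-random commit sizes, both with a
// valid CodeRange and with an uninitialized one, for executable and
// non-executable chunks.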
TEST(MemoryChunk) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  size_t reserve_area_size = 1 * MB;
  size_t initial_commit_area_size, second_commit_area_size;

  for (int i = 0; i < 100; i++) {
    initial_commit_area_size = Pseudorandom();
    second_commit_area_size = Pseudorandom();

    // With CodeRange.
    CodeRange* code_range = new CodeRange(isolate);
    const size_t code_range_size = 32 * MB;
    if (!code_range->SetUp(code_range_size)) return;

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
    delete code_range;

    // Without a valid CodeRange, i.e., omitting SetUp.
    code_range = new CodeRange(isolate);
    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
    delete code_range;
  }
}


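// Allocates pages for a faked old space, links them into the space's page
// list, and checks page validity, ownership, and the page count bookkeeping.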
TEST(MemoryAllocator) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator != nullptr);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  {
    int total_pages = 0;
    OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
    Page* first_page = memory_allocator->AllocatePage(
        faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
        NOT_EXECUTABLE);

    first_page->InsertAfter(faked_space.anchor()->prev_page());
    CHECK(Page::IsValid(first_page));
    CHECK(first_page->next_page() == faked_space.anchor());
    total_pages++;

    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
      CHECK(p->owner() == &faked_space);
    }

    // Again, we should get n or n - 1 pages.
    Page* other = memory_allocator->AllocatePage(
        faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
        NOT_EXECUTABLE);
    CHECK(Page::IsValid(other));
    total_pages++;
    other->InsertAfter(first_page);
    int page_count = 0;
    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
      CHECK(p->owner() == &faked_space);
      page_count++;
    }
    CHECK(total_pages == page_count);

    Page* second_page = first_page->next_page();
    CHECK(Page::IsValid(second_page));

    // OldSpace's destructor will tear down the space and free up all pages.
  }
  memory_allocator->TearDown();
  delete memory_allocator;
}


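// Sets up a NewSpace and fills it with maximum-size regular objects, checking
// that each allocated object is contained in the space.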
TEST(NewSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  NewSpace new_space(heap);

  CHECK(new_space.SetUp(CcTest::heap()->InitialSemiSpaceSize(),
                        CcTest::heap()->InitialSemiSpaceSize()));
  CHECK(new_space.HasBeenSetUp());

  while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
    Object* obj =
        new_space.AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
            .ToObjectChecked();
    CHECK(new_space.Contains(HeapObject::cast(obj)));
  }

  new_space.TearDown();
  memory_allocator->TearDown();
  delete memory_allocator;
}


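// Sets up an OldSpace and allocates maximum-size regular objects until no
// space is available.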
TEST(OldSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  OldSpace* s = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(s != NULL);

  CHECK(s->SetUp());

  while (s->Available() > 0) {
    s->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize).ToObjectChecked();
  }

  delete s;
  memory_allocator->TearDown();
  delete memory_allocator;
}


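// Allocates a fixed number of objects in a CompactionSpace, checks how many
// pages were added, and then merges those pages back into an OldSpace.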
TEST(CompactionSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator != nullptr);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  CompactionSpace* compaction_space =
      new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(compaction_space != NULL);
  CHECK(compaction_space->SetUp());

  OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(old_space != NULL);
  CHECK(old_space->SetUp());

  // Cannot loop until "Available()" since we initially have 0 bytes available
  // and would thus neither grow, nor be able to allocate an object.
  const int kNumObjects = 100;
  const int kNumObjectsPerPage =
      compaction_space->AreaSize() / Page::kMaxRegularHeapObjectSize;
  const int kExpectedPages =
      (kNumObjects + kNumObjectsPerPage - 1) / kNumObjectsPerPage;
  for (int i = 0; i < kNumObjects; i++) {
    compaction_space->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
        .ToObjectChecked();
  }
  int pages_in_old_space = old_space->CountTotalPages();
  int pages_in_compaction_space = compaction_space->CountTotalPages();
  CHECK_EQ(pages_in_compaction_space, kExpectedPages);
  CHECK_LE(pages_in_old_space, 1);

  old_space->MergeCompactionSpace(compaction_space);
  CHECK_EQ(old_space->CountTotalPages(),
           pages_in_old_space + pages_in_compaction_space);

  delete compaction_space;
  delete old_space;

  memory_allocator->TearDown();
  delete memory_allocator;
}


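// Allocates page-sized objects from the large object space until allocation
// fails, checking Contains(), FindObject(), and the conservative Available()
// estimate along the way.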
TEST(LargeObjectSpace) {
  v8::V8::Initialize();

  LargeObjectSpace* lo = CcTest::heap()->lo_space();
  CHECK(lo != NULL);

  int lo_size = Page::kPageSize;

  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE).ToObjectChecked();
  CHECK(obj->IsHeapObject());

  HeapObject* ho = HeapObject::cast(obj);

  CHECK(lo->Contains(HeapObject::cast(obj)));

  CHECK(lo->FindObject(ho->address()) == obj);

  CHECK(lo->Contains(ho));

  while (true) {
    intptr_t available = lo->Available();
    { AllocationResult allocation = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
      if (allocation.IsRetry()) break;
    }
    // The available value is conservative such that it may report
    // zero prior to heap exhaustion.
    CHECK(lo->Available() < available || available == 0);
  }

  CHECK(!lo->IsEmpty());

  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE).IsRetry());
}


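// Checks that a freshly bootstrapped VM, before and after running the empty
// script, fits into a single page per paged space and needs no large objects.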
TEST(SizeOfFirstPageIsLargeEnough) {
  if (i::FLAG_always_opt) return;
  // Bootstrapping without a snapshot causes more allocations.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  if (!isolate->snapshot_available()) return;
  HandleScope scope(isolate);
  v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
  // Skip this test on the custom snapshot builder.
  if (!CcTest::global()
           ->Get(context, v8_str("assertEquals"))
           .ToLocalChecked()
           ->IsUndefined()) {
    return;
  }

  // If this test fails due to enabling experimental natives that are not part
  // of the snapshot, we may need to adjust CalculateFirstPageSizes.

  // Freshly initialized VM gets by with one page per space.
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // Executing the empty script gets by with one page per space.
  CompileRun("/*empty*/");
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // No large objects required to perform the above steps.
  CHECK(isolate->heap()->lo_space()->IsEmpty());
}

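// Overloads that allocate |size| raw bytes in the given space, installing a
// filler object where the space requires one, so the allocation observer
// tests below can run against NewSpace, PagedSpace, and LargeObjectSpace.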
static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
  AllocationResult allocation = space->AllocateRawUnaligned(size);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  space->heap()->CreateFillerObjectAt(filler->address(), size,
                                      ClearRecordedSlots::kNo);
  return filler;
}

static HeapObject* AllocateUnaligned(PagedSpace* space, int size) {
  AllocationResult allocation = space->AllocateRaw(size, kDoubleUnaligned);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  space->heap()->CreateFillerObjectAt(filler->address(), size,
                                      ClearRecordedSlots::kNo);
  return filler;
}

static HeapObject* AllocateUnaligned(LargeObjectSpace* space, int size) {
  AllocationResult allocation = space->AllocateRaw(size, EXECUTABLE);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  return filler;
}

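// AllocationObserver that simply counts how often its Step() hook is called.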
class Observer : public AllocationObserver {
 public:
  explicit Observer(intptr_t step_size)
      : AllocationObserver(step_size), count_(0) {}

  void Step(int bytes_allocated, Address, size_t) override { count_++; }

  int count() const { return count_; }

 private:
  int count_;
};

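// Checks that an observer is stepped once per |step_size| bytes allocated,
// that multiple observers can coexist, and that removed or paused observers
// stop being notified.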
template <typename T>
void testAllocationObserver(Isolate* i_isolate, T* space) {
  Observer observer1(128);
  space->AddAllocationObserver(&observer1);

  // The observer should not get notified if we have only allocated less than
  // 128 bytes.
  AllocateUnaligned(space, 64);
  CHECK_EQ(observer1.count(), 0);

  // The observer should get called when we have allocated exactly 128 bytes.
  AllocateUnaligned(space, 64);
  CHECK_EQ(observer1.count(), 1);

  // Another >128 bytes should get another notification.
  AllocateUnaligned(space, 136);
  CHECK_EQ(observer1.count(), 2);

  // Allocating a large object should get only one notification.
  AllocateUnaligned(space, 1024);
  CHECK_EQ(observer1.count(), 3);

  // Allocating another 2048 bytes in small objects should get 16
  // notifications.
  for (int i = 0; i < 64; ++i) {
    AllocateUnaligned(space, 32);
  }
  CHECK_EQ(observer1.count(), 19);

  // Multiple observers should work.
  Observer observer2(96);
  space->AddAllocationObserver(&observer2);

  AllocateUnaligned(space, 2048);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 1);

  AllocateUnaligned(space, 104);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 2);

  // Callback should stop getting called after an observer is removed.
  space->RemoveAllocationObserver(&observer1);

  AllocateUnaligned(space, 384);
  CHECK_EQ(observer1.count(), 20);  // no more notifications.
  CHECK_EQ(observer2.count(), 3);   // this one is still active.

  // Ensure that PauseAllocationObserversScope works correctly.
  AllocateUnaligned(space, 48);
  CHECK_EQ(observer2.count(), 3);
  {
    PauseAllocationObserversScope pause_observers(i_isolate->heap());
    CHECK_EQ(observer2.count(), 3);
    AllocateUnaligned(space, 384);
    CHECK_EQ(observer2.count(), 3);
  }
  CHECK_EQ(observer2.count(), 3);
  // Coupled with the 48 bytes allocated before the pause, another 48 bytes
  // allocated here should trigger a notification.
  AllocateUnaligned(space, 48);
  CHECK_EQ(observer2.count(), 4);

  space->RemoveAllocationObserver(&observer2);
  AllocateUnaligned(space, 384);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 4);
}

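// Runs the observer test against NewSpace, OldSpace (via the shared PagedSpace
// path), and LargeObjectSpace in a fresh isolate.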
UNINITIALIZED_TEST(AllocationObserver) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    testAllocationObserver<NewSpace>(i_isolate, i_isolate->heap()->new_space());
    // Old space is used but the code path is shared for all
    // classes inheriting from PagedSpace.
    testAllocationObserver<PagedSpace>(i_isolate,
                                       i_isolate->heap()->old_space());
    testAllocationObserver<LargeObjectSpace>(i_isolate,
                                             i_isolate->heap()->lo_space());
  }
  isolate->Dispose();
}


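// Checks that two observers with different step sizes are each stepped at
// their own cadence over a long run of small new-space allocations.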
UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    NewSpace* new_space = i_isolate->heap()->new_space();

    Observer observer1(512);
    new_space->AddAllocationObserver(&observer1);
    Observer observer2(576);
    new_space->AddAllocationObserver(&observer2);

    for (int i = 0; i < 512; ++i) {
      AllocateUnaligned(new_space, 32);
    }

    new_space->RemoveAllocationObserver(&observer1);
    new_space->RemoveAllocationObserver(&observer2);

    CHECK_EQ(observer1.count(), 32);
    CHECK_EQ(observer2.count(), 28);
  }
  isolate->Dispose();
}

}  // namespace internal
}  // namespace v8