// Home | History | Annotate | Download | only in cctest
      1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
      2 // Redistribution and use in source and binary forms, with or without
      3 // modification, are permitted provided that the following conditions are
      4 // met:
      5 //
      6 //     * Redistributions of source code must retain the above copyright
      7 //       notice, this list of conditions and the following disclaimer.
      8 //     * Redistributions in binary form must reproduce the above
      9 //       copyright notice, this list of conditions and the following
     10 //       disclaimer in the documentation and/or other materials provided
     11 //       with the distribution.
     12 //     * Neither the name of Google Inc. nor the names of its
     13 //       contributors may be used to endorse or promote products derived
     14 //       from this software without specific prior written permission.
     15 //
     16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27 
     28 #include <stdlib.h>
     29 
     30 #include "v8.h"
     31 #include "cctest.h"
     32 
     33 using namespace v8::internal;
     34 
     35 static void VerifyRegionMarking(Address page_start) {
     36   Page* p = Page::FromAddress(page_start);
     37 
     38   p->SetRegionMarks(Page::kAllRegionsCleanMarks);
     39 
     40   for (Address addr = p->ObjectAreaStart();
     41        addr < p->ObjectAreaEnd();
     42        addr += kPointerSize) {
     43     CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
     44   }
     45 
     46   for (Address addr = p->ObjectAreaStart();
     47        addr < p->ObjectAreaEnd();
     48        addr += kPointerSize) {
     49     Page::FromAddress(addr)->MarkRegionDirty(addr);
     50   }
     51 
     52   for (Address addr = p->ObjectAreaStart();
     53        addr < p->ObjectAreaEnd();
     54        addr += kPointerSize) {
     55     CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
     56   }
     57 }
     58 
     59 
// Exercises Page's layout accessors on a manually carved-out page:
// address/offset round-tripping, object-area bounds, and region marking.
// The page is faked from a plain byte buffer, so internal fields that the
// memory allocator would normally initialize are set by hand below.
TEST(Page) {
  // Two pages worth of memory guarantees a page-aligned start can be
  // found somewhere inside the buffer.
  byte* mem = NewArray<byte>(2*Page::kPageSize);
  CHECK(mem != NULL);

  Address start = reinterpret_cast<Address>(mem);
  Address page_start = RoundUp(start, Page::kPageSize);

  Page* p = Page::FromAddress(page_start);
  // Initialized Page has heap pointer, normally set by memory_allocator.
  p->heap_ = HEAP;
  CHECK(p->address() == page_start);
  CHECK(p->is_valid());

  // With opaque_header cleared, the page is expected to report no valid
  // successor (the CHECK below pins that expectation).
  p->opaque_header = 0;
  p->SetIsLargeObjectPage(false);
  CHECK(!p->next_page()->is_valid());

  CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);
  CHECK(p->ObjectAreaEnd() == page_start + Page::kPageSize);

  // Offset() and OffsetToAddress() must be inverses at both ends of the
  // object area.
  CHECK(p->Offset(page_start + Page::kObjectStartOffset) ==
        Page::kObjectStartOffset);
  CHECK(p->Offset(page_start + Page::kPageSize) == Page::kPageSize);

  CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
  CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());

  // test region marking
  VerifyRegionMarking(page_start);

  DeleteArray(mem);
}
     92 
     93 
     94 namespace v8 {
     95 namespace internal {
     96 
     97 // Temporarily sets a given allocator in an isolate.
     98 class TestMemoryAllocatorScope {
     99  public:
    100   TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
    101       : isolate_(isolate),
    102         old_allocator_(isolate->memory_allocator_) {
    103     isolate->memory_allocator_ = allocator;
    104   }
    105 
    106   ~TestMemoryAllocatorScope() {
    107     isolate_->memory_allocator_ = old_allocator_;
    108   }
    109 
    110  private:
    111   Isolate* isolate_;
    112   MemoryAllocator* old_allocator_;
    113 
    114   DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
    115 };
    116 
    117 } }  // namespace v8::internal
    118 
    119 
// Exercises MemoryAllocator's chunked page management: allocating two runs
// of pages, linking them into a single list, and freeing them chunk by
// chunk via FreePages().
TEST(MemoryAllocator) {
  OS::Setup();
  Isolate* isolate = Isolate::Current();
  isolate->InitializeLoggingAndCounters();
  Heap* heap = isolate->heap();
  CHECK(heap->ConfigureHeapDefault());
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->Setup(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  // The space is needed only as an owner to pass to AllocatePages(); it is
  // never set up for real allocation in this test.
  OldSpace faked_space(heap,
                       heap->MaxReserved(),
                       OLD_POINTER_SPACE,
                       NOT_EXECUTABLE);
  int total_pages = 0;
  int requested = MemoryAllocator::kPagesPerChunk;
  int allocated;
  // If we request n pages, we should get n or n - 1.
  Page* first_page = memory_allocator->AllocatePages(
      requested, &allocated, &faked_space);
  CHECK(first_page->is_valid());
  CHECK(allocated == requested || allocated == requested - 1);
  total_pages += allocated;

  // Walk the page list: every page must belong to the owning space, and
  // last_page ends up pointing at the final page of the first run.
  Page* last_page = first_page;
  for (Page* p = first_page; p->is_valid(); p = p->next_page()) {
    CHECK(memory_allocator->IsPageInSpace(p, &faked_space));
    last_page = p;
  }

  // Again, we should get n or n - 1 pages.
  Page* others = memory_allocator->AllocatePages(
      requested, &allocated, &faked_space);
  CHECK(others->is_valid());
  CHECK(allocated == requested || allocated == requested - 1);
  total_pages += allocated;

  // Chain the second run onto the first and verify the combined list holds
  // exactly the number of pages handed out so far.
  memory_allocator->SetNextPage(last_page, others);
  int page_count = 0;
  for (Page* p = first_page; p->is_valid(); p = p->next_page()) {
    CHECK(memory_allocator->IsPageInSpace(p, &faked_space));
    page_count++;
  }
  CHECK(total_pages == page_count);

  Page* second_page = first_page->next_page();
  CHECK(second_page->is_valid());

  // Freeing pages at the first chunk starting at or after the second page
  // should free the entire second chunk.  It will return the page it was passed
  // (since the second page was in the first chunk).
  Page* free_return = memory_allocator->FreePages(second_page);
  CHECK(free_return == second_page);
  memory_allocator->SetNextPage(first_page, free_return);

  // Freeing pages in the first chunk starting at the first page should free
  // the first chunk and return an invalid page.
  Page* invalid_page = memory_allocator->FreePages(first_page);
  CHECK(!invalid_page->is_valid());

  memory_allocator->TearDown();
  delete memory_allocator;
}
    184 
    185 
    186 TEST(NewSpace) {
    187   OS::Setup();
    188   Isolate* isolate = Isolate::Current();
    189   isolate->InitializeLoggingAndCounters();
    190   Heap* heap = isolate->heap();
    191   CHECK(heap->ConfigureHeapDefault());
    192   MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
    193   CHECK(memory_allocator->Setup(heap->MaxReserved(),
    194                                 heap->MaxExecutableSize()));
    195   TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
    196 
    197   NewSpace new_space(heap);
    198 
    199   void* chunk =
    200       memory_allocator->ReserveInitialChunk(4 * heap->ReservedSemiSpaceSize());
    201   CHECK(chunk != NULL);
    202   Address start = RoundUp(static_cast<Address>(chunk),
    203                           2 * heap->ReservedSemiSpaceSize());
    204   CHECK(new_space.Setup(start, 2 * heap->ReservedSemiSpaceSize()));
    205   CHECK(new_space.HasBeenSetup());
    206 
    207   while (new_space.Available() >= Page::kMaxHeapObjectSize) {
    208     Object* obj =
    209         new_space.AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked();
    210     CHECK(new_space.Contains(HeapObject::cast(obj)));
    211   }
    212 
    213   new_space.TearDown();
    214   memory_allocator->TearDown();
    215   delete memory_allocator;
    216 }
    217 
    218 
    219 TEST(OldSpace) {
    220   OS::Setup();
    221   Isolate* isolate = Isolate::Current();
    222   isolate->InitializeLoggingAndCounters();
    223   Heap* heap = isolate->heap();
    224   CHECK(heap->ConfigureHeapDefault());
    225   MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
    226   CHECK(memory_allocator->Setup(heap->MaxReserved(),
    227                                 heap->MaxExecutableSize()));
    228   TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
    229 
    230   OldSpace* s = new OldSpace(heap,
    231                              heap->MaxOldGenerationSize(),
    232                              OLD_POINTER_SPACE,
    233                              NOT_EXECUTABLE);
    234   CHECK(s != NULL);
    235 
    236   void* chunk = memory_allocator->ReserveInitialChunk(
    237       4 * heap->ReservedSemiSpaceSize());
    238   CHECK(chunk != NULL);
    239   Address start = static_cast<Address>(chunk);
    240   size_t size = RoundUp(start, 2 * heap->ReservedSemiSpaceSize()) - start;
    241 
    242   CHECK(s->Setup(start, size));
    243 
    244   while (s->Available() > 0) {
    245     s->AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked();
    246   }
    247 
    248   s->TearDown();
    249   delete s;
    250   memory_allocator->TearDown();
    251   delete memory_allocator;
    252 }
    253 
    254 
    255 TEST(LargeObjectSpace) {
    256   v8::V8::Initialize();
    257 
    258   LargeObjectSpace* lo = HEAP->lo_space();
    259   CHECK(lo != NULL);
    260 
    261   Map* faked_map = reinterpret_cast<Map*>(HeapObject::FromAddress(0));
    262   int lo_size = Page::kPageSize;
    263 
    264   Object* obj = lo->AllocateRaw(lo_size)->ToObjectUnchecked();
    265   CHECK(obj->IsHeapObject());
    266 
    267   HeapObject* ho = HeapObject::cast(obj);
    268   ho->set_map(faked_map);
    269 
    270   CHECK(lo->Contains(HeapObject::cast(obj)));
    271 
    272   CHECK(lo->FindObject(ho->address()) == obj);
    273 
    274   CHECK(lo->Contains(ho));
    275 
    276   while (true) {
    277     intptr_t available = lo->Available();
    278     { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size);
    279       if (!maybe_obj->ToObject(&obj)) break;
    280     }
    281     HeapObject::cast(obj)->set_map(faked_map);
    282     CHECK(lo->Available() < available);
    283   };
    284 
    285   CHECK(!lo->IsEmpty());
    286 
    287   CHECK(lo->AllocateRaw(lo_size)->IsFailure());
    288 }
    289