// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"

namespace v8 {
namespace internal {

namespace {

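// A page is "aborted" when evacuating its objects fails part-way through
// (e.g. due to OOM) and the collector has to keep the remaining objects on
// the page in place.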
void CheckInvariantsOfAbortedPage(Page* page) {
  // Check invariants:
  // 1) Markbits are cleared
  // 2) The page is not marked as evacuation candidate anymore
  // 3) The page is not marked as aborted compaction anymore.
  CHECK(page->markbits()->IsClean());
  CHECK(!page->IsEvacuationCandidate());
  CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}

void CheckAllObjectsOnPage(const std::vector<Handle<FixedArray>>& handles,
                           Page* page) {
  for (auto& fixed_array : handles) {
    CHECK_EQ(page, Page::FromAddress(fixed_array->address()));
  }
}

}  // namespace

HEAP_TEST(CompactionFullAbortedPage) {
  // Test the scenario where we reach OOM during compaction and the whole page
  // is aborted.

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a fully aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_manual_evacuation_candidates_selection = true;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);

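    // Seal the currently used pages so that the arrays allocated below are
    // guaranteed to end up on a fresh page.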
    heap::SealCurrentObjects(heap);

    {
      HandleScope scope2(isolate);
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles =
          heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED);
      Page* to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);

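      // With the heap sealed and OOM forced there is no free space to
      // evacuate to, so compaction of the candidate page has to be aborted
      // entirely.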
      heap->set_force_oom(true);
      heap->CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // Check that all handles still point to the same page, i.e., compaction
      // has been aborted on the page.
      for (Handle<FixedArray> object : compaction_page_handles) {
        CHECK_EQ(to_be_aborted_page, Page::FromAddress(object->address()));
      }
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPage) {
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one.

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a half aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size = Page::kAllocatableMemory / objects_per_page;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);

    heap::SealCurrentObjects(heap);

    {
      HandleScope scope2(isolate);
      // Fill a fresh page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles = heap::CreatePadding(
          heap, Page::kAllocatableMemory, TENURED, object_size);
      Page* to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);

      {
        // Add another page that is filled with {num_objects} objects of size
        // {object_size}.
        HandleScope scope3(isolate);
        CHECK(heap->old_space()->Expand());
        const int num_objects = 3;
        std::vector<Handle<FixedArray>> page_to_fill_handles =
            heap::CreatePadding(heap, object_size * num_objects, TENURED,
                                object_size);
        Page* page_to_fill =
            Page::FromAddress(page_to_fill_handles.front()->address());

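        // With OOM forced, evacuation can only move objects into the free
        // space left on already existing pages; once that runs out, the
        // remainder of the candidate page is aborted.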
        heap->set_force_oom(true);
        heap->CollectAllGarbage();
        heap->mark_compact_collector()->EnsureSweepingCompleted();

        bool migration_aborted = false;
        for (Handle<FixedArray> object : compaction_page_handles) {
          // Once compaction has been aborted, all following objects still have
          // to be on the initial page.
          CHECK(!migration_aborted ||
                (Page::FromAddress(object->address()) == to_be_aborted_page));
          if (Page::FromAddress(object->address()) == to_be_aborted_page) {
            // This object has not been migrated.
            migration_aborted = true;
          } else {
            CHECK_EQ(Page::FromAddress(object->address()), page_to_fill);
          }
        }
        // Check that we actually created a scenario with a partially aborted
        // page.
        CHECK(migration_aborted);
        CheckInvariantsOfAbortedPage(to_be_aborted_page);
      }
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together. This test makes sure that intra-aborted page pointers
  // get properly updated.

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a half aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size = Page::kAllocatableMemory / objects_per_page;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    Handle<FixedArray> root_array =
        isolate->factory()->NewFixedArray(10, TENURED);

    heap::SealCurrentObjects(heap);

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill a fresh page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      std::vector<Handle<FixedArray>> compaction_page_handles =
          heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED,
                              object_size);
      to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
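      // Link the arrays into a chain: slot 0 of each array points to the
      // previously created one, and the root array points to the last one,
      // keeping the whole chain reachable once the temporary scope is closed.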
      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
      }
      root_array->set(0, *compaction_page_handles.back());
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
    }
    {
      // Add another page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      CHECK(heap->old_space()->Expand());
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> page_to_fill_handles =
          heap::CreatePadding(heap, used_memory, TENURED, object_size);
      Page* page_to_fill =
          Page::FromAddress(page_to_fill_handles.front()->address());

      heap->set_force_oom(true);
      heap->CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // The following check makes sure that we compacted "some" objects,
      // while leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = root_array;
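      // Walk the chain. Freshly allocated FixedArrays are filled with
      // undefined, so the walk stops at the first array whose slot 0 was
      // never overwritten.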
      while (current->get(0) != heap->undefined_value()) {
        current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
        CHECK(current->IsFixedArray());
        if (Page::FromAddress(current->address()) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromAddress(current->address()) == to_be_aborted_page;
        bool on_fill_page =
            Page::FromAddress(current->address()) == page_to_fill;
        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
      }
      // Check that we at least migrated one object, as otherwise the test
      // would not trigger.
      CHECK(!in_place);
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together and the very first object on the aborted page points
  // into new space. The test verifies that the store buffer entries are
  // properly cleared and rebuilt after aborting a page. Failing to do so can
  // result in other objects being allocated in the free space where their
  // payload looks like a valid new space pointer.

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a half aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size = Page::kAllocatableMemory / objects_per_page;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    Handle<FixedArray> root_array =
        isolate->factory()->NewFixedArray(10, TENURED);
    heap::SealCurrentObjects(heap);

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill a fresh page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles = heap::CreatePadding(
          heap, Page::kAllocatableMemory, TENURED, object_size);
      // Sanity check that we have enough space for linking up arrays.
      CHECK_GE(compaction_page_handles.front()->length(), 2);
      to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
      }
      root_array->set(0, *compaction_page_handles.back());
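      // Let the first array on the page point into new space. This creates an
      // old-to-new slot that the store buffer has to track correctly across
      // the aborted compaction.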
      Handle<FixedArray> new_space_array =
          isolate->factory()->NewFixedArray(1, NOT_TENURED);
      CHECK(heap->InNewSpace(*new_space_array));
      compaction_page_handles.front()->set(1, *new_space_array);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
    }

    {
      // Add another page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      CHECK(heap->old_space()->Expand());
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> page_to_fill_handles =
          heap::CreatePadding(heap, used_memory, TENURED, object_size);
      Page* page_to_fill =
          Page::FromAddress(page_to_fill_handles.front()->address());

      heap->set_force_oom(true);
      heap->CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // The following check makes sure that we compacted "some" objects,
      // while leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = root_array;
      while (current->get(0) != heap->undefined_value()) {
        current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
        CHECK(!heap->InNewSpace(*current));
        CHECK(current->IsFixedArray());
        if (Page::FromAddress(current->address()) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromAddress(current->address()) == to_be_aborted_page;
        bool on_fill_page =
            Page::FromAddress(current->address()) == page_to_fill;
        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
      }
      // Check that we at least migrated one object, as otherwise the test
      // would not trigger.
      CHECK(!in_place);
      CheckInvariantsOfAbortedPage(to_be_aborted_page);

      // Allocate a new object in new space.
      Handle<FixedArray> holder =
          isolate->factory()->NewFixedArray(10, NOT_TENURED);
      // Create a broken address that looks like a tagged pointer to a new
      // space object.
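      // The trailing +1 mimics the heap object tag bit of a real tagged
      // pointer.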
      Address broken_address = holder->address() + 2 * kPointerSize + 1;
      // Convert it to a vector to create a string from it.
      Vector<const uint8_t> string_to_broken_address(
          reinterpret_cast<const uint8_t*>(&broken_address), kPointerSize);

      Handle<String> string;
      do {
        // We know that the interesting slot will be on the aborted page and
        // hence we allocate until we get our string on the aborted page.
        // We used slot 1 in the FixedArray, which corresponds to the first
        // word in the string. Since the first object definitely migrated we
        // can just allocate until we hit the aborted page.
        string = isolate->factory()
                     ->NewStringFromOneByte(string_to_broken_address, TENURED)
                     .ToHandleChecked();
      } while (Page::FromAddress(string->address()) != to_be_aborted_page);

      // If store buffer entries are not properly filtered/reset for aborted
      // pages, we now have a broken address at an object slot in old space
      // and the following scavenge will crash.
      heap->CollectGarbage(NEW_SPACE);
    }
  }
}

}  // namespace internal
}  // namespace v8