// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdlib.h>

#include "src/snapshot.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"


using namespace v8::internal;

#if 0
static void VerifyRegionMarking(Address page_start) {
#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
  Page* p = Page::FromAddress(page_start);

  p->SetRegionMarks(Page::kAllRegionsCleanMarks);

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    Page::FromAddress(addr)->MarkRegionDirty(addr);
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
  }
#endif
}
#endif


// TODO(gc) you can no longer allocate pages like this. Details are hidden.
#if 0
TEST(Page) {
  byte* mem = NewArray<byte>(2*Page::kPageSize);
  CHECK(mem != NULL);

  Address start = reinterpret_cast<Address>(mem);
  Address page_start = RoundUp(start, Page::kPageSize);

  Page* p = Page::FromAddress(page_start);
  // Initialized Page has heap pointer, normally set by memory_allocator.
  p->heap_ = CcTest::heap();
  CHECK(p->address() == page_start);
  CHECK(p->is_valid());

  p->opaque_header = 0;
  p->SetIsLargeObjectPage(false);
  CHECK(!p->next_page()->is_valid());

  CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);
  CHECK(p->ObjectAreaEnd() == page_start + Page::kPageSize);

  CHECK(p->Offset(page_start + Page::kObjectStartOffset) ==
        Page::kObjectStartOffset);
  CHECK(p->Offset(page_start + Page::kPageSize) == Page::kPageSize);

  CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
  CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());

  // Test region marking.
  VerifyRegionMarking(page_start);

  DeleteArray(mem);
}
#endif
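

// The scopes below live in v8::internal so that they can swap the isolate's
// private memory_allocator_ and code_range_ fields for the duration of a
// test.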
namespace v8 {
namespace internal {

// Temporarily sets a given allocator in an isolate.
class TestMemoryAllocatorScope {
 public:
  TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
      : isolate_(isolate),
        old_allocator_(isolate->memory_allocator_) {
    isolate->memory_allocator_ = allocator;
  }

  ~TestMemoryAllocatorScope() {
    isolate_->memory_allocator_ = old_allocator_;
  }

 private:
  Isolate* isolate_;
  MemoryAllocator* old_allocator_;

  DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};


// Temporarily sets a given code range in an isolate.
class TestCodeRangeScope {
 public:
  TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
      : isolate_(isolate),
        old_code_range_(isolate->code_range_) {
    isolate->code_range_ = code_range;
  }

  ~TestCodeRangeScope() {
    isolate_->code_range_ = old_code_range_;
  }

 private:
  Isolate* isolate_;
  CodeRange* old_code_range_;

  DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
};

} }  // namespace v8::internal


static void VerifyMemoryChunk(Isolate* isolate,
                              Heap* heap,
                              CodeRange* code_range,
                              size_t reserve_area_size,
                              size_t commit_area_size,
                              size_t second_commit_area_size,
                              Executability executable) {
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  TestCodeRangeScope test_code_range_scope(isolate, code_range);

  // Executable chunks carry a code-page header and guard pages on both sides
  // of the area; non-executable chunks only need the object start offset.
  size_t header_size = (executable == EXECUTABLE)
                       ? MemoryAllocator::CodePageGuardStartOffset()
                       : MemoryChunk::kObjectStartOffset;
  size_t guard_size = (executable == EXECUTABLE)
                      ? MemoryAllocator::CodePageGuardSize()
                      : 0;

  MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
                                                              commit_area_size,
                                                              executable,
                                                              NULL);
  size_t alignment = code_range != NULL && code_range->valid() ?
                     MemoryChunk::kAlignment : v8::base::OS::CommitPageSize();
  size_t reserved_size =
      (executable == EXECUTABLE)
          ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
                    alignment)
          : RoundUp(header_size + reserve_area_size,
                    v8::base::OS::CommitPageSize());
  CHECK(memory_chunk->size() == reserved_size);
  CHECK(memory_chunk->area_start() < memory_chunk->address() +
                                     memory_chunk->size());
  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
                                    memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);

  Address area_start = memory_chunk->area_start();

  // Recommitting must not move the start of the area, and the area size must
  // match the newly requested size.
  memory_chunk->CommitArea(second_commit_area_size);
  CHECK(area_start == memory_chunk->area_start());
  CHECK(memory_chunk->area_start() < memory_chunk->address() +
                                     memory_chunk->size());
  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
                                    memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
        second_commit_area_size);

  memory_allocator->Free(memory_chunk);
  memory_allocator->TearDown();
  delete memory_allocator;
}
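

// Regression test: after an allocation has consumed nearly the whole
// CodeRange, an equally large second request must fail cleanly by returning
// NULL rather than crashing or corrupting the range.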
TEST(Regress3540) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(
      memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  CodeRange* code_range = new CodeRange(isolate);
  const size_t code_range_size = 4 * MB;
  if (!code_range->SetUp(code_range_size)) return;
  Address address;
  size_t size;
  address = code_range->AllocateRawMemory(code_range_size - MB,
                                          code_range_size - MB, &size);
  CHECK(address != NULL);
  Address null_address;
  size_t null_size;
  null_address = code_range->AllocateRawMemory(
      code_range_size - MB, code_range_size - MB, &null_size);
  CHECK(null_address == NULL);
  code_range->FreeRawMemory(address, size);
  delete code_range;
  memory_allocator->TearDown();
  delete memory_allocator;
}


// Deterministic pseudo-random number generator, so that test runs are
// reproducible; results are masked to 20 bits (values below 1 MB).
static unsigned int Pseudorandom() {
  static uint32_t lo = 2345;
  lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
  return lo & 0xFFFFF;
}


TEST(MemoryChunk) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  size_t reserve_area_size = 1 * MB;
  size_t initial_commit_area_size, second_commit_area_size;

  for (int i = 0; i < 100; i++) {
    initial_commit_area_size = Pseudorandom();
    second_commit_area_size = Pseudorandom();

    // With CodeRange.
    CodeRange* code_range = new CodeRange(isolate);
    const size_t code_range_size = 32 * MB;
    if (!code_range->SetUp(code_range_size)) return;

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
    delete code_range;

    // Without CodeRange.
    code_range = NULL;
    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
  }
}
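

// Allocates pages into a faked old space and checks that the page list stays
// consistent: every page reachable from the first one must be owned by the
// space, and the iteration count must match the number of pages allocated.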
TEST(MemoryAllocator) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));

  int total_pages = 0;
  OldSpace faked_space(heap,
                       heap->MaxReserved(),
                       OLD_POINTER_SPACE,
                       NOT_EXECUTABLE);
  Page* first_page = memory_allocator->AllocatePage(
      faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);

  first_page->InsertAfter(faked_space.anchor()->prev_page());
  CHECK(first_page->is_valid());
  CHECK(first_page->next_page() == faked_space.anchor());
  total_pages++;

  for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
    CHECK(p->owner() == &faked_space);
  }

  // Again, we should get n or n - 1 pages.
  Page* other = memory_allocator->AllocatePage(
      faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
  CHECK(other->is_valid());
  total_pages++;
  other->InsertAfter(first_page);
  int page_count = 0;
  for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
    CHECK(p->owner() == &faked_space);
    page_count++;
  }
  CHECK(total_pages == page_count);

  Page* second_page = first_page->next_page();
  CHECK(second_page->is_valid());
  memory_allocator->Free(first_page);
  memory_allocator->Free(second_page);
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(NewSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  NewSpace new_space(heap);

  CHECK(new_space.SetUp(CcTest::heap()->ReservedSemiSpaceSize(),
                        CcTest::heap()->ReservedSemiSpaceSize()));
  CHECK(new_space.HasBeenSetUp());

  // Fill the space with maximally-sized objects; every allocation must land
  // inside the new space.
  while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
    Object* obj = new_space.AllocateRaw(
        Page::kMaxRegularHeapObjectSize).ToObjectChecked();
    CHECK(new_space.Contains(HeapObject::cast(obj)));
  }

  new_space.TearDown();
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(OldSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  OldSpace* s = new OldSpace(heap,
                             heap->MaxOldGenerationSize(),
                             OLD_POINTER_SPACE,
                             NOT_EXECUTABLE);
  CHECK(s != NULL);

  CHECK(s->SetUp());

  // Allocate until the space reports no available bytes.
  while (s->Available() > 0) {
    s->AllocateRaw(Page::kMaxRegularHeapObjectSize).ToObjectChecked();
  }

  s->TearDown();
  delete s;
  memory_allocator->TearDown();
  delete memory_allocator;
}
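

// Allocates large objects until the space answers with a retry result, then
// checks that one further request still fails cleanly.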
TEST(LargeObjectSpace) {
  v8::V8::Initialize();

  LargeObjectSpace* lo = CcTest::heap()->lo_space();
  CHECK(lo != NULL);

  int lo_size = Page::kPageSize;

  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE).ToObjectChecked();
  CHECK(obj->IsHeapObject());

  HeapObject* ho = HeapObject::cast(obj);

  CHECK(lo->Contains(HeapObject::cast(obj)));

  CHECK(lo->FindObject(ho->address()) == obj);

  CHECK(lo->Contains(ho));

  while (true) {
    intptr_t available = lo->Available();
    { AllocationResult allocation = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
      if (allocation.IsRetry()) break;
    }
    CHECK(lo->Available() < available);
  }

  CHECK(!lo->IsEmpty());

  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE).IsRetry());
}


TEST(SizeOfFirstPageIsLargeEnough) {
  if (i::FLAG_always_opt) return;
  // Bootstrapping without a snapshot causes more allocations.
  if (!i::Snapshot::HaveASnapshotToStartFrom()) return;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();

  // Freshly initialized VM gets by with one page per space.
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // Executing the empty script gets by with one page per space.
  HandleScope scope(isolate);
  CompileRun("/*empty*/");
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // No large objects required to perform the above steps.
  CHECK(isolate->heap()->lo_space()->IsEmpty());
}