// Source: wtf/PartitionAllocTest.cpp (code-search navigation header: Home | History | Annotate | Download | only in wtf)
      1 /*
      2  * Copyright (C) 2013 Google Inc. All rights reserved.
      3  *
      4  * Redistribution and use in source and binary forms, with or without
      5  * modification, are permitted provided that the following conditions are
      6  * met:
      7  *
      8  *     * Redistributions of source code must retain the above copyright
      9  * notice, this list of conditions and the following disclaimer.
     10  *     * Redistributions in binary form must reproduce the above
     11  * copyright notice, this list of conditions and the following disclaimer
     12  * in the documentation and/or other materials provided with the
     13  * distribution.
     14  *     * Neither the name of Google Inc. nor the names of its
     15  * contributors may be used to endorse or promote products derived from
     16  * this software without specific prior written permission.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     29  */
     30 
     31 #include "config.h"
     32 #include "wtf/PartitionAlloc.h"
     33 
     34 #include "wtf/BitwiseOperations.h"
     35 #include "wtf/OwnPtr.h"
     36 #include "wtf/PassOwnPtr.h"
     37 #include <gtest/gtest.h>
     38 #include <stdlib.h>
     39 #include <string.h>
     40 
     41 #if OS(POSIX)
     42 #include <sys/mman.h>
     43 
     44 #ifndef MAP_ANONYMOUS
     45 #define MAP_ANONYMOUS MAP_ANON
     46 #endif
     47 #endif // OS(POSIX)
     48 
     49 #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
     50 
namespace {

// Upper bound on allocation sizes served by the size-specific partition below.
static const size_t kTestMaxAllocation = 4096;
static SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator;
static PartitionAllocatorGeneric genericAllocator;

// Size requested by most tests in this file.
static const size_t kTestAllocSize = 16;
#if !ENABLE(ASSERT)
static const size_t kPointerOffset = 0;
static const size_t kExtraAllocSize = 0;
#else
// With assertions enabled, each allocation is bracketed by cookies used to
// detect over/underwrites, so returned pointers are offset by one cookie and
// every slot is two cookies larger than requested.
static const size_t kPointerOffset = WTF::kCookieSize;
static const size_t kExtraAllocSize = WTF::kCookieSize * 2;
#endif
// Actual slot footprint of a kTestAllocSize allocation, and the bucket that
// footprint maps to.
static const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize;
static const size_t kTestBucketIndex = kRealAllocSize >> WTF::kBucketShift;
     67 
     68 static void TestSetup()
     69 {
     70     allocator.init();
     71     genericAllocator.init();
     72 }
     73 
     74 static void TestShutdown()
     75 {
     76 #ifndef NDEBUG
     77     // Test that the partition statistic dumping code works. Previously, it
     78     // bitrotted because no test calls it.
     79     partitionDumpStats(*allocator.root());
     80 #endif
     81 
     82     // We expect no leaks in the general case. We have a test for leak
     83     // detection.
     84     EXPECT_TRUE(allocator.shutdown());
     85     EXPECT_TRUE(genericAllocator.shutdown());
     86 }
     87 
     88 static WTF::PartitionPage* GetFullPage(size_t size)
     89 {
     90     size_t realSize = size + kExtraAllocSize;
     91     size_t bucketIdx = realSize >> WTF::kBucketShift;
     92     WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
     93     size_t numSlots = (bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / realSize;
     94     void* first = 0;
     95     void* last = 0;
     96     size_t i;
     97     for (i = 0; i < numSlots; ++i) {
     98         void* ptr = partitionAlloc(allocator.root(), size);
     99         EXPECT_TRUE(ptr);
    100         if (!i)
    101             first = WTF::partitionCookieFreePointerAdjust(ptr);
    102         else if (i == numSlots - 1)
    103             last = WTF::partitionCookieFreePointerAdjust(ptr);
    104     }
    105     EXPECT_EQ(WTF::partitionPointerToPage(first), WTF::partitionPointerToPage(last));
    106     if (bucket->numSystemPagesPerSlotSpan == WTF::kNumSystemPagesPerPartitionPage)
    107         EXPECT_EQ(reinterpret_cast<size_t>(first) & WTF::kPartitionPageBaseMask, reinterpret_cast<size_t>(last) & WTF::kPartitionPageBaseMask);
    108     EXPECT_EQ(numSlots, static_cast<size_t>(bucket->activePagesHead->numAllocatedSlots));
    109     EXPECT_EQ(0, bucket->activePagesHead->freelistHead);
    110     EXPECT_TRUE(bucket->activePagesHead);
    111     EXPECT_TRUE(bucket->activePagesHead != &WTF::PartitionRootGeneric::gSeedPage);
    112     return bucket->activePagesHead;
    113 }
    114 
    115 static void FreeFullPage(WTF::PartitionPage* page)
    116 {
    117     size_t size = page->bucket->slotSize;
    118     size_t numSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / size;
    119     EXPECT_EQ(numSlots, static_cast<size_t>(abs(page->numAllocatedSlots)));
    120     char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page));
    121     size_t i;
    122     for (i = 0; i < numSlots; ++i) {
    123         partitionFree(ptr + kPointerOffset);
    124         ptr += size;
    125     }
    126 }
    127 
    128 static void CycleFreeCache(size_t size)
    129 {
    130     size_t realSize = size + kExtraAllocSize;
    131     size_t bucketIdx = realSize >> WTF::kBucketShift;
    132     WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
    133     ASSERT(!bucket->activePagesHead->numAllocatedSlots);
    134 
    135     for (size_t i = 0; i < WTF::kMaxFreeableSpans; ++i) {
    136         void* ptr = partitionAlloc(allocator.root(), size);
    137         EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
    138         partitionFree(ptr);
    139         EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
    140         EXPECT_NE(-1, bucket->activePagesHead->freeCacheIndex);
    141     }
    142 }
    143 
    144 static void CycleGenericFreeCache(size_t size)
    145 {
    146     for (size_t i = 0; i < WTF::kMaxFreeableSpans; ++i) {
    147         void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
    148         WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    149         WTF::PartitionBucket* bucket = page->bucket;
    150         EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
    151         partitionFreeGeneric(genericAllocator.root(), ptr);
    152         EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
    153         EXPECT_NE(-1, bucket->activePagesHead->freeCacheIndex);
    154     }
    155 }
    156 
    157 // Check that the most basic of allocate / free pairs work.
TEST(PartitionAllocTest, Basic)
{
    TestSetup();
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
    WTF::PartitionPage* seedPage = &WTF::PartitionRootGeneric::gSeedPage;

    // A fresh bucket points at the shared seed page and has no free pages.
    EXPECT_FALSE(bucket->freePagesHead);
    EXPECT_EQ(seedPage, bucket->activePagesHead);
    EXPECT_EQ(0, bucket->activePagesHead->nextPage);

    void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
    EXPECT_TRUE(ptr);
    // The pointer should sit at the start of a slot, plus the debug cookie
    // offset when cookies are enabled.
    EXPECT_EQ(kPointerOffset, reinterpret_cast<size_t>(ptr) & WTF::kPartitionPageOffsetMask);
    // Check that the offset appears to include a guard page.
    EXPECT_EQ(WTF::kPartitionPageSize + kPointerOffset, reinterpret_cast<size_t>(ptr) & WTF::kSuperPageOffsetMask);

    partitionFree(ptr);
    // Expect that the last active page does not get tossed to the freelist.
    EXPECT_FALSE(bucket->freePagesHead);

    TestShutdown();
}
    180 
    181 // Check that we can detect a memory leak.
TEST(PartitionAllocTest, SimpleLeak)
{
    TestSetup();
    // Deliberately leak one allocation from each partition. shutdown() must
    // then report failure (return false) rather than claim a clean teardown.
    void* leakedPtr = partitionAlloc(allocator.root(), kTestAllocSize);
    (void)leakedPtr;
    void* leakedPtr2 = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
    (void)leakedPtr2;
    EXPECT_FALSE(allocator.shutdown());
    EXPECT_FALSE(genericAllocator.shutdown());
}
    192 
    193 // Test multiple allocations, and freelist handling.
TEST(PartitionAllocTest, MultiAlloc)
{
    TestSetup();

    // Two back-to-back allocations should occupy adjacent slots, exactly
    // kRealAllocSize (requested size plus cookies) apart.
    char* ptr1 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    char* ptr2 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_TRUE(ptr1);
    EXPECT_TRUE(ptr2);
    ptrdiff_t diff = ptr2 - ptr1;
    EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);

    // Check that we re-use the just-freed slot.
    partitionFree(ptr2);
    ptr2 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_TRUE(ptr2);
    diff = ptr2 - ptr1;
    EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
    partitionFree(ptr1);
    ptr1 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_TRUE(ptr1);
    diff = ptr2 - ptr1;
    EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);

    // A third allocation lands in the next fresh slot, two slots past ptr1.
    char* ptr3 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_TRUE(ptr3);
    diff = ptr3 - ptr1;
    EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff);

    partitionFree(ptr1);
    partitionFree(ptr2);
    partitionFree(ptr3);

    TestShutdown();
}
    228 
    229 // Test a bucket with multiple pages.
TEST(PartitionAllocTest, MultiPages)
{
    TestSetup();
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];

    WTF::PartitionPage* page = GetFullPage(kTestAllocSize);
    FreeFullPage(page);
    // An emptied page stays as the bucket's active page rather than moving to
    // the free page list.
    EXPECT_FALSE(bucket->freePagesHead);
    EXPECT_EQ(page, bucket->activePagesHead);
    EXPECT_EQ(0, page->nextPage);
    EXPECT_EQ(0, page->numAllocatedSlots);

    page = GetFullPage(kTestAllocSize);
    WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);

    EXPECT_EQ(page2, bucket->activePagesHead);
    EXPECT_EQ(0, page2->nextPage);
    // Both pages should live inside the same super page.
    EXPECT_EQ(reinterpret_cast<uintptr_t>(partitionPageToPointer(page)) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(page2)) & WTF::kSuperPageBaseMask);

    // Fully free the non-current page. It should not be freelisted because
    // there is no other immediately useable page. The other page is full.
    FreeFullPage(page);
    EXPECT_EQ(0, page->numAllocatedSlots);
    EXPECT_FALSE(bucket->freePagesHead);
    EXPECT_EQ(page, bucket->activePagesHead);

    // Allocate a new page, it should pull from the freelist.
    page = GetFullPage(kTestAllocSize);
    EXPECT_FALSE(bucket->freePagesHead);
    EXPECT_EQ(page, bucket->activePagesHead);

    FreeFullPage(page);
    FreeFullPage(page2);
    EXPECT_EQ(0, page->numAllocatedSlots);
    EXPECT_EQ(0, page2->numAllocatedSlots);
    EXPECT_EQ(0, page2->numUnprovisionedSlots);
    // page2 should now be parked in the free cache awaiting reuse.
    EXPECT_NE(-1, page2->freeCacheIndex);

    TestShutdown();
}
    270 
    271 // Test some finer aspects of internal page transitions.
TEST(PartitionAllocTest, PageTransitions)
{
    TestSetup();
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];

    // Fill two pages; each newly-filled page becomes the active head.
    WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize);
    EXPECT_EQ(page1, bucket->activePagesHead);
    EXPECT_EQ(0, page1->nextPage);
    WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);
    EXPECT_EQ(page2, bucket->activePagesHead);
    EXPECT_EQ(0, page2->nextPage);

    // Bounce page1 back into the non-full list then fill it up again.
    char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
    partitionFree(ptr);
    EXPECT_EQ(page1, bucket->activePagesHead);
    (void) partitionAlloc(allocator.root(), kTestAllocSize);
    EXPECT_EQ(page1, bucket->activePagesHead);
    EXPECT_EQ(page2, bucket->activePagesHead->nextPage);

    // Allocating another page at this point should cause us to scan over page1
    // (which is both full and NOT our current page), and evict it from the
    // freelist. Older code had a O(n^2) condition due to failure to do this.
    WTF::PartitionPage* page3 = GetFullPage(kTestAllocSize);
    EXPECT_EQ(page3, bucket->activePagesHead);
    EXPECT_EQ(0, page3->nextPage);

    // Work out a pointer into page2 and free it.
    ptr = reinterpret_cast<char*>(partitionPageToPointer(page2)) + kPointerOffset;
    partitionFree(ptr);
    // Trying to allocate at this time should cause us to cycle around to page2
    // and find the recently freed slot.
    char* newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_EQ(ptr, newPtr);
    EXPECT_EQ(page2, bucket->activePagesHead);
    EXPECT_EQ(page3, page2->nextPage);

    // Work out a pointer into page1 and free it. This should pull the page
    // back into the list of available pages.
    ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
    partitionFree(ptr);
    // This allocation should be satisfied by page1.
    newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_EQ(ptr, newPtr);
    EXPECT_EQ(page1, bucket->activePagesHead);
    EXPECT_EQ(page2, page1->nextPage);

    FreeFullPage(page3);
    FreeFullPage(page2);
    FreeFullPage(page1);

    // Allocating whilst in this state exposed a bug, so keep the test.
    ptr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    partitionFree(ptr);

    TestShutdown();
}
    329 
    330 // Test some corner cases relating to page transitions in the internal
    331 // free page list metadata bucket.
TEST(PartitionAllocTest, FreePageListPageTransitions)
{
    TestSetup();
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];

    // Enough pages so that, when all are freed, the internal metadata page
    // holding free page list entries itself fills up.
    size_t numToFillFreeListPage = WTF::kPartitionPageSize / (sizeof(WTF::PartitionPage) + kExtraAllocSize);
    // The +1 is because we need to account for the fact that the current page
    // never gets thrown on the freelist.
    ++numToFillFreeListPage;
    OwnPtr<WTF::PartitionPage*[]> pages = adoptArrayPtr(new WTF::PartitionPage*[numToFillFreeListPage]);

    size_t i;
    for (i = 0; i < numToFillFreeListPage; ++i) {
        pages[i] = GetFullPage(kTestAllocSize);
    }
    EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);
    for (i = 0; i < numToFillFreeListPage; ++i)
        FreeFullPage(pages[i]);
    // The head page stays active (empty); the rest are cached as free spans.
    EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
    EXPECT_NE(-1, bucket->activePagesHead->nextPage->freeCacheIndex);
    EXPECT_EQ(0, bucket->activePagesHead->nextPage->numAllocatedSlots);
    EXPECT_EQ(0, bucket->activePagesHead->nextPage->numUnprovisionedSlots);

    // Allocate / free in a different bucket size so we get control of a
    // different free page list. We need two pages because one will be the last
    // active page and not get freed.
    WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize * 2);
    WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize * 2);
    FreeFullPage(page1);
    FreeFullPage(page2);

    // If we re-allocate all kTestAllocSize allocations, we'll pull all the
    // free pages and end up freeing the first page for free page objects.
    // It's getting a bit tricky but a nice re-entrancy is going on:
    // alloc(kTestAllocSize) -> pulls page from free page list ->
    // free(PartitionFreepagelistEntry) -> last entry in page freed ->
    // alloc(PartitionFreepagelistEntry).
    for (i = 0; i < numToFillFreeListPage; ++i) {
        pages[i] = GetFullPage(kTestAllocSize);
    }
    EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);

    // As part of the final free-up, we'll test another re-entrancy:
    // free(kTestAllocSize) -> last entry in page freed ->
    // alloc(PartitionFreepagelistEntry) -> pulls page from free page list ->
    // free(PartitionFreepagelistEntry)
    for (i = 0; i < numToFillFreeListPage; ++i)
        FreeFullPage(pages[i]);
    EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
    EXPECT_NE(-1, bucket->activePagesHead->nextPage->freeCacheIndex);
    EXPECT_EQ(0, bucket->activePagesHead->nextPage->numAllocatedSlots);
    EXPECT_EQ(0, bucket->activePagesHead->nextPage->numUnprovisionedSlots);

    TestShutdown();
}
    387 
    388 // Test a large series of allocations that cross more than one underlying
    389 // 64KB super page allocation.
TEST(PartitionAllocTest, MultiPageAllocs)
{
    TestSetup();
    // This is guaranteed to cross a super page boundary because the first
    // partition page "slot" will be taken up by a guard page.
    size_t numPagesNeeded = WTF::kNumPartitionPagesPerSuperPage;
    // The super page should begin and end in a guard, so we use one less page
    // in order to allocate a single page in the new super page.
    --numPagesNeeded;

    EXPECT_GT(numPagesNeeded, 1u);
    OwnPtr<WTF::PartitionPage*[]> pages;
    pages = adoptArrayPtr(new WTF::PartitionPage*[numPagesNeeded]);
    uintptr_t firstSuperPageBase = 0;
    size_t i;
    for (i = 0; i < numPagesNeeded; ++i) {
        pages[i] = GetFullPage(kTestAllocSize);
        void* storagePtr = partitionPageToPointer(pages[i]);
        if (!i)
            firstSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageBaseMask;
        if (i == numPagesNeeded - 1) {
            // The last page must have spilled into a second super page.
            uintptr_t secondSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageBaseMask;
            uintptr_t secondSuperPageOffset = reinterpret_cast<uintptr_t>(storagePtr) & WTF::kSuperPageOffsetMask;
            EXPECT_FALSE(secondSuperPageBase == firstSuperPageBase);
            // Check that we allocated a guard page for the second page.
            EXPECT_EQ(WTF::kPartitionPageSize, secondSuperPageOffset);
        }
    }
    for (i = 0; i < numPagesNeeded; ++i)
        FreeFullPage(pages[i]);

    TestShutdown();
}
    423 
    424 // Test the generic allocation functions that can handle arbitrary sizes and
    425 // reallocing etc.
TEST(PartitionAllocTest, GenericAlloc)
{
    TestSetup();

    // Smoke-test the extremes: a tiny bucketed allocation and one just past
    // the largest bucketed size (direct mapped).
    void* ptr = partitionAllocGeneric(genericAllocator.root(), 1);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr);
    ptr = partitionAllocGeneric(genericAllocator.root(), WTF::kGenericMaxBucketed + 1);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    ptr = partitionAllocGeneric(genericAllocator.root(), 1);
    EXPECT_TRUE(ptr);
    void* origPtr = ptr;
    char* charPtr = static_cast<char*>(ptr);
    *charPtr = 'A';

    // Change the size of the realloc, remaining inside the same bucket.
    void* newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 2);
    EXPECT_EQ(ptr, newPtr);
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
    EXPECT_EQ(ptr, newPtr);
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericSmallestBucket);
    EXPECT_EQ(ptr, newPtr);

    // Change the size of the realloc, switching buckets.
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericSmallestBucket + 1);
    EXPECT_NE(newPtr, ptr);
    // Check that the realloc copied correctly.
    char* newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'A');
#if ENABLE(ASSERT)
    // Subtle: this checks for an old bug where we copied too much from the
    // source of the realloc. The condition can be detected by a trashing of
    // the uninitialized value in the space of the upsized allocation.
    EXPECT_EQ(WTF::kUninitializedByte, static_cast<unsigned char>(*(newCharPtr + WTF::kGenericSmallestBucket)));
#endif
    *newCharPtr = 'B';
    // The realloc moved. To check that the old allocation was freed, we can
    // do an alloc of the old allocation size and check that the old allocation
    // address is at the head of the freelist and reused.
    void* reusedPtr = partitionAllocGeneric(genericAllocator.root(), 1);
    EXPECT_EQ(reusedPtr, origPtr);
    partitionFreeGeneric(genericAllocator.root(), reusedPtr);

    // Downsize the realloc.
    ptr = newPtr;
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
    EXPECT_EQ(newPtr, origPtr);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'B');
    *newCharPtr = 'C';

    // Upsize the realloc to outside the partition.
    ptr = newPtr;
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed + 1);
    EXPECT_NE(newPtr, ptr);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'C');
    *newCharPtr = 'D';

    // Upsize and downsize the realloc, remaining outside the partition.
    ptr = newPtr;
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed * 10);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'D');
    *newCharPtr = 'E';
    ptr = newPtr;
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed * 2);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'E');
    *newCharPtr = 'F';

    // Downsize the realloc to inside the partition.
    ptr = newPtr;
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
    EXPECT_NE(newPtr, ptr);
    EXPECT_EQ(newPtr, origPtr);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'F');

    partitionFreeGeneric(genericAllocator.root(), newPtr);
    TestShutdown();
}
    510 
    511 // Test the generic allocation functions can handle some specific sizes of
    512 // interest.
TEST(PartitionAllocTest, GenericAllocSizes)
{
    TestSetup();

    // A zero-byte request must still return a usable, freeable pointer.
    void* ptr = partitionAllocGeneric(genericAllocator.root(), 0);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // kPartitionPageSize is interesting because it results in just one
    // allocation per page, which tripped up some corner cases.
    size_t size = WTF::kPartitionPageSize - kExtraAllocSize;
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr);
    void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr2);
    partitionFreeGeneric(genericAllocator.root(), ptr);
    // Should be freeable at this point.
    WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_NE(-1, page->freeCacheIndex);
    partitionFreeGeneric(genericAllocator.root(), ptr2);

    // A size chosen so exactly two slots fit per slot span.
    size = (((WTF::kPartitionPageSize * WTF::kMaxPartitionPagesPerSlotSpan) - WTF::kSystemPageSize) / 2) - kExtraAllocSize;
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr);
    memset(ptr, 'A', size);
    ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr2);
    void* ptr3 = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr3);
    void* ptr4 = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr4);

    page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr3));
    EXPECT_NE(page, page2);

    partitionFreeGeneric(genericAllocator.root(), ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr3);
    partitionFreeGeneric(genericAllocator.root(), ptr2);
    // Should be freeable at this point.
    EXPECT_NE(-1, page->freeCacheIndex);
    EXPECT_EQ(0, page->numAllocatedSlots);
    EXPECT_EQ(0, page->numUnprovisionedSlots);
    // Re-allocations should be served LIFO from the freelist.
    void* newPtr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_EQ(ptr3, newPtr);
    newPtr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_EQ(ptr2, newPtr);
#if OS(LINUX) && !ENABLE(ASSERT)
    // On Linux, we have a guarantee that freelisting a page should cause its
    // contents to be nulled out. We check for null here to detect a bug we
    // had where a large slot size was causing us to not properly free all
    // resources back to the system.
    // We only run the check when asserts are disabled because when they are
    // enabled, the allocated area is overwritten with an "uninitialized"
    // byte pattern.
    EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1)));
#endif
    partitionFreeGeneric(genericAllocator.root(), newPtr);
    partitionFreeGeneric(genericAllocator.root(), ptr3);
    partitionFreeGeneric(genericAllocator.root(), ptr4);

    // Can we allocate a massive (512MB) size?
    ptr = partitionAllocGeneric(genericAllocator.root(), 512 * 1024 * 1024);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Check a more reasonable, but still direct mapped, size.
    // Chop a system page and a byte off to test for rounding errors.
    size = 20 * 1024 * 1024;
    size -= WTF::kSystemPageSize;
    size -= 1;
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    char* charPtr = reinterpret_cast<char*>(ptr);
    *(charPtr + (size - 1)) = 'A';
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Can we free null?
    partitionFreeGeneric(genericAllocator.root(), 0);

    // Do we correctly get a null for a failed allocation?
    EXPECT_EQ(0, partitionAllocGenericFlags(genericAllocator.root(), WTF::PartitionAllocReturnNull, 3u * 1024 * 1024 * 1024));

    TestShutdown();
}
    596 
    597 // Test that we can fetch the real allocated size after an allocation.
TEST(PartitionAllocTest, GenericAllocGetSize)
{
    TestSetup();

    void* ptr;
    size_t requestedSize, actualSize, predictedSize;

    EXPECT_TRUE(partitionAllocSupportsGetSize());

    // Allocate something small. The bucket rounds up, so the actual size must
    // exceed the request but match the size predicted up front.
    requestedSize = 511 - kExtraAllocSize;
    predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
    ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
    EXPECT_TRUE(ptr);
    actualSize = partitionAllocGetSize(ptr);
    EXPECT_EQ(predictedSize, actualSize);
    EXPECT_LT(requestedSize, actualSize);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Allocate a size that should be a perfect match for a bucket, because it
    // is an exact power of 2.
    requestedSize = (256 * 1024) - kExtraAllocSize;
    predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
    ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
    EXPECT_TRUE(ptr);
    actualSize = partitionAllocGetSize(ptr);
    EXPECT_EQ(predictedSize, actualSize);
    EXPECT_EQ(requestedSize, actualSize);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Allocate a size that is a system page smaller than a bucket. GetSize()
    // should return a larger size than we asked for now.
    requestedSize = (256 * 1024) - WTF::kSystemPageSize - kExtraAllocSize;
    predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
    ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
    EXPECT_TRUE(ptr);
    actualSize = partitionAllocGetSize(ptr);
    EXPECT_EQ(predictedSize, actualSize);
    EXPECT_EQ(requestedSize + WTF::kSystemPageSize, actualSize);
    // Check that we can write at the end of the reported size too.
    char* charPtr = reinterpret_cast<char*>(ptr);
    *(charPtr + (actualSize - 1)) = 'A';
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Allocate something very large, and uneven.
    requestedSize = 512 * 1024 * 1024 - 1;
    predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
    ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
    EXPECT_TRUE(ptr);
    actualSize = partitionAllocGetSize(ptr);
    EXPECT_EQ(predictedSize, actualSize);
    EXPECT_LT(requestedSize, actualSize);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Too large allocation: the predicted size is returned unmodified.
    requestedSize = INT_MAX;
    predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
    EXPECT_EQ(requestedSize, predictedSize);

    TestShutdown();
}
    659 
// Test the realloc() contract.
TEST(PartitionAllocTest, Realloc)
{
    TestSetup();

    // realloc(0, size) should be equivalent to malloc().
    void* ptr = partitionReallocGeneric(genericAllocator.root(), 0, kTestAllocSize);
    memset(ptr, 'A', kTestAllocSize);
    WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    // realloc(ptr, 0) should be equivalent to free().
    void* ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, 0);
    EXPECT_EQ(0, ptr2);
    // After the free-equivalent call, the slot should be back at the head of
    // the page's freelist.
    EXPECT_EQ(WTF::partitionCookieFreePointerAdjust(ptr), page->freelistHead);

    // Test that growing an allocation with realloc() copies everything from the
    // old allocation.
    size_t size = WTF::kSystemPageSize - kExtraAllocSize;
    // This test depends on `size` being an exact bucket fit; assert it.
    EXPECT_EQ(size, partitionAllocActualSize(genericAllocator.root(), size));
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    memset(ptr, 'A', size);
    // Growing by a single byte overflows the bucket, forcing a reallocation
    // to a different slot.
    ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, size + 1);
    EXPECT_NE(ptr, ptr2);
    char* charPtr2 = static_cast<char*>(ptr2);
    EXPECT_EQ('A', charPtr2[0]);
    EXPECT_EQ('A', charPtr2[size - 1]);
#if ENABLE(ASSERT)
    // In assert-enabled builds the byte just past the copied region should
    // still carry the uninitialized-memory marker, proving that exactly
    // `size` bytes were copied.
    EXPECT_EQ(WTF::kUninitializedByte, static_cast<unsigned char>(charPtr2[size]));
#endif

    // Test that shrinking an allocation with realloc() also copies everything
    // from the old allocation.
    ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - 1);
    EXPECT_NE(ptr2, ptr);
    char* charPtr = static_cast<char*>(ptr);
    EXPECT_EQ('A', charPtr[0]);
    EXPECT_EQ('A', charPtr[size - 2]);
#if ENABLE(ASSERT)
    // Only (size - 1) bytes should have been copied into the smaller slot.
    EXPECT_EQ(WTF::kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1]));
#endif

    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Test that shrinking a direct mapped allocation happens in-place.
    size = WTF::kGenericMaxBucketed + 16 * WTF::kSystemPageSize;
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    size_t actualSize = partitionAllocGetSize(ptr);
    ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kGenericMaxBucketed + 8 * WTF::kSystemPageSize);
    EXPECT_EQ(ptr, ptr2);
    EXPECT_EQ(actualSize - 8 * WTF::kSystemPageSize, partitionAllocGetSize(ptr2));

    // Test that a previously in-place shrunk direct mapped allocation can be
    // expanded up again within its original size.
    ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - WTF::kSystemPageSize);
    EXPECT_EQ(ptr2, ptr);
    EXPECT_EQ(actualSize - WTF::kSystemPageSize, partitionAllocGetSize(ptr));

    // Test that a direct mapped allocation is performed not in-place when the
    // new size is small enough.
    ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, WTF::kSystemPageSize);
    EXPECT_NE(ptr, ptr2);

    partitionFreeGeneric(genericAllocator.root(), ptr2);

    TestShutdown();
}
    725 
// Tests the handing out of freelists for partial pages.
TEST(PartitionAllocTest, PartialPageFreelists)
{
    TestSetup();

    // Use the largest bucketed size; together with the extra (cookie) bytes
    // it should come to one system page minus the allocation granularity.
    size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
    EXPECT_EQ(WTF::kSystemPageSize - WTF::kAllocationGranularity, bigSize + kExtraAllocSize);
    size_t bucketIdx = (bigSize + kExtraAllocSize) >> WTF::kBucketShift;
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
    EXPECT_EQ(0, bucket->freePagesHead);

    void* ptr = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr);

    WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    size_t totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (bigSize + kExtraAllocSize);
    EXPECT_EQ(4u, totalSlots);
    // The freelist should have one entry, because we were able to exactly fit
    // one object slot and one freelist pointer (the null that the head points
    // to) into a system page.
    EXPECT_TRUE(page->freelistHead);
    EXPECT_EQ(1, page->numAllocatedSlots);
    EXPECT_EQ(2, page->numUnprovisionedSlots);

    // Second allocation consumes the lone freelist entry.
    void* ptr2 = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr2);
    EXPECT_FALSE(page->freelistHead);
    EXPECT_EQ(2, page->numAllocatedSlots);
    EXPECT_EQ(2, page->numUnprovisionedSlots);

    // Third allocation provisions the remaining slots; one goes back on the
    // freelist.
    void* ptr3 = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr3);
    EXPECT_TRUE(page->freelistHead);
    EXPECT_EQ(3, page->numAllocatedSlots);
    EXPECT_EQ(0, page->numUnprovisionedSlots);

    // Fourth allocation fills the page completely.
    void* ptr4 = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr4);
    EXPECT_FALSE(page->freelistHead);
    EXPECT_EQ(4, page->numAllocatedSlots);
    EXPECT_EQ(0, page->numUnprovisionedSlots);

    // Fifth allocation must spill over to a fresh page.
    void* ptr5 = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr5);

    WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr5));
    EXPECT_EQ(1, page2->numAllocatedSlots);

    // Churn things a little whilst there's a partial page freelist.
    partitionFree(ptr);
    ptr = partitionAlloc(allocator.root(), bigSize);
    void* ptr6 = partitionAlloc(allocator.root(), bigSize);

    partitionFree(ptr);
    partitionFree(ptr2);
    partitionFree(ptr3);
    partitionFree(ptr4);
    partitionFree(ptr5);
    partitionFree(ptr6);
    // Both pages should now sit in the free-page cache (a freeCacheIndex of
    // -1 appears to mean "not in the cache" — see the FreeCache test).
    EXPECT_NE(-1, page->freeCacheIndex);
    EXPECT_NE(-1, page2->freeCacheIndex);
    EXPECT_TRUE(page2->freelistHead);
    EXPECT_EQ(0, page2->numAllocatedSlots);

    // And test a couple of sizes that do not cross kSystemPageSize with a single allocation.
    size_t mediumSize = (WTF::kSystemPageSize / 2) - kExtraAllocSize;
    bucketIdx = (mediumSize + kExtraAllocSize) >> WTF::kBucketShift;
    bucket = &allocator.root()->buckets()[bucketIdx];
    EXPECT_EQ(0, bucket->freePagesHead);

    ptr = partitionAlloc(allocator.root(), mediumSize);
    EXPECT_TRUE(ptr);
    page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    // Only the first system page of the slot span should be provisioned.
    totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (mediumSize + kExtraAllocSize);
    size_t firstPageSlots = WTF::kSystemPageSize / (mediumSize + kExtraAllocSize);
    EXPECT_EQ(2u, firstPageSlots);
    EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);

    partitionFree(ptr);

    size_t smallSize = (WTF::kSystemPageSize / 4) - kExtraAllocSize;
    bucketIdx = (smallSize + kExtraAllocSize) >> WTF::kBucketShift;
    bucket = &allocator.root()->buckets()[bucketIdx];
    EXPECT_EQ(0, bucket->freePagesHead);

    ptr = partitionAlloc(allocator.root(), smallSize);
    EXPECT_TRUE(ptr);
    page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (smallSize + kExtraAllocSize);
    firstPageSlots = WTF::kSystemPageSize / (smallSize + kExtraAllocSize);
    EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);

    partitionFree(ptr);
    EXPECT_TRUE(page->freelistHead);
    EXPECT_EQ(0, page->numAllocatedSlots);

    size_t verySmallSize = 32 - kExtraAllocSize;
    bucketIdx = (verySmallSize + kExtraAllocSize) >> WTF::kBucketShift;
    bucket = &allocator.root()->buckets()[bucketIdx];
    EXPECT_EQ(0, bucket->freePagesHead);

    ptr = partitionAlloc(allocator.root(), verySmallSize);
    EXPECT_TRUE(ptr);
    page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (verySmallSize + kExtraAllocSize);
    firstPageSlots = WTF::kSystemPageSize / (verySmallSize + kExtraAllocSize);
    EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);

    partitionFree(ptr);
    EXPECT_TRUE(page->freelistHead);
    EXPECT_EQ(0, page->numAllocatedSlots);

    // And try an allocation size (against the generic allocator) that is
    // larger than a system page.
    size_t pageAndAHalfSize = (WTF::kSystemPageSize + (WTF::kSystemPageSize / 2)) - kExtraAllocSize;
    ptr = partitionAllocGeneric(genericAllocator.root(), pageAndAHalfSize);
    EXPECT_TRUE(ptr);
    page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    EXPECT_TRUE(page->freelistHead);
    totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (pageAndAHalfSize + kExtraAllocSize);
    EXPECT_EQ(totalSlots - 2, page->numUnprovisionedSlots);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // And then make sure than exactly the page size only faults one page.
    size_t pageSize = WTF::kSystemPageSize - kExtraAllocSize;
    ptr = partitionAllocGeneric(genericAllocator.root(), pageSize);
    EXPECT_TRUE(ptr);
    page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    EXPECT_FALSE(page->freelistHead);
    totalSlots = (page->bucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize) / (pageSize + kExtraAllocSize);
    EXPECT_EQ(totalSlots - 1, page->numUnprovisionedSlots);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    TestShutdown();
}
    866 
// Test some of the fragmentation-resistant properties of the allocator.
TEST(PartitionAllocTest, PageRefilling)
{
    TestSetup();
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];

    // Grab two full pages and a non-full page.
    WTF::PartitionPage* page1 = GetFullPage(kTestAllocSize);
    WTF::PartitionPage* page2 = GetFullPage(kTestAllocSize);
    void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
    EXPECT_TRUE(ptr);
    // Neither full page should still be the bucket's active page.
    EXPECT_NE(page1, bucket->activePagesHead);
    EXPECT_NE(page2, bucket->activePagesHead);
    WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);

    // Work out a pointer into page2 and free it; and then page1 and free it.
    // (kPointerOffset skips past the allocator's per-allocation prefix —
    // presumably the debug cookie; see the test fixture.)
    char* ptr2 = reinterpret_cast<char*>(WTF::partitionPageToPointer(page1)) + kPointerOffset;
    partitionFree(ptr2);
    ptr2 = reinterpret_cast<char*>(WTF::partitionPageToPointer(page2)) + kPointerOffset;
    partitionFree(ptr2);

    // If we perform two allocations from the same bucket now, we expect to
    // refill both the nearly full pages.
    (void) partitionAlloc(allocator.root(), kTestAllocSize);
    (void) partitionAlloc(allocator.root(), kTestAllocSize);
    // The partial page should not have received either allocation.
    EXPECT_EQ(1, page->numAllocatedSlots);

    FreeFullPage(page2);
    FreeFullPage(page1);
    partitionFree(ptr);

    TestShutdown();
}
    901 
    902 // Basic tests to ensure that allocations work for partial page buckets.
    903 TEST(PartitionAllocTest, PartialPages)
    904 {
    905     TestSetup();
    906 
    907     // Find a size that is backed by a partial partition page.
    908     size_t size = sizeof(void*);
    909     WTF::PartitionBucket* bucket = 0;
    910     while (size < kTestMaxAllocation) {
    911         bucket = &allocator.root()->buckets()[size >> WTF::kBucketShift];
    912         if (bucket->numSystemPagesPerSlotSpan % WTF::kNumSystemPagesPerPartitionPage)
    913             break;
    914         size += sizeof(void*);
    915     }
    916     EXPECT_LT(size, kTestMaxAllocation);
    917 
    918     WTF::PartitionPage* page1 = GetFullPage(size);
    919     WTF::PartitionPage* page2 = GetFullPage(size);
    920     FreeFullPage(page2);
    921     FreeFullPage(page1);
    922 
    923     TestShutdown();
    924 }
    925 
// Test correct handling if our mapping collides with another.
TEST(PartitionAllocTest, MappingCollision)
{
    TestSetup();
    // The -2 is because the first and last partition pages in a super page are
    // guard pages.
    size_t numPartitionPagesNeeded = WTF::kNumPartitionPagesPerSuperPage - 2;
    OwnPtr<WTF::PartitionPage*[]> firstSuperPagePages = adoptArrayPtr(new WTF::PartitionPage*[numPartitionPagesNeeded]);
    OwnPtr<WTF::PartitionPage*[]> secondSuperPagePages = adoptArrayPtr(new WTF::PartitionPage*[numPartitionPagesNeeded]);

    // Fill an entire super page with full partition pages.
    size_t i;
    for (i = 0; i < numPartitionPagesNeeded; ++i)
        firstSuperPagePages[i] = GetFullPage(kTestAllocSize);

    // The first usable partition page sits one guard page into the super
    // page, so its offset within the super page is exactly kPartitionPageSize.
    char* pageBase = reinterpret_cast<char*>(WTF::partitionPageToPointer(firstSuperPagePages[0]));
    EXPECT_EQ(WTF::kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & WTF::kSuperPageOffsetMask);
    pageBase -= WTF::kPartitionPageSize;
    // Map a single system page either side of the mapping for our allocations,
    // with the goal of tripping up alignment of the next mapping.
    void* map1 = WTF::allocPages(pageBase - WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
    EXPECT_TRUE(map1);
    void* map2 = WTF::allocPages(pageBase + WTF::kSuperPageSize, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
    EXPECT_TRUE(map2);
    WTF::setSystemPagesInaccessible(map1, WTF::kPageAllocationGranularity);
    WTF::setSystemPagesInaccessible(map2, WTF::kPageAllocationGranularity);

    // Force allocation of a second super page while the blockers are in place.
    for (i = 0; i < numPartitionPagesNeeded; ++i)
        secondSuperPagePages[i] = GetFullPage(kTestAllocSize);

    WTF::freePages(map1, WTF::kPageAllocationGranularity);
    WTF::freePages(map2, WTF::kPageAllocationGranularity);

    // Despite the collision, the second super page must still be correctly
    // aligned, with its first usable page one guard page in.
    pageBase = reinterpret_cast<char*>(partitionPageToPointer(secondSuperPagePages[0]));
    EXPECT_EQ(WTF::kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & WTF::kSuperPageOffsetMask);
    pageBase -= WTF::kPartitionPageSize;
    // Map a single system page either side of the mapping for our allocations,
    // with the goal of tripping up alignment of the next mapping.
    map1 = WTF::allocPages(pageBase - WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
    EXPECT_TRUE(map1);
    map2 = WTF::allocPages(pageBase + WTF::kSuperPageSize, WTF::kPageAllocationGranularity, WTF::kPageAllocationGranularity);
    EXPECT_TRUE(map2);
    WTF::setSystemPagesInaccessible(map1, WTF::kPageAllocationGranularity);
    WTF::setSystemPagesInaccessible(map2, WTF::kPageAllocationGranularity);

    WTF::PartitionPage* pageInThirdSuperPage = GetFullPage(kTestAllocSize);
    WTF::freePages(map1, WTF::kPageAllocationGranularity);
    WTF::freePages(map2, WTF::kPageAllocationGranularity);

    // The page handed out in the third super page must be partition-page
    // aligned.
    EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kPartitionPageOffsetMask);

    // And make sure we really did get a page in a new superpage.
    EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(firstSuperPagePages[0])) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kSuperPageBaseMask);
    EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(secondSuperPagePages[0])) & WTF::kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & WTF::kSuperPageBaseMask);

    FreeFullPage(pageInThirdSuperPage);
    for (i = 0; i < numPartitionPagesNeeded; ++i) {
        FreeFullPage(firstSuperPagePages[i]);
        FreeFullPage(secondSuperPagePages[i]);
    }

    TestShutdown();
}
    988 
// Tests that pages in the free page cache do get freed as appropriate.
TEST(PartitionAllocTest, FreeCache)
{
    TestSetup();

    EXPECT_EQ(0U, allocator.root()->totalSizeOfCommittedPages);

    size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
    size_t bucketIdx = (bigSize + kExtraAllocSize) >> WTF::kBucketShift;
    WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];

    void* ptr = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr);
    WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(0, bucket->freePagesHead);
    EXPECT_EQ(1, page->numAllocatedSlots);
    EXPECT_EQ(WTF::kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages);
    partitionFree(ptr);
    EXPECT_EQ(0, page->numAllocatedSlots);
    // The now-empty page should go into the free cache (freeCacheIndex != -1)
    // rather than being decommitted immediately.
    EXPECT_NE(-1, page->freeCacheIndex);
    EXPECT_TRUE(page->freelistHead);

    CycleFreeCache(kTestAllocSize);

    // Flushing the cache should have really freed the unused page.
    EXPECT_FALSE(page->freelistHead);
    EXPECT_EQ(-1, page->freeCacheIndex);
    EXPECT_EQ(0, page->numAllocatedSlots);
    // Only the pages used by CycleFreeCache's own bucket should remain
    // committed.
    WTF::PartitionBucket* cycleFreeCacheBucket = &allocator.root()->buckets()[kTestBucketIndex];
    EXPECT_EQ(cycleFreeCacheBucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize, allocator.root()->totalSizeOfCommittedPages);

    // Check that an allocation works ok whilst in this state (a free'd page
    // as the active pages head).
    ptr = partitionAlloc(allocator.root(), bigSize);
    EXPECT_FALSE(bucket->freePagesHead);
    partitionFree(ptr);

    // Also check that a page that is bouncing immediately between empty and
    // used does not get freed.
    for (size_t i = 0; i < WTF::kMaxFreeableSpans * 2; ++i) {
        ptr = partitionAlloc(allocator.root(), bigSize);
        EXPECT_TRUE(page->freelistHead);
        partitionFree(ptr);
        EXPECT_TRUE(page->freelistHead);
    }
    EXPECT_EQ(WTF::kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages);
    TestShutdown();
}
   1037 
// Tests for a bug we had with losing references to free pages.
TEST(PartitionAllocTest, LostFreePagesBug)
{
    TestSetup();

    size_t size = WTF::kPartitionPageSize - kExtraAllocSize;

    void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr);
    void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr2);

    WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
    WTF::PartitionPage* page2 = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr2));
    WTF::PartitionBucket* bucket = page->bucket;

    EXPECT_EQ(0, bucket->freePagesHead);
    // NOTE(review): -1 appears to be the sentinel numAllocatedSlots value for
    // a page that is full and off the active list — confirm against
    // PartitionAlloc.h.
    EXPECT_EQ(-1, page->numAllocatedSlots);
    EXPECT_EQ(1, page2->numAllocatedSlots);

    partitionFreeGeneric(genericAllocator.root(), ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr2);

    // Both pages are now empty but retained with their freelists intact.
    EXPECT_EQ(0, bucket->freePagesHead);
    EXPECT_EQ(0, page->numAllocatedSlots);
    EXPECT_EQ(0, page2->numAllocatedSlots);
    EXPECT_TRUE(page->freelistHead);
    EXPECT_TRUE(page2->freelistHead);

    CycleGenericFreeCache(kTestAllocSize);

    // Cycling the free cache decommits the empty pages' freelists.
    EXPECT_FALSE(page->freelistHead);
    EXPECT_FALSE(page2->freelistHead);

    EXPECT_FALSE(bucket->freePagesHead);
    EXPECT_TRUE(bucket->activePagesHead);
    EXPECT_TRUE(bucket->activePagesHead->nextPage);

    // At this moment, we have two freed pages, on the freelist.

    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    EXPECT_TRUE(bucket->activePagesHead);
    EXPECT_TRUE(bucket->freePagesHead);

    CycleGenericFreeCache(kTestAllocSize);

    // We're now set up to trigger the bug by scanning over the active pages
    // list, where the current active page is freed, and there exists at least
    // one freed page in the free pages list.
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // The regression check: the free page must not have been lost.
    EXPECT_TRUE(bucket->activePagesHead);
    EXPECT_TRUE(bucket->freePagesHead);

    TestShutdown();
}
   1099 
   1100 #if !OS(ANDROID)
   1101 
// Make sure that malloc(-1) dies.
// In the past, we had an integer overflow that would alias malloc(-1) to
// malloc(0), which is not good.
TEST(PartitionAllocDeathTest, LargeAllocs)
{
    TestSetup();
    // Largest alloc.
    EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), static_cast<size_t>(-1)), "");
    // And the smallest allocation we expect to die. Anything above INT_MAX
    // is treated as invalid (see the predicted-size check in the AllocSizes
    // test, which expects INT_MAX itself to be accepted).
    EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), static_cast<size_t>(INT_MAX) + 1), "");

    TestShutdown();
}
   1115 
// Check that our immediate double-free detection works.
TEST(PartitionAllocDeathTest, ImmediateDoubleFree)
{
    TestSetup();

    void* ptr = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Freeing the same pointer again, with it still at the freelist head,
    // must trap.
    EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), "");

    TestShutdown();
}
   1129 
// Check that our refcount-based double-free detection works.
TEST(PartitionAllocDeathTest, RefcountDoubleFree)
{
    TestSetup();

    void* ptr = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
    EXPECT_TRUE(ptr);
    void* ptr2 = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
    EXPECT_TRUE(ptr2);
    partitionFreeGeneric(genericAllocator.root(), ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr2);
    // This is not an immediate double-free so our immediate detection won't
    // fire. However, it does take the "refcount" of the partition page to -1,
    // which is illegal and should be trapped.
    EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), "");

    TestShutdown();
}
   1148 
// Check that guard pages are present where expected.
TEST(PartitionAllocDeathTest, GuardPages)
{
    TestSetup();

    // This large size will result in a direct mapped allocation with guard
    // pages at either end.
    size_t size = (WTF::kGenericMaxBucketed + WTF::kSystemPageSize) - kExtraAllocSize;
    void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr);
    // Back up over the per-allocation prefix so charPtr is the true start of
    // the mapped region.
    char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset;

    // One byte before the region and one byte past its end (including the
    // extra cookie bytes) should both land in inaccessible guard pages.
    EXPECT_DEATH(*(charPtr - 1) = 'A', "");
    EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', "");

    partitionFreeGeneric(genericAllocator.root(), ptr);

    TestShutdown();
}
   1168 
   1169 #endif // !OS(ANDROID)
   1170 
   1171 // Tests that the countLeadingZeros() functions work to our satisfaction.
   1172 // It doesn't seem worth the overhead of a whole new file for these tests, so
   1173 // we'll put them here since partitionAllocGeneric will depend heavily on these
   1174 // functions working correctly.
   1175 TEST(PartitionAllocTest, CLZWorks)
   1176 {
   1177     EXPECT_EQ(32u, WTF::countLeadingZeros32(0u));
   1178     EXPECT_EQ(31u, WTF::countLeadingZeros32(1u));
   1179     EXPECT_EQ(1u, WTF::countLeadingZeros32(1u << 30));
   1180     EXPECT_EQ(0u, WTF::countLeadingZeros32(1u << 31));
   1181 
   1182 #if CPU(64BIT)
   1183     EXPECT_EQ(64u, WTF::countLeadingZerosSizet(0ull));
   1184     EXPECT_EQ(63u, WTF::countLeadingZerosSizet(1ull));
   1185     EXPECT_EQ(32u, WTF::countLeadingZerosSizet(1ull << 31));
   1186     EXPECT_EQ(1u, WTF::countLeadingZerosSizet(1ull << 62));
   1187     EXPECT_EQ(0u, WTF::countLeadingZerosSizet(1ull << 63));
   1188 #else
   1189     EXPECT_EQ(32u, WTF::countLeadingZerosSizet(0u));
   1190     EXPECT_EQ(31u, WTF::countLeadingZerosSizet(1u));
   1191     EXPECT_EQ(1u, WTF::countLeadingZerosSizet(1u << 30));
   1192     EXPECT_EQ(0u, WTF::countLeadingZerosSizet(1u << 31));
   1193 #endif
   1194 }
   1195 
   1196 } // namespace
   1197 
   1198 #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
   1199