/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "wtf/PartitionAlloc.h"

#include <string.h>

#ifndef NDEBUG
#include <stdio.h>
#endif

// Two partition pages are used as guard / metadata pages, so make sure the
// super page size is bigger.
COMPILE_ASSERT(WTF::kPartitionPageSize * 4 <= WTF::kSuperPageSize, ok_super_page_size);
COMPILE_ASSERT(!(WTF::kSuperPageSize % WTF::kPartitionPageSize), ok_super_page_multiple);
// Four system pages give us room to hack out a still-guard-paged piece
// of metadata in the middle of a guard partition page.
COMPILE_ASSERT(WTF::kSystemPageSize * 4 <= WTF::kPartitionPageSize, ok_partition_page_size);
COMPILE_ASSERT(!(WTF::kPartitionPageSize % WTF::kSystemPageSize), ok_partition_page_multiple);
COMPILE_ASSERT(sizeof(WTF::PartitionPage) <= WTF::kPageMetadataSize, PartitionPage_not_too_big);
COMPILE_ASSERT(sizeof(WTF::PartitionSuperPageExtentEntry) <= WTF::kPageMetadataSize, PartitionSuperPageExtentEntry_not_too_big);
COMPILE_ASSERT(WTF::kPageMetadataSize * WTF::kNumPartitionPagesPerSuperPage <= WTF::kSystemPageSize, page_metadata_fits_in_hole);
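
// Worked layout example for the asserts above (assuming the representative
// constants from PartitionAlloc.h: 4KB system pages and partition pages of
// 4 system pages): the first and last partition pages of every super page
// are guard pages, with a single accessible system page carved out of the
// leading guard page to hold the PartitionPage metadata array plus a small
// amount of extent metadata. That leaves
// kNumPartitionPagesPerSuperPage - 2 partition pages for payload.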

namespace WTF {

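// partitionBucketPageSize() picks how many system pages to commit per
// partition page for a given slot size, minimizing a waste ratio: the slack
// at the end of the committed region, plus a nominal sizeof(void*) charge
// for every system page left unfaulted. An illustrative example (assuming
// 4KB system pages, 4 system pages per partition page, 8-byte pointers and
// size == 896): 3 pages fit 13 slots and waste 640 + 8 bytes (ratio ~0.053),
// while 4 pages fit 18 slots and waste only 256 bytes (ratio ~0.016), so
// 4 pages win.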
static size_t partitionBucketPageSize(size_t size)
{
    double bestWasteRatio = 1.0;
    size_t bestPages = 0;
    for (size_t i = kNumSystemPagesPerPartitionPage - 1; i <= kNumSystemPagesPerPartitionPage; ++i) {
        size_t pageSize = kSystemPageSize * i;
        size_t numSlots = pageSize / size;
        size_t waste = pageSize - (numSlots * size);
        // Leaving a page unfaulted is not free; the page will occupy an empty page table entry.
        // Make a simple attempt to account for that.
        waste += sizeof(void*) * (kNumSystemPagesPerPartitionPage - i);
        double wasteRatio = (double) waste / (double) pageSize;
        if (wasteRatio < bestWasteRatio) {
            bestWasteRatio = wasteRatio;
            bestPages = i;
        }
    }
    ASSERT(bestPages > 0);
    return bestPages * kSystemPageSize;
}

static ALWAYS_INLINE void partitionPageMarkFree(PartitionPage* page)
{
    ASSERT(!partitionPageIsFree(page));
    page->numAllocatedSlots = -1;
}

WTF_EXPORT void partitionAllocInit(PartitionRoot* root, size_t numBuckets, size_t maxAllocation)
{
    ASSERT(!root->initialized);
    root->initialized = true;
    root->lock = 0;
    root->totalSizeOfSuperPages = 0;
    root->numBuckets = numBuckets;
    root->maxAllocation = maxAllocation;
    size_t i;
    for (i = 0; i < root->numBuckets; ++i) {
        PartitionBucket* bucket = &root->buckets()[i];
        bucket->root = root;
        bucket->activePagesHead = &root->seedPage;
        bucket->freePagesHead = 0;
        bucket->numFullPages = 0;
        size_t size = partitionBucketSize(bucket);
        bucket->pageSize = partitionBucketPageSize(size);
    }

    root->nextSuperPage = 0;
    root->nextPartitionPage = 0;
    root->nextPartitionPageEnd = 0;
    root->currentExtent = &root->firstExtent;
    root->firstExtent.superPageBase = 0;
    root->firstExtent.superPagesEnd = 0;
    root->firstExtent.next = 0;
    root->seedPage.bucket = &root->seedBucket;
    root->seedPage.activePageNext = 0;
    root->seedPage.numAllocatedSlots = 0;
    root->seedPage.numUnprovisionedSlots = 0;
    partitionPageSetFreelistHead(&root->seedPage, 0);
    // We mark the seed page as free to make sure it is skipped by our logic to
    // find a new active page.
    partitionPageMarkFree(&root->seedPage);

    root->seedBucket.root = root;
    root->seedBucket.activePagesHead = 0;
    root->seedBucket.freePagesHead = 0;
    root->seedBucket.numFullPages = 0;
}
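
// A minimal usage sketch (hypothetical driver code; a real PartitionRoot is
// embedded in a wrapper object that also reserves storage for the bucket
// array that root->buckets() indexes):
//
//   partitionAllocInit(root, numBuckets, maxAllocation);
//   void* p = partitionAllocGeneric(root, 24);
//   partitionFreeGeneric(root, p);
//   bool noLeaks = partitionAllocShutdown(root);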

static bool partitionAllocShutdownBucket(PartitionBucket* bucket)
{
    // Failure here indicates a memory leak.
    bool noLeaks = !bucket->numFullPages;
    PartitionPage* page = bucket->activePagesHead;
    if (page == &bucket->root->seedPage)
        return noLeaks;
    do {
        if (page->numAllocatedSlots)
            noLeaks = false;
        page = page->activePageNext;
    } while (page);

    return noLeaks;
}

bool partitionAllocShutdown(PartitionRoot* root)
{
    bool noLeaks = true;
    ASSERT(root->initialized);
    root->initialized = false;
    size_t i;
    for (i = 0; i < root->numBuckets; ++i) {
        PartitionBucket* bucket = &root->buckets()[i];
        if (!partitionAllocShutdownBucket(bucket))
            noLeaks = false;
    }

    // Now that we've examined all partition pages in all buckets, it's safe
    // to free all our super pages. We first collect the super page pointers
    // on the stack because some of them are themselves stored in super pages.
    char* superPages[kMaxPartitionSize / kSuperPageSize];
    size_t numSuperPages = 0;
    PartitionSuperPageExtentEntry* entry = &root->firstExtent;
    while (entry) {
        char* superPage = entry->superPageBase;
        while (superPage != entry->superPagesEnd) {
            superPages[numSuperPages] = superPage;
            numSuperPages++;
            superPage += kSuperPageSize;
        }
        entry = entry->next;
    }
    ASSERT(numSuperPages == root->totalSizeOfSuperPages / kSuperPageSize);
    for (size_t i = 0; i < numSuperPages; ++i) {
        SuperPageBitmap::unregisterSuperPage(superPages[i]);
        freePages(superPages[i], kSuperPageSize);
    }

    return noLeaks;
}

static ALWAYS_INLINE void* partitionAllocPage(PartitionRoot* root)
{
    if (LIKELY(root->nextPartitionPage != 0)) {
        // In this case, we can still hand out pages from a previous
        // super page allocation.
        char* ret = root->nextPartitionPage;
        root->nextPartitionPage += kPartitionPageSize;
        if (UNLIKELY(root->nextPartitionPage == root->nextPartitionPageEnd)) {
            // We ran out, need to get more pages next time.
            root->nextPartitionPage = 0;
            root->nextPartitionPageEnd = 0;
        }
        return ret;
    }

    // Need a new super page.
    root->totalSizeOfSuperPages += kSuperPageSize;
    RELEASE_ASSERT(root->totalSizeOfSuperPages <= kMaxPartitionSize);
    char* requestedAddress = root->nextSuperPage;
    char* superPage = reinterpret_cast<char*>(allocPages(requestedAddress, kSuperPageSize, kSuperPageSize));
    SuperPageBitmap::registerSuperPage(superPage);
    root->nextSuperPage = superPage + kSuperPageSize;
    char* ret = superPage + kPartitionPageSize;
    root->nextPartitionPage = ret + kPartitionPageSize;
    root->nextPartitionPageEnd = root->nextSuperPage - kPartitionPageSize;
    // Make the first partition page in the super page a guard page, but leave
    // a hole in the middle.
    // This is where we put page metadata and also a tiny amount of extent
    // metadata.
    setSystemPagesInaccessible(superPage, kSystemPageSize);
    setSystemPagesInaccessible(superPage + (kSystemPageSize * 2), kPartitionPageSize - (kSystemPageSize * 2));
    // Also make the last partition page a guard page.
    setSystemPagesInaccessible(superPage + (kSuperPageSize - kPartitionPageSize), kPartitionPageSize);
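
    // The super page is now laid out as:
    //
    //   | guard system page | metadata system page | rest of leading guard
    //   partition page | payload partition pages ... | trailing guard
    //   partition page |
    //
    // i.e. only the metadata hole remains accessible within the two guard
    // partition pages.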

    // If we were after a specific address, but didn't get it, assume that
    // the system chose a lousy address and re-randomize the next
    // allocation.
    if (requestedAddress && requestedAddress != superPage)
        root->nextSuperPage = 0;

    // We allocated a new super page so update super page metadata.
    // First check if this is a new extent or not.
    PartitionSuperPageExtentEntry* currentExtent = root->currentExtent;
    bool isNewExtent = (superPage != requestedAddress);
    if (UNLIKELY(isNewExtent)) {
        if (currentExtent->superPageBase) {
            // We already have a super page, so need to allocate metadata in the linked list.
            PartitionSuperPageExtentEntry* newEntry = reinterpret_cast<PartitionSuperPageExtentEntry*>(partitionSuperPageToMetadataArea(currentExtent->superPageBase));
            newEntry->next = 0;
            currentExtent->next = newEntry;
            currentExtent = newEntry;
            root->currentExtent = newEntry;
        }
        currentExtent->superPageBase = superPage;
        currentExtent->superPagesEnd = superPage + kSuperPageSize;
    } else {
        // We allocated next to an existing extent so just nudge the size up a little.
        currentExtent->superPagesEnd += kSuperPageSize;
        ASSERT(ret >= currentExtent->superPageBase && ret < currentExtent->superPagesEnd);
    }

    return ret;
}

static ALWAYS_INLINE void partitionUnusePage(PartitionPage* page)
{
    void* addr = partitionPageToPointer(page);
    decommitSystemPages(addr, kPartitionPageSize);
}

static ALWAYS_INLINE size_t partitionBucketSlots(const PartitionBucket* bucket)
{
    return bucket->pageSize / partitionBucketSize(bucket);
}

static ALWAYS_INLINE void partitionPageReset(PartitionPage* page, PartitionBucket* bucket)
{
    ASSERT(page != &bucket->root->seedPage);
    page->numAllocatedSlots = 0;
    page->numUnprovisionedSlots = partitionBucketSlots(bucket);
    ASSERT(page->numUnprovisionedSlots > 1);
    page->bucket = bucket;
    // NULLing the freelist is not strictly necessary but it makes an ASSERT
    // in partitionPageAllocAndFillFreelist simpler.
    partitionPageSetFreelistHead(page, 0);
}

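// Provisioning happens lazily, at most one system page's worth of slots at a
// time. An illustrative example (assuming 4KB system pages and a 128-byte
// slot size): the first call below returns the slot at offset 0 and threads
// the remaining 31 slots of that system page onto the freelist; the rest of
// the partition page stays unprovisioned until the freelist runs dry again.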
static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page)
{
    ASSERT(page != &page->bucket->root->seedPage);
    size_t numSlots = page->numUnprovisionedSlots;
    ASSERT(numSlots);
    PartitionBucket* bucket = page->bucket;
    // We should only get here when _every_ slot is either used or unprovisioned.
    // (The third state is "on the freelist". If we have a non-empty freelist, we should not get here.)
    ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket));
    // Similarly, make explicitly sure that the freelist is empty.
    ASSERT(!partitionPageFreelistHead(page));

    size_t size = partitionBucketSize(bucket);
    char* base = reinterpret_cast<char*>(partitionPageToPointer(page));
    char* returnObject = base + (size * page->numAllocatedSlots);
    char* nextFreeObject = returnObject + size;
    char* subPageLimit = reinterpret_cast<char*>((reinterpret_cast<uintptr_t>(returnObject) + kSystemPageSize) & kSystemPageBaseMask);

    size_t numNewFreelistEntries = 0;
    if (LIKELY(subPageLimit > nextFreeObject))
        numNewFreelistEntries = (subPageLimit - nextFreeObject) / size;

    // We always return an object slot -- that's the +1 below.
    // We do not necessarily create any new freelist entries, because we cross
    // sub page boundaries frequently for large bucket sizes.
    numSlots -= (numNewFreelistEntries + 1);
    page->numUnprovisionedSlots = numSlots;
    page->numAllocatedSlots++;

    if (LIKELY(numNewFreelistEntries)) {
        PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>(nextFreeObject);
        partitionPageSetFreelistHead(page, entry);
        while (--numNewFreelistEntries) {
            nextFreeObject += size;
            PartitionFreelistEntry* nextEntry = reinterpret_cast<PartitionFreelistEntry*>(nextFreeObject);
            entry->next = partitionFreelistMask(nextEntry);
            entry = nextEntry;
        }
        entry->next = partitionFreelistMask(0);
    } else {
        partitionPageSetFreelistHead(page, 0);
    }
    return returnObject;
}

// This helper function scans the active page list for a suitable new active
// page, starting at the page passed in. When it finds a suitable new active
// page (one that has free slots), it is also set as the new active page. If
// there is no suitable new active page, the current active page is set to null.
static ALWAYS_INLINE void partitionSetNewActivePage(PartitionBucket* bucket, PartitionPage* page)
{
    ASSERT(page == &bucket->root->seedPage || page->bucket == bucket);
    for (; page; page = page->activePageNext) {
        // Skip over free pages. We need this check first so that we can trust
        // that the page union field really contains a freelist pointer.
        if (UNLIKELY(partitionPageIsFree(page)))
            continue;

        // Page is usable if it has something on the freelist, or unprovisioned
        // slots that can be turned into a freelist.
        if (LIKELY(partitionPageFreelistHead(page) != 0) || LIKELY(page->numUnprovisionedSlots))
            break;
        // If we get here, we found a full page. Skip over it too, and also tag
        // it as full (via a negative value). We need it tagged so that freeing
        // can tell, and move it back into the active page list.
        ASSERT(page->numAllocatedSlots == static_cast<int>(partitionBucketSlots(bucket)));
        page->numAllocatedSlots = -page->numAllocatedSlots;
        ++bucket->numFullPages;
    }

    bucket->activePagesHead = page;
}

static ALWAYS_INLINE void partitionPageSetFreePageNext(PartitionPage* page, PartitionPage* nextFree)
{
    ASSERT(partitionPageIsFree(page));
    page->u.freePageNext = nextFree;
}

static ALWAYS_INLINE PartitionPage* partitionPageFreePageNext(PartitionPage* page)
{
    ASSERT(partitionPageIsFree(page));
    return page->u.freePageNext;
}

void* partitionAllocSlowPath(PartitionBucket* bucket)
{
    // The slow path is called when the freelist is empty.
    ASSERT(!partitionPageFreelistHead(bucket->activePagesHead));

    // First, look for a usable page in the existing active pages list.
    PartitionPage* page = bucket->activePagesHead;
    partitionSetNewActivePage(bucket, page);
    page = bucket->activePagesHead;
    if (LIKELY(page != 0)) {
        if (LIKELY(partitionPageFreelistHead(page) != 0)) {
            PartitionFreelistEntry* ret = partitionPageFreelistHead(page);
            partitionPageSetFreelistHead(page, partitionFreelistMask(ret->next));
            page->numAllocatedSlots++;
            return ret;
        }
        ASSERT(page->numUnprovisionedSlots);
        return partitionPageAllocAndFillFreelist(page);
    }

    // Second, look in our list of freed but reserved pages.
    PartitionPage* newPage = bucket->freePagesHead;
    if (LIKELY(newPage != 0)) {
        ASSERT(newPage != &bucket->root->seedPage);
        bucket->freePagesHead = partitionPageFreePageNext(newPage);
    } else {
        // Third, if we get here, we need a brand new page.
        void* rawNewPage = partitionAllocPage(bucket->root);
        newPage = partitionPointerToPageNoAlignmentCheck(rawNewPage);
    }

    newPage->activePageNext = 0;
    partitionPageReset(newPage, bucket);
    bucket->activePagesHead = newPage;
    return partitionPageAllocAndFillFreelist(newPage);
}

void partitionFreeSlowPath(PartitionPage* page)
{
    PartitionBucket* bucket = page->bucket;
    ASSERT(page != &bucket->root->seedPage);
    ASSERT(bucket->activePagesHead != &bucket->root->seedPage);
    if (LIKELY(page->numAllocatedSlots == 0)) {
        // Page became fully unused.
        // If it's the current page, change it!
        if (LIKELY(page == bucket->activePagesHead)) {
            PartitionPage* newPage = 0;
            if (page->activePageNext) {
                partitionSetNewActivePage(bucket, page->activePageNext);
                newPage = bucket->activePagesHead;
            }

            ASSERT(newPage != page);
            if (UNLIKELY(!newPage)) {
                // We do not free the last active page in a bucket.
                // To do so is a serious performance issue as we can get into
                // a repeating state change between 0 and 1 objects of a given
                // size allocated; and each state change incurs either a system
                // call or a page fault, or more.
                page->activePageNext = 0;
                bucket->activePagesHead = page;
                return;
            }
            bucket->activePagesHead = newPage;
        }

        partitionUnusePage(page);
        // We actually leave the freed page in the active list as well as
        // putting it on the free list. We'll evict it from the active list soon
        // enough in partitionAllocSlowPath. Pulling this trick enables us to
        // use a singly-linked page list for all cases, which is critical in
        // keeping the page metadata structure down to 32 bytes in size.
        partitionPageMarkFree(page);
        partitionPageSetFreePageNext(page, bucket->freePagesHead);
        bucket->freePagesHead = page;
    } else {
        // Ensure that the page is full. That's the only valid case if we
        // arrive here.
        ASSERT(page->numAllocatedSlots < 0);
        // Fully used page became partially used. It must be put back on the
        // non-full page list. Also make it the current page to increase the
        // chances of it being filled up again. The old current page will be
        // the next page.
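        // Note on the arithmetic below: the fast free path has already
        // decremented the negative "full" tag once, so a page that was full
        // with N slots arrives here as -N - 1; negating and subtracting 2
        // restores the true count of N - 1, which the ASSERT verifies.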
        page->numAllocatedSlots = -page->numAllocatedSlots - 2;
        ASSERT(page->numAllocatedSlots == static_cast<int>(partitionBucketSlots(bucket) - 1));
        page->activePageNext = bucket->activePagesHead;
        bucket->activePagesHead = page;
        --bucket->numFullPages;
        // Note: there's an opportunity here to look to see if the old active
        // page is now freeable.
    }
}

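// Strategy sketch for the realloc below: both the old and the new size map to
// bucket indices, with root->numBuckets acting as a pseudo-index for oversized
// allocations that were forwarded to fastMalloc. Matching indices mean the
// existing slot already has room (e.g. growing within the same quantized
// bucket returns the original pointer untouched); otherwise we fall back to
// allocate-copy-free.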
void* partitionReallocGeneric(PartitionRoot* root, void* ptr, size_t newSize)
{
    RELEASE_ASSERT(newSize <= QuantizedAllocation::kMaxUnquantizedAllocation);
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
    return realloc(ptr, newSize);
#else
    size_t oldIndex;
    if (LIKELY(partitionPointerIsValid(root, ptr))) {
        void* realPtr = partitionCookieFreePointerAdjust(ptr);
        PartitionBucket* bucket = partitionPointerToPage(realPtr)->bucket;
        ASSERT(bucket->root == root);
        oldIndex = bucket - root->buckets();
    } else {
        oldIndex = root->numBuckets;
    }

    size_t allocSize = QuantizedAllocation::quantizedSize(newSize);
    allocSize = partitionCookieSizeAdjustAdd(allocSize);
    size_t newIndex = allocSize >> kBucketShift;
    if (newIndex > root->numBuckets)
        newIndex = root->numBuckets;

    if (oldIndex == newIndex) {
        // Same bucket. But an index of root->numBuckets indicates the
        // fastMalloc "bucket", so in that case we're not done; we have to
        // forward to fastRealloc.
        if (oldIndex == root->numBuckets)
            return WTF::fastRealloc(ptr, newSize);
        return ptr;
    }
    // This realloc cannot be resized in-place. Sadness.
    void* ret = partitionAllocGeneric(root, newSize);
    size_t copySize = oldIndex << kBucketShift;
    copySize = partitionCookieSizeAdjustSubtract(copySize);
    if (newSize < copySize)
        copySize = newSize;
    memcpy(ret, ptr, copySize);
    partitionFreeGeneric(root, ptr);
    return ret;
#endif
}

#if CPU(32BIT)
unsigned char SuperPageBitmap::s_bitmap[1 << (32 - kSuperPageShift - 3)];

static int bitmapLock = 0;

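// Index math example for the functions below (assuming kSuperPageShift == 21,
// i.e. 2MB super pages): a pointer such as 0x40200000 shifts down to super
// page number 513 (0x201), giving byteIndex 64 (513 >> 3) and bit 1 (513 & 7).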
void SuperPageBitmap::registerSuperPage(void* ptr)
{
    ASSERT(!isPointerInSuperPage(ptr));
    uintptr_t raw = reinterpret_cast<uintptr_t>(ptr);
    raw >>= kSuperPageShift;
    size_t byteIndex = raw >> 3;
    size_t bit = raw & 7;
    ASSERT(byteIndex < sizeof(s_bitmap));
    // The read/modify/write is not guaranteed atomic, so take a lock.
    spinLockLock(&bitmapLock);
    s_bitmap[byteIndex] |= (1 << bit);
    spinLockUnlock(&bitmapLock);
}

void SuperPageBitmap::unregisterSuperPage(void* ptr)
{
    ASSERT(isPointerInSuperPage(ptr));
    uintptr_t raw = reinterpret_cast<uintptr_t>(ptr);
    raw >>= kSuperPageShift;
    size_t byteIndex = raw >> 3;
    size_t bit = raw & 7;
    ASSERT(byteIndex < sizeof(s_bitmap));
    // The read/modify/write is not guaranteed atomic, so take a lock.
    spinLockLock(&bitmapLock);
    s_bitmap[byteIndex] &= ~(1 << bit);
    spinLockUnlock(&bitmapLock);
}
#endif

#ifndef NDEBUG

void partitionDumpStats(const PartitionRoot& root)
{
    size_t i;
    size_t totalLive = 0;
    size_t totalResident = 0;
    size_t totalFreeable = 0;
    for (i = 0; i < root.numBuckets; ++i) {
        const PartitionBucket& bucket = root.buckets()[i];
        if (bucket.activePagesHead == &bucket.root->seedPage && !bucket.freePagesHead && !bucket.numFullPages) {
            // Empty bucket with no freelist or full pages. Skip reporting it.
            continue;
        }
        size_t numFreePages = 0;
        PartitionPage* freePages = bucket.freePagesHead;
        while (freePages) {
            ++numFreePages;
            freePages = partitionPageFreePageNext(freePages);
        }
        size_t bucketSlotSize = partitionBucketSize(&bucket);
        size_t bucketNumSlots = partitionBucketSlots(&bucket);
        size_t bucketUsefulStorage = bucketSlotSize * bucketNumSlots;
        size_t bucketWaste = bucket.pageSize - bucketUsefulStorage;
        size_t numActiveBytes = bucket.numFullPages * bucketUsefulStorage;
        size_t numResidentBytes = bucket.numFullPages * bucket.pageSize;
        size_t numFreeableBytes = 0;
        size_t numActivePages = 0;
        const PartitionPage* page = bucket.activePagesHead;
        // The active page list is singly linked and null-terminated, so walk
        // it with a plain while loop (this also tolerates an empty list).
        while (page) {
            if (page != &bucket.root->seedPage) {
                ++numActivePages;
                numActiveBytes += (page->numAllocatedSlots * bucketSlotSize);
                size_t pageBytesResident = (bucketNumSlots - page->numUnprovisionedSlots) * bucketSlotSize;
                // Round up to system page size.
                pageBytesResident = (pageBytesResident + kSystemPageOffsetMask) & kSystemPageBaseMask;
                numResidentBytes += pageBytesResident;
                if (!page->numAllocatedSlots)
                    numFreeableBytes += pageBytesResident;
            }
            page = page->activePageNext;
        }
        totalLive += numActiveBytes;
        totalResident += numResidentBytes;
        totalFreeable += numFreeableBytes;
        printf("bucket size %zu (pageSize %zu waste %zu): %zu alloc/%zu commit/%zu freeable bytes, %zu/%zu/%zu full/active/free pages\n", bucketSlotSize, static_cast<size_t>(bucket.pageSize), bucketWaste, numActiveBytes, numResidentBytes, numFreeableBytes, static_cast<size_t>(bucket.numFullPages), numActivePages, numFreePages);
    }
    printf("total live: %zu bytes\n", totalLive);
    printf("total resident: %zu bytes\n", totalResident);
    printf("total freeable: %zu bytes\n", totalFreeable);
    fflush(stdout);
}

#endif // !NDEBUG

} // namespace WTF