//
// Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//

#include "compiler/PoolAlloc.h"

#ifndef _MSC_VER
#include <stdint.h>
#endif
#include <stdio.h>

#include "common/angleutils.h"
#include "compiler/InitializeGlobals.h"
#include "compiler/osinclude.h"

OS_TLSIndex PoolIndex = OS_INVALID_TLS_INDEX;

bool InitializePoolIndex()
{
    assert(PoolIndex == OS_INVALID_TLS_INDEX);

    PoolIndex = OS_AllocTLSIndex();
    return PoolIndex != OS_INVALID_TLS_INDEX;
}

void FreePoolIndex()
{
    assert(PoolIndex != OS_INVALID_TLS_INDEX);

    OS_FreeTLSIndex(PoolIndex);
    PoolIndex = OS_INVALID_TLS_INDEX;
}

TPoolAllocator* GetGlobalPoolAllocator()
{
    assert(PoolIndex != OS_INVALID_TLS_INDEX);
    return static_cast<TPoolAllocator*>(OS_GetTLSValue(PoolIndex));
}

void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator)
{
    assert(PoolIndex != OS_INVALID_TLS_INDEX);
    OS_SetTLSValue(PoolIndex, poolAllocator);
}
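
//
// Typical per-thread setup, shown only as an illustrative sketch
// (hypothetical caller code, not part of this file); it assumes
// InitializePoolIndex() has already succeeded:
//
//     TPoolAllocator* allocator = new TPoolAllocator(8*1024, 16);
//     SetGlobalPoolAllocator(allocator);
//     ...                                  // allocations come from the pool
//     SetGlobalPoolAllocator(0);
//     delete allocator;
//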

//
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//
TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
    pageSize(growthIncrement),
    alignment(allocationAlignment),
    freeList(0),
    inUseList(0),
    numCalls(0),
    totalBytes(0)
{
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (pageSize < 4*1024)
        pageSize = 4*1024;

    //
    // A large currentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    currentPageOffset = pageSize;

    //
    // Adjust alignment to be at least pointer aligned and
    // a power of 2.
    //
    size_t minAlign = sizeof(void*);
    alignment &= ~(minAlign - 1);
    if (alignment < minAlign)
        alignment = minAlign;
    size_t a = 1;
    while (a < alignment)
        a <<= 1;
    alignment = a;
    alignmentMask = a - 1;

    //
    // Align header skip
    //
    headerSkip = minAlign;
    if (headerSkip < sizeof(tHeader)) {
        headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    }
}
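
//
// Worked example of the adjustment above (assuming a 64-bit build, so
// minAlign == 8): an allocationAlignment of 20 is masked down to 16, the
// power-of-2 loop leaves it at 16, and alignmentMask becomes 15.  If
// sizeof(tHeader) were, say, 24, headerSkip would then round up to
// (24 + 15) & ~15 == 32.
//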

TPoolAllocator::~TPoolAllocator()
{
    while (inUseList) {
        tHeader* next = inUseList->nextPage;
        inUseList->~tHeader();
        delete [] reinterpret_cast<char*>(inUseList);
        inUseList = next;
    }

    // We should not check the guard blocks
    // here, because we did it already when the block was
    // placed into the free list.
    //
    while (freeList) {
        tHeader* next = freeList->nextPage;
        delete [] reinterpret_cast<char*>(freeList);
        freeList = next;
    }
}

// Support MSVC++ 6.0
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal   = 0xfe;
const unsigned char TAllocation::userDataFill       = 0xcd;

#ifdef GUARD_BLOCKS
    const size_t TAllocation::guardBlockSize = 16;
#else
    const size_t TAllocation::guardBlockSize = 0;
#endif
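
//
// Illustrative layout sketch (the exact layout lives in PoolAlloc.h):
// with GUARD_BLOCKS defined, each allocation handed back to the caller
// is bracketed roughly as
//
//     [guard, 16 x 0xfb][user data, filled with 0xcd][guard, 16 x 0xfe]
//
// plus a TAllocation tracking header.  Without GUARD_BLOCKS,
// guardBlockSize is 0 and the bracketing disappears.
//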

//
// Check a single guard block for damage
//
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
{
#ifdef GUARD_BLOCKS
    for (size_t x = 0; x < guardBlockSize; x++) {
        if (blockMem[x] != val) {
            char assertMsg[80];

            // We don't print the assert message.  It's here just to be helpful.
#if defined(_MSC_VER)
            snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n",
                    locText, size, data());
#else
            snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
                    locText, size, data());
#endif
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#endif
}


void TPoolAllocator::push()
{
    tAllocState state = { currentPageOffset, inUseList };

    stack.push_back(state);

    //
    // Indicate there is no current page to allocate from.
    //
    currentPageOffset = pageSize;
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void TPoolAllocator::pop()
{
    if (stack.size() < 1)
        return;

    tHeader* page = stack.back().page;
    currentPageOffset = stack.back().offset;

    while (inUseList != page) {
        // invoke destructor to free allocation list
        inUseList->~tHeader();

        tHeader* nextInUse = inUseList->nextPage;
        if (inUseList->pageCount > 1)
            delete [] reinterpret_cast<char*>(inUseList);
        else {
            inUseList->nextPage = freeList;
            freeList = inUseList;
        }
        inUseList = nextInUse;
    }

    stack.pop_back();
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void TPoolAllocator::popAll()
{
    while (stack.size() > 0)
        pop();
}
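
//
// Illustrative push()/pop() usage (hypothetical caller code, not part of
// this file):
//
//     TPoolAllocator pool(8*1024, 16);
//     pool.push();                      // remember the current mark
//     void* a = pool.allocate(100);     // individual allocations...
//     void* b = pool.allocate(4000);
//     pool.pop();                       // ...all reclaimed at once; a and b
//                                       // are invalid past this point
//
// pop() keeps single pages on the free list for reuse and only returns
// multi-page blocks to the OS.
//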

void* TPoolAllocator::allocate(size_t numBytes)
{
    //
    // Just keep some interesting statistics.
    //
    ++numCalls;
    totalBytes += numBytes;

    // If we are using guard blocks, all allocations are bracketed by
    // them: [guardblock][allocation][guardblock].  numBytes is how
    // much memory the caller asked for.  allocationSize is the total
    // size including guard blocks.  In release build,
    // guardBlockSize=0 and this all gets optimized away.
    size_t allocationSize = TAllocation::allocationSize(numBytes);
    // Detect integer overflow.
    if (allocationSize < numBytes)
        return 0;

    //
    // Do the allocation, most likely case first, for efficiency.
    // This step could be moved to be inline sometime.
    //
    if (allocationSize <= pageSize - currentPageOffset) {
        //
        // Safe to allocate from currentPageOffset.
        //
        unsigned char* memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
        currentPageOffset += allocationSize;
        currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;

        return initializeAllocation(inUseList, memory, numBytes);
    }

    if (allocationSize > pageSize - headerSkip) {
        //
        // Do a multi-page allocation.  Don't mix these with the others.
        // The OS is efficient at allocating and freeing multiple pages.
        //
        size_t numBytesToAlloc = allocationSize + headerSkip;
        // Detect integer overflow.
        if (numBytesToAlloc < allocationSize)
            return 0;

        tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
        if (memory == 0)
            return 0;

        // Use placement-new to initialize header
        new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
        inUseList = memory;

        currentPageOffset = pageSize;  // make next allocation come from a new page

        // No guard blocks for multi-page allocations (yet)
        return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
    }

    //
    // Need a simple page to allocate from.
    //
    tHeader* memory;
    if (freeList) {
        memory = freeList;
        freeList = freeList->nextPage;
    } else {
        memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
        if (memory == 0)
            return 0;
    }

    // Use placement-new to initialize header
    new(memory) tHeader(inUseList, 1);
    inUseList = memory;

    unsigned char* ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
    currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;

    return initializeAllocation(inUseList, ret, numBytes);
}
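
//
// Worked example for the fresh-page path above (illustrative; assumes
// headerSkip == 16, alignment == 16, and that
// TAllocation::allocationSize(100) comes to 124): after allocate(100)
// starts a new page, currentPageOffset becomes (16 + 124 + 15) & ~15 == 144,
// so the next small request is carved from offset 144 of the same page via
// the fast path at the top of the function.
//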


//
// Check all allocations in a list for damage by calling check on each.
//
void TAllocation::checkAllocList() const
{
    for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
        alloc->check();
}
    295