//
// Copyright (c) 2002-2010 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//

#include "compiler/PoolAlloc.h"

#ifndef _MSC_VER
#include <stdint.h>
#endif
#include <assert.h>
#include <stdio.h>
#include <new>

#include "compiler/InitializeGlobals.h"
#include "compiler/osinclude.h"

OS_TLSIndex PoolIndex = OS_INVALID_TLS_INDEX;

void InitializeGlobalPools()
{
    TThreadGlobalPools* globalPools = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));
    if (globalPools)
        return;

    TThreadGlobalPools* threadData = new TThreadGlobalPools();
    threadData->globalPoolAllocator = 0;

    OS_SetTLSValue(PoolIndex, threadData);
}

void FreeGlobalPools()
{
    // Release the allocated memory for this thread.
    TThreadGlobalPools* globalPools = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));
    if (!globalPools)
        return;

    delete globalPools;

    // Clear the slot so a later InitializeGlobalPools() on this thread
    // does not see a dangling pointer.
    OS_SetTLSValue(PoolIndex, 0);
}

bool InitializePoolIndex()
{
    // Allocate a TLS index.
    if ((PoolIndex = OS_AllocTLSIndex()) == OS_INVALID_TLS_INDEX)
        return false;

    return true;
}

void FreePoolIndex()
{
    // Release the TLS index.
    OS_FreeTLSIndex(PoolIndex);
}

TPoolAllocator& GetGlobalPoolAllocator()
{
    // Assumes InitializeGlobalPools() has run on this thread and a pool
    // has been installed with SetGlobalPoolAllocator().
    TThreadGlobalPools* threadData = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));

    return *threadData->globalPoolAllocator;
}

void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator)
{
    TThreadGlobalPools* threadData = static_cast<TThreadGlobalPools*>(OS_GetTLSValue(PoolIndex));

    threadData->globalPoolAllocator = poolAllocator;
}
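
//
// Illustrative per-thread wiring of the pieces above (a sketch; this
// call sequence is an assumption about the host, not code from this
// file):
//
//     InitializePoolIndex();          // once per process
//     InitializeGlobalPools();        // once per thread
//     TPoolAllocator pool;
//     SetGlobalPoolAllocator(&pool);  // install `pool` for this thread
//     void* p = GetGlobalPoolAllocator().allocate(64);
//     FreeGlobalPools();              // per-thread teardown
//     FreePoolIndex();                // once at process teardown
//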

//
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//
TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
    pageSize(growthIncrement),
    alignment(allocationAlignment),
    freeList(0),
    inUseList(0),
    numCalls(0),
    totalBytes(0)
{
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (pageSize < 4*1024)
        pageSize = 4*1024;

    //
    // A large currentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    currentPageOffset = pageSize;

    //
    // Adjust alignment to be at least pointer aligned and
    // power of 2.
    //
    size_t minAlign = sizeof(void*);
    alignment &= ~(minAlign - 1);
    if (alignment < minAlign)
        alignment = minAlign;
    size_t a = 1;
    while (a < alignment)
        a <<= 1;
    alignment = a;
    alignmentMask = a - 1;

    //
    // Align header skip
    //
    headerSkip = minAlign;
    if (headerSkip < sizeof(tHeader)) {
        headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    }
}
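
//
// Worked example of the alignment rounding above, assuming 8-byte
// pointers (the concrete numbers are illustrative, not from this file):
//
//     allocationAlignment = 12
//     alignment &= ~(8 - 1)  -> 8   (round down to a multiple of 8)
//     8 >= minAlign, so no bump is needed
//     power-of-two loop: a = 1, 2, 4, 8 -> alignment = 8, alignmentMask = 7
//
// Offsets are then rounded up with (offset + alignmentMask) & ~alignmentMask,
// e.g. (13 + 7) & ~7 = 16.
//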

TPoolAllocator::~TPoolAllocator()
{
    while (inUseList) {
        tHeader* next = inUseList->nextPage;
        inUseList->~tHeader();
        delete [] reinterpret_cast<char*>(inUseList);
        inUseList = next;
    }

    //
    // We do not check the guard blocks here, because they were already
    // checked when each block was placed on the free list.
    //
    while (freeList) {
        tHeader* next = freeList->nextPage;
        delete [] reinterpret_cast<char*>(freeList);
        freeList = next;
    }
}

// Define the static members out of line; in-class initialization of
// static const members is not handled by older compilers (MSVC++ 6.0).
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal   = 0xfe;
const unsigned char TAllocation::userDataFill       = 0xcd;

#ifdef GUARD_BLOCKS
    const size_t TAllocation::guardBlockSize = 16;
#else
    const size_t TAllocation::guardBlockSize = 0;
#endif
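
//
// With GUARD_BLOCKS defined, each allocation is presumably laid out as
//
//     [guardBlockSize bytes of 0xfb][numBytes of user data][guardBlockSize bytes of 0xfe]
//
// with the user data pre-filled with 0xcd so uninitialized reads stand
// out in a debugger; with guardBlockSize == 0 the brackets vanish.
// (The fill behavior is implied by the constant names and the comments
// in allocate() below; the actual fill code lives in PoolAlloc.h.)
//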

//
// Check a single guard block for damage
//
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
{
    for (size_t x = 0; x < guardBlockSize; x++) {
        if (blockMem[x] != val) {
            char assertMsg[80];

            // We don't print the assert message.  It's here just to be helpful.
            // snprintf bounds the write to the fixed-size buffer, and %zu
            // matches the size_t type of `size`.
            snprintf(assertMsg, sizeof(assertMsg),
                     "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
                     locText, size, data());
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
}

void TPoolAllocator::push()
{
    tAllocState state = { currentPageOffset, inUseList };

    stack.push_back(state);

    //
    // Indicate there is no current page to allocate from.
    //
    currentPageOffset = pageSize;
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void TPoolAllocator::pop()
{
    if (stack.empty())
        return;

    tHeader* page = stack.back().page;
    currentPageOffset = stack.back().offset;

    while (inUseList != page) {
        // Invoke the destructor to free the allocation list.
        inUseList->~tHeader();

        tHeader* nextInUse = inUseList->nextPage;
        if (inUseList->pageCount > 1) {
            // Multi-page blocks are returned to the OS rather than cached.
            delete [] reinterpret_cast<char*>(inUseList);
        } else {
            // Single pages are kept on the free list for reuse.
            inUseList->nextPage = freeList;
            freeList = inUseList;
        }
        inUseList = nextInUse;
    }

    stack.pop_back();
}

//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void TPoolAllocator::popAll()
{
    while (!stack.empty())
        pop();
}
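
//
// Illustrative push/pop pattern (a sketch, not code from this file; the
// pool name and sizes are made up):
//
//     TPoolAllocator pool;
//     pool.push();                    // open a scope of allocations
//     void* a = pool.allocate(100);   // freed en masse by the pop() below
//     void* b = pool.allocate(200);
//     pool.pop();                     // releases a and b, recycles the page
//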

void* TPoolAllocator::allocate(size_t numBytes)
{
    // If we are using guard blocks, all allocations are bracketed by
    // them: [guardblock][allocation][guardblock].  numBytes is how
    // much memory the caller asked for.  allocationSize is the total
    // size including guard blocks.  In a release build,
    // guardBlockSize=0 and this all gets optimized away.
    size_t allocationSize = TAllocation::allocationSize(numBytes);

    //
    // Just keep some interesting statistics.
    //
    ++numCalls;
    totalBytes += numBytes;

    //
    // Do the allocation, most likely case first, for efficiency.
    // This step could be moved to be inline sometime.
    //
    if (currentPageOffset + allocationSize <= pageSize) {
        //
        // Safe to allocate from currentPageOffset.
        //
        unsigned char* memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
        currentPageOffset += allocationSize;
        currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;

        return initializeAllocation(inUseList, memory, numBytes);
    }

    if (allocationSize + headerSkip > pageSize) {
        //
        // Do a multi-page allocation.  Don't mix these with the others.
        // The OS is efficient at allocating and freeing multiple pages.
        //
        size_t numBytesToAlloc = allocationSize + headerSkip;

        // Use nothrow new so the null check below is meaningful; plain
        // new would throw instead of returning 0.
        tHeader* memory = reinterpret_cast<tHeader*>(::new(std::nothrow) char[numBytesToAlloc]);
        if (memory == 0)
            return 0;

        // Use placement-new to initialize header
        new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
        inUseList = memory;

        currentPageOffset = pageSize;  // make next allocation come from a new page

        // No guard blocks for multi-page allocations (yet)
        return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
    }

    //
    // Need a simple page to allocate from.
    //
    tHeader* memory;
    if (freeList) {
        memory = freeList;
        freeList = freeList->nextPage;
    } else {
        // Use nothrow new so the null check below is meaningful.
        memory = reinterpret_cast<tHeader*>(::new(std::nothrow) char[pageSize]);
        if (memory == 0)
            return 0;
    }

    // Use placement-new to initialize header
    new(memory) tHeader(inUseList, 1);
    inUseList = memory;

    unsigned char* ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
    currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;

    return initializeAllocation(inUseList, ret, numBytes);
}
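
//
// Sketch of the three paths in allocate(), with illustrative numbers
// (pageSize = 4096, headerSkip = 16; allocationSize adds the TAllocation
// header and any guard blocks to the caller's numBytes):
//
//     1. allocationSize fits after currentPageOffset -> bump the offset
//        within the current page and return (the common, fast path).
//     2. allocationSize + headerSkip > pageSize (e.g. an 8000-byte
//        request) -> a dedicated multi-page block is allocated, and
//        currentPageOffset is reset so the next small request starts a
//        fresh page.
//     3. Otherwise -> take a page from the free list (or the OS), make
//        it the current page, and allocate from its start.
//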

//
// Check all allocations in a list for damage by calling check on each.
//
void TAllocation::checkAllocList() const
{
    for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
        alloc->check();
}