// glslang MachineIndependent/PoolAlloc.cpp
      1 //
      2 //Copyright (C) 2002-2005  3Dlabs Inc. Ltd.
      3 //All rights reserved.
      4 //
      5 //Redistribution and use in source and binary forms, with or without
      6 //modification, are permitted provided that the following conditions
      7 //are met:
      8 //
      9 //    Redistributions of source code must retain the above copyright
     10 //    notice, this list of conditions and the following disclaimer.
     11 //
     12 //    Redistributions in binary form must reproduce the above
     13 //    copyright notice, this list of conditions and the following
     14 //    disclaimer in the documentation and/or other materials provided
     15 //    with the distribution.
     16 //
     17 //    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
     18 //    contributors may be used to endorse or promote products derived
     19 //    from this software without specific prior written permission.
     20 //
     21 //THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     22 //"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     23 //LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
     24 //FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
     25 //COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
     26 //INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
     27 //BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
     28 //LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
     29 //CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     30 //LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
     31 //ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     32 //POSSIBILITY OF SUCH DAMAGE.
     33 //
     34 
#include <cassert>

#include "../Include/Common.h"
#include "../Include/PoolAlloc.h"

#include "../Include/InitializeGlobals.h"
#include "../OSDependent/osinclude.h"
     40 
     41 namespace glslang {
     42 
     43 OS_TLSIndex PoolIndex;
     44 
     45 void InitializeMemoryPools()
     46 {
     47     TThreadMemoryPools* pools = static_cast<TThreadMemoryPools*>(OS_GetTLSValue(PoolIndex));
     48     if (pools)
     49         return;
     50 
     51     TPoolAllocator *threadPoolAllocator = new TPoolAllocator();
     52 
     53     TThreadMemoryPools* threadData = new TThreadMemoryPools();
     54 
     55     threadData->threadPoolAllocator = threadPoolAllocator;
     56 
     57     OS_SetTLSValue(PoolIndex, threadData);
     58 }
     59 
     60 void FreeGlobalPools()
     61 {
     62     // Release the allocated memory for this thread.
     63     TThreadMemoryPools* globalPools = static_cast<TThreadMemoryPools*>(OS_GetTLSValue(PoolIndex));
     64     if (! globalPools)
     65         return;
     66 
     67     GetThreadPoolAllocator().popAll();
     68     delete &GetThreadPoolAllocator();
     69     delete globalPools;
     70 }
     71 
     72 bool InitializePoolIndex()
     73 {
     74     // Allocate a TLS index.
     75     if ((PoolIndex = OS_AllocTLSIndex()) == OS_INVALID_TLS_INDEX)
     76         return false;
     77 
     78     return true;
     79 }
     80 
// Return the process-wide TLS index to the OS.  Intended to be called
// only after each thread's pools have been released (FreeGlobalPools).
void FreePoolIndex()
{
    // Release the TLS index.
    OS_FreeTLSIndex(PoolIndex);
}
     86 
     87 TPoolAllocator& GetThreadPoolAllocator()
     88 {
     89     TThreadMemoryPools* threadData = static_cast<TThreadMemoryPools*>(OS_GetTLSValue(PoolIndex));
     90 
     91     return *threadData->threadPoolAllocator;
     92 }
     93 
     94 void SetThreadPoolAllocator(TPoolAllocator& poolAllocator)
     95 {
     96     TThreadMemoryPools* threadData = static_cast<TThreadMemoryPools*>(OS_GetTLSValue(PoolIndex));
     97 
     98     threadData->threadPoolAllocator = &poolAllocator;
     99 }
    100 
    101 //
    102 // Implement the functionality of the TPoolAllocator class, which
    103 // is documented in PoolAlloc.h.
    104 //
    105 TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
    106     pageSize(growthIncrement),
    107     alignment(allocationAlignment),
    108     freeList(0),
    109     inUseList(0),
    110     numCalls(0)
    111 {
    112     //
    113     // Don't allow page sizes we know are smaller than all common
    114     // OS page sizes.
    115     //
    116     if (pageSize < 4*1024)
    117         pageSize = 4*1024;
    118 
    119     //
    120     // A large currentPageOffset indicates a new page needs to
    121     // be obtained to allocate memory.
    122     //
    123     currentPageOffset = pageSize;
    124 
    125     //
    126     // Adjust alignment to be at least pointer aligned and
    127     // power of 2.
    128     //
    129     size_t minAlign = sizeof(void*);
    130     alignment &= ~(minAlign - 1);
    131     if (alignment < minAlign)
    132         alignment = minAlign;
    133     size_t a = 1;
    134     while (a < alignment)
    135         a <<= 1;
    136     alignment = a;
    137     alignmentMask = a - 1;
    138 
    139     //
    140     // Align header skip
    141     //
    142     headerSkip = minAlign;
    143     if (headerSkip < sizeof(tHeader)) {
    144         headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    145     }
    146 
    147     push();
    148 }
    149 
    150 TPoolAllocator::~TPoolAllocator()
    151 {
    152 	while (inUseList) {
    153 	    tHeader* next = inUseList->nextPage;
    154         inUseList->~tHeader();
    155         delete [] reinterpret_cast<char*>(inUseList);
    156 	    inUseList = next;
    157 	}
    158 
    159     //
    160     // Always delete the free list memory - it can't be being
    161     // (correctly) referenced, whether the pool allocator was
    162     // global or not.  We should not check the guard blocks
    163     // here, because we did it already when the block was
    164     // placed into the free list.
    165     //
    166     while (freeList) {
    167         tHeader* next = freeList->nextPage;
    168         delete [] reinterpret_cast<char*>(freeList);
    169         freeList = next;
    170     }
    171 }
    172 
// Fill patterns used to detect damage around allocations when
// GUARD_BLOCKS is enabled: distinct values for the region before the
// user data, the region after it, and the user data itself.
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal   = 0xfe;
const unsigned char TAllocation::userDataFill       = 0xcd;

// Size of each guard region.  Zero in non-guarded builds so the
// bracketing arithmetic in allocate() optimizes away.
#   ifdef GUARD_BLOCKS
    const size_t TAllocation::guardBlockSize = 16;
#   else
    const size_t TAllocation::guardBlockSize = 0;
#   endif
    182 
    183 //
    184 // Check a single guard block for damage
    185 //
    186 #ifdef GUARD_BLOCKS
    187 void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
    188 #else
    189 void TAllocation::checkGuardBlock(unsigned char*, unsigned char, const char*) const
    190 #endif
    191 {
    192 #ifdef GUARD_BLOCKS
    193     for (size_t x = 0; x < guardBlockSize; x++) {
    194         if (blockMem[x] != val) {
    195             const int maxSize = 80;
    196             char assertMsg[maxSize];
    197 
    198             // We don't print the assert message.  It's here just to be helpful.
    199             snprintf(assertMsg, maxSize, "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
    200                       locText, size, data());
    201             assert(0 && "PoolAlloc: Damage in guard block");
    202         }
    203     }
    204 #else
    205     assert(guardBlockSize == 0);
    206 #endif
    207 }
    208 
    209 
    210 void TPoolAllocator::push()
    211 {
    212     tAllocState state = { currentPageOffset, inUseList };
    213 
    214     stack.push_back(state);
    215 
    216     //
    217     // Indicate there is no current page to allocate from.
    218     //
    219     currentPageOffset = pageSize;
    220 }
    221 
    222 //
    223 // Do a mass-deallocation of all the individual allocations
    224 // that have occurred since the last push(), or since the
    225 // last pop(), or since the object's creation.
    226 //
    227 // The deallocated pages are saved for future allocations.
    228 //
    229 void TPoolAllocator::pop()
    230 {
    231     if (stack.size() < 1)
    232         return;
    233 
    234     tHeader* page = stack.back().page;
    235     currentPageOffset = stack.back().offset;
    236 
    237     while (inUseList != page) {
    238         // invoke destructor to free allocation list
    239         inUseList->~tHeader();
    240 
    241         tHeader* nextInUse = inUseList->nextPage;
    242         if (inUseList->pageCount > 1)
    243             delete [] reinterpret_cast<char*>(inUseList);
    244         else {
    245             inUseList->nextPage = freeList;
    246             freeList = inUseList;
    247         }
    248         inUseList = nextInUse;
    249     }
    250 
    251     stack.pop_back();
    252 }
    253 
    254 //
    255 // Do a mass-deallocation of all the individual allocations
    256 // that have occurred.
    257 //
    258 void TPoolAllocator::popAll()
    259 {
    260     while (stack.size() > 0)
    261         pop();
    262 }
    263 
//
// Allocate numBytes from the pool, returning a pointer aligned per the
// allocator's configuration.  Three paths, fastest first:
//   1. carve the request out of the current page;
//   2. requests too big for one page get a dedicated multi-page block;
//   3. otherwise start a new single page (recycled from the free list
//      when possible) and carve from it.
//
void* TPoolAllocator::allocate(size_t numBytes)
{
    // If we are using guard blocks, all allocations are bracketed by
    // them: [guardblock][allocation][guardblock].  numBytes is how
    // much memory the caller asked for.  allocationSize is the total
    // size including guard blocks.  In release build,
    // guardBlockSize=0 and this all gets optimized away.
    size_t allocationSize = TAllocation::allocationSize(numBytes);

    //
    // Just keep some interesting statistics.
    //
    ++numCalls;
    totalBytes += numBytes;

    //
    // Do the allocation, most likely case first, for efficiency.
    // This step could be moved to be inline sometime.
    //
    // NOTE(review): currentPageOffset + allocationSize could wrap
    // size_t for a huge numBytes, making this test pass spuriously —
    // TODO confirm callers bound request sizes.
    if (currentPageOffset + allocationSize <= pageSize) {
        //
        // Safe to allocate from currentPageOffset.
        //
        unsigned char* memory = reinterpret_cast<unsigned char*>(inUseList) + currentPageOffset;
        currentPageOffset += allocationSize;
        // Keep the next allocation's start aligned.
        currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;

        return initializeAllocation(inUseList, memory, numBytes);
    }

    if (allocationSize + headerSkip > pageSize) {
        //
        // Do a multi-page allocation.  Don't mix these with the others.
        // The OS is efficient and allocating and free-ing multiple pages.
        //
        size_t numBytesToAlloc = allocationSize + headerSkip;
        tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
        // NOTE(review): plain ::new throws on failure rather than
        // returning 0, so this check is dead unless the build replaces
        // operator new — kept for safety.
        if (memory == 0)
            return 0;

        // Use placement-new to initialize header
        new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
        inUseList = memory;

        currentPageOffset = pageSize;  // make next allocation come from a new page

        // No guard blocks for multi-page allocations (yet)
        return reinterpret_cast<void*>(reinterpret_cast<UINT_PTR>(memory) + headerSkip);
    }

    //
    // Need a simple page to allocate from.  Recycle one from the free
    // list when available; otherwise get fresh storage from the OS.
    //
    tHeader* memory;
    if (freeList) {
        memory = freeList;
        freeList = freeList->nextPage;
    } else {
        memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
        // NOTE(review): dead check under throwing ::new — see above.
        if (memory == 0)
            return 0;
    }

    // Use placement-new to initialize header
    new(memory) tHeader(inUseList, 1);
    inUseList = memory;

    // Hand out the first aligned region past the header and record
    // where the next carve from this page begins.
    unsigned char* ret = reinterpret_cast<unsigned char*>(inUseList) + headerSkip;
    currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;

    return initializeAllocation(inUseList, ret, numBytes);
}
    336 
    337 
    338 //
    339 // Check all allocations in a list for damage by calling check on each.
    340 //
    341 void TAllocation::checkAllocList() const
    342 {
    343     for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
    344         alloc->check();
    345 }
    346 
    347 } // end namespace glslang
    348