Home | History | Annotate | Download | only in compiler
      1 // Copyright 2016 The SwiftShader Authors. All Rights Reserved.
      2 //
      3 // Licensed under the Apache License, Version 2.0 (the "License");
      4 // you may not use this file except in compliance with the License.
      5 // You may obtain a copy of the License at
      6 //
      7 //    http://www.apache.org/licenses/LICENSE-2.0
      8 //
      9 // Unless required by applicable law or agreed to in writing, software
     10 // distributed under the License is distributed on an "AS IS" BASIS,
     11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 // See the License for the specific language governing permissions and
     13 // limitations under the License.
     14 
     15 #include "PoolAlloc.h"
     16 
     17 #ifndef _MSC_VER
     18 #include <stdint.h>
     19 #endif
     20 #include <stdio.h>
     21 
     22 #include "InitializeGlobals.h"
     23 #include "osinclude.h"
     24 
// Process-wide TLS slot used to store each thread's current TPoolAllocator*
// (see Get/SetGlobalPoolAllocator below). Allocated by InitializePoolIndex()
// and released by FreePoolIndex().
OS_TLSIndex PoolIndex = OS_INVALID_TLS_INDEX;
     26 
     27 bool InitializePoolIndex()
     28 {
     29 	assert(PoolIndex == OS_INVALID_TLS_INDEX);
     30 
     31 	PoolIndex = OS_AllocTLSIndex();
     32 	return PoolIndex != OS_INVALID_TLS_INDEX;
     33 }
     34 
     35 void FreePoolIndex()
     36 {
     37 	assert(PoolIndex != OS_INVALID_TLS_INDEX);
     38 
     39 	OS_FreeTLSIndex(PoolIndex);
     40 	PoolIndex = OS_INVALID_TLS_INDEX;
     41 }
     42 
     43 TPoolAllocator* GetGlobalPoolAllocator()
     44 {
     45 	assert(PoolIndex != OS_INVALID_TLS_INDEX);
     46 	return static_cast<TPoolAllocator*>(OS_GetTLSValue(PoolIndex));
     47 }
     48 
// Install poolAllocator as the calling thread's current pool allocator.
// Requires InitializePoolIndex() to have succeeded first.
void SetGlobalPoolAllocator(TPoolAllocator* poolAllocator)
{
	assert(PoolIndex != OS_INVALID_TLS_INDEX);
	OS_SetTLSValue(PoolIndex, poolAllocator);
}
     54 
     55 //
     56 // Implement the functionality of the TPoolAllocator class, which
     57 // is documented in PoolAlloc.h.
     58 //
// Construct a pool allocator.
//   growthIncrement:     requested page size in bytes; clamped below to 4KB.
//   allocationAlignment: requested alignment; normalized below to a power of
//                        two that is at least pointer-sized.
// When SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC is defined, pooling is
// replaced by plain malloc tracking in mStack (one vector per push() scope).
TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
	alignment(allocationAlignment)
#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	, pageSize(growthIncrement),
	freeList(0),
	inUseList(0),
	numCalls(0),
	totalBytes(0)
#endif
{
	//
	// Adjust alignment to be at least pointer aligned and
	// power of 2.
	//
	size_t minAlign = sizeof(void*);
	alignment &= ~(minAlign - 1);  // round down to a multiple of the pointer size
	if (alignment < minAlign)
		alignment = minAlign;
	size_t a = 1;
	while (a < alignment)  // round up to the next power of two
		a <<= 1;
	alignment = a;
	alignmentMask = a - 1;  // mask of the low bits; used to align offsets

#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	//
	// Don't allow page sizes we know are smaller than all common
	// OS page sizes.
	//
	if (pageSize < 4*1024)
		pageSize = 4*1024;

	//
	// A large currentPageOffset indicates a new page needs to
	// be obtained to allocate memory.
	//
	currentPageOffset = pageSize;

	//
	// Align header skip
	//
	// headerSkip is the distance from the start of a page to its first
	// usable byte: at least a pointer, and enough to cover tHeader,
	// rounded up to the chosen alignment.
	headerSkip = minAlign;
	if (headerSkip < sizeof(tHeader)) {
		headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
	}
#else  // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	// Pooling disabled: start with one open allocation scope so that
	// allocate() can always append to mStack.back().
	mStack.push_back({});
#endif
}
    108 
// Release every page owned by the allocator: the in-use list and the cached
// free list (pooled build), or every tracked malloc block (disabled build).
TPoolAllocator::~TPoolAllocator()
{
#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	while (inUseList) {
		tHeader* next = inUseList->nextPage;
		// Explicit destructor call pairs with the placement-new in
		// allocate(); the raw storage was obtained via ::new char[].
		inUseList->~tHeader();
		delete [] reinterpret_cast<char*>(inUseList);
		inUseList = next;
	}

	// We should not check the guard blocks
	// here, because we did it already when the block was
	// placed into the free list.
	//
	while (freeList) {
		tHeader* next = freeList->nextPage;
		delete [] reinterpret_cast<char*>(freeList);
		freeList = next;
	}
#else  // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	// Pooling disabled: free the raw malloc()ed blocks of every scope.
	for (auto& allocs : mStack) {
		for (auto alloc : allocs) {
			free(alloc);
		}
	}
	mStack.clear();
#endif
}
    137 
// Support MSVC++ 6.0
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;  // fill byte for the leading guard block
const unsigned char TAllocation::guardBlockEndVal   = 0xfe;  // fill byte for the trailing guard block
const unsigned char TAllocation::userDataFill       = 0xcd;  // fill byte for freshly allocated user data

#ifdef GUARD_BLOCKS
	const size_t TAllocation::guardBlockSize = 16;
#else
	// Zero-sized guards make the bracketing arithmetic in allocate()
	// a no-op in builds without guard blocks.
	const size_t TAllocation::guardBlockSize = 0;
#endif
    148 
    149 //
    150 // Check a single guard block for damage
    151 //
// Verify that every byte of one guard block still holds its fill value.
//   blockMem: start of the guard block to inspect.
//   val:      expected fill byte (guardBlockBeginVal or guardBlockEndVal).
//   locText:  human-readable location ("before"/"after") for the message.
// Compiled to a no-op unless GUARD_BLOCKS is defined.
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
{
#ifdef GUARD_BLOCKS
	for (size_t x = 0; x < guardBlockSize; x++) {
		if (blockMem[x] != val) {
			char assertMsg[80];

			// We don't print the assert message.  It's here just to be helpful.
			// (assertMsg is deliberately only formatted, never printed — it is
			// meant to be read in a debugger when the assert below fires.)
			#if defined(_MSC_VER)
				_snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n",
						  locText, size, data());
			#else
				snprintf(assertMsg, sizeof(assertMsg), "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
						 locText, size, data());
			#endif
			assert(0 && "PoolAlloc: Damage in guard block");
		}
	}
#endif
}
    172 
    173 
// Open a new allocation scope: everything allocated after this call is
// released in bulk by the matching pop().
void TPoolAllocator::push()
{
#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	// Snapshot the current page and offset so pop() can rewind to here.
	tAllocState state = { currentPageOffset, inUseList };

	mStack.push_back(state);

	//
	// Indicate there is no current page to allocate from.
	//
	currentPageOffset = pageSize;
#else  // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	// Pooling disabled: open an empty per-scope list of malloc blocks.
	mStack.push_back({});
#endif
}
    189 
    190 //
    191 // Do a mass-deallocation of all the individual allocations
    192 // that have occurred since the last push(), or since the
    193 // last pop(), or since the object's creation.
    194 //
    195 // The deallocated pages are saved for future allocations.
    196 //
void TPoolAllocator::pop()
{
	// No outstanding push(): nothing to unwind.
	if (mStack.size() < 1)
		return;

#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	// Rewind to the page/offset snapshot recorded by the matching push().
	tHeader* page = mStack.back().page;
	currentPageOffset = mStack.back().offset;

	// Every page linked ahead of the snapshot page was allocated after
	// the push(); destroy each one's header (which walks its allocation
	// list) and either return it to the OS or cache it for reuse.
	while (inUseList != page) {
		// invoke destructor to free allocation list
		inUseList->~tHeader();

		tHeader* nextInUse = inUseList->nextPage;
		if (inUseList->pageCount > 1)
			// Multi-page blocks go straight back to the OS.
			delete [] reinterpret_cast<char*>(inUseList);
		else {
			// Single pages are kept on freeList for future allocations.
			inUseList->nextPage = freeList;
			freeList = inUseList;
		}
		inUseList = nextInUse;
	}

	mStack.pop_back();
#else  // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	// Pooling disabled: free the raw malloc()ed blocks of this scope.
	for (auto alloc : mStack.back()) {
		free(alloc);
	}
	mStack.pop_back();
#endif
}
    228 
    229 //
    230 // Do a mass-deallocation of all the individual allocations
    231 // that have occurred.
    232 //
    233 void TPoolAllocator::popAll()
    234 {
    235 	while (mStack.size() > 0)
    236 		pop();
    237 }
    238 
// Allocate numBytes of pool-owned memory and return a pointer to it, or 0
// on size overflow.  Memory is never freed individually; it is reclaimed
// wholesale by pop()/popAll() or the destructor.  Three paths, fastest
// first: bump-allocate from the current page, a dedicated multi-page block
// for oversized requests, or start a new page (reusing freeList if possible).
void* TPoolAllocator::allocate(size_t numBytes)
{
#if !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	//
	// Just keep some interesting statistics.
	//
	++numCalls;
	totalBytes += numBytes;

	// If we are using guard blocks, all allocations are bracketed by
	// them: [guardblock][allocation][guardblock].  numBytes is how
	// much memory the caller asked for.  allocationSize is the total
	// size including guard blocks.  In release build,
	// guardBlockSize=0 and this all gets optimized away.
	size_t allocationSize = TAllocation::allocationSize(numBytes);
	// Detect integer overflow.
	if (allocationSize < numBytes)
		return 0;

	//
	// Do the allocation, most likely case first, for efficiency.
	// This step could be moved to be inline sometime.
	//
	if (allocationSize <= pageSize - currentPageOffset) {
		//
		// Safe to allocate from currentPageOffset.
		//
		unsigned char* memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
		currentPageOffset += allocationSize;
		// Keep the next allocation's start aligned.
		currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;

		return initializeAllocation(inUseList, memory, numBytes);
	}

	if (allocationSize > pageSize - headerSkip) {
		//
		// Do a multi-page allocation.  Don't mix these with the others.
		// The OS is efficient and allocating and free-ing multiple pages.
		//
		size_t numBytesToAlloc = allocationSize + headerSkip;
		// Detect integer overflow.
		if (numBytesToAlloc < allocationSize)
			return 0;

		tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
		// NOTE(review): plain ::new throws on failure rather than returning
		// null, so this check looks like dead code — confirm whether the
		// build disables exceptions before relying on it.
		if (memory == 0)
			return 0;

		// Use placement-new to initialize header
		new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
		inUseList = memory;

		currentPageOffset = pageSize;  // make next allocation come from a new page

		// No guard blocks for multi-page allocations (yet)
		return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
	}

	//
	// Need a simple page to allocate from.
	//
	tHeader* memory;
	if (freeList) {
		// Reuse a page cached by pop().
		memory = freeList;
		freeList = freeList->nextPage;
	} else {
		memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
		// NOTE(review): same dead-code concern as the null check above.
		if (memory == 0)
			return 0;
	}

	// Use placement-new to initialize header
	new(memory) tHeader(inUseList, 1);
	inUseList = memory;

	unsigned char* ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
	currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;

	return initializeAllocation(inUseList, ret, numBytes);
#else  // !defined(SWIFTSHADER_TRANSLATOR_DISABLE_POOL_ALLOC)
	// Pooling disabled: over-allocate with malloc, record the raw pointer
	// for this scope's pop(), and hand back an aligned address inside it.
	void *alloc = malloc(numBytes + alignmentMask);
	mStack.back().push_back(alloc);

	intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc);
	intAlloc = (intAlloc + alignmentMask) & ~alignmentMask;
	return reinterpret_cast<void *>(intAlloc);
#endif
}
    327 
    328 
    329 //
    330 // Check all allocations in a list for damage by calling check on each.
    331 //
    332 void TAllocation::checkAllocList() const
    333 {
    334 	for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
    335 		alloc->check();
    336 }
    337