Home | History | Annotate | Download | only in memory
      1 /*-------------------------------------------------------------------------
      2  * Vulkan Conformance Tests
      3  * ------------------------
      4  *
      5  * Copyright (c) 2015 Google Inc.
      6  *
      7  * Licensed under the Apache License, Version 2.0 (the "License");
      8  * you may not use this file except in compliance with the License.
      9  * You may obtain a copy of the License at
     10  *
     11  *      http://www.apache.org/licenses/LICENSE-2.0
     12  *
     13  * Unless required by applicable law or agreed to in writing, software
     14  * distributed under the License is distributed on an "AS IS" BASIS,
     15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     16  * See the License for the specific language governing permissions and
     17  * limitations under the License.
     18  *
     19  *//*!
     20  * \file
     21  * \brief Simple memory mapping tests.
     22  *//*--------------------------------------------------------------------*/
     23 
     24 #include "vktMemoryMappingTests.hpp"
     25 
     26 #include "vktTestCaseUtil.hpp"
     27 
     28 #include "tcuMaybe.hpp"
     29 #include "tcuResultCollector.hpp"
     30 #include "tcuTestLog.hpp"
     31 #include "tcuPlatform.hpp"
     32 
     33 #include "vkDeviceUtil.hpp"
     34 #include "vkPlatform.hpp"
     35 #include "vkQueryUtil.hpp"
     36 #include "vkRef.hpp"
     37 #include "vkRefUtil.hpp"
     38 #include "vkStrUtil.hpp"
     39 #include "vkAllocationCallbackUtil.hpp"
     40 
     41 #include "deRandom.hpp"
     42 #include "deSharedPtr.hpp"
     43 #include "deStringUtil.hpp"
     44 #include "deUniquePtr.hpp"
     45 #include "deSTLUtil.hpp"
     46 
     47 #include <string>
     48 #include <vector>
     49 #include <algorithm>
     50 
     51 using tcu::Maybe;
     52 using tcu::TestLog;
     53 
     54 using de::SharedPtr;
     55 
     56 using std::string;
     57 using std::vector;
     58 
     59 using namespace vk;
     60 
     61 namespace vkt
     62 {
     63 namespace memory
     64 {
     65 namespace
     66 {
// Host-side overhead of the reference model: for each byte of device memory
// the ReferenceMemory below keeps roughly two bytes of bookkeeping (the byte
// value plus packed defined/flushed bits). Used when budgeting allocations.
enum
{
	REFERENCE_BYTES_PER_BYTE = 2
};
     71 
     72 template<typename T>
     73 T divRoundUp (const T& a, const T& b)
     74 {
     75 	return (a / b) + (a % b == 0 ? 0 : 1);
     76 }
     77 
     78 // \note Bit vector that guarantees that each value takes only one bit.
     79 // std::vector<bool> is often optimized to only take one bit for each bool, but
     80 // that is implementation detail and in this case we really need to known how much
     81 // memory is used.
     82 class BitVector
     83 {
     84 public:
     85 	enum
     86 	{
     87 		BLOCK_BIT_SIZE = 8 * sizeof(deUint32)
     88 	};
     89 
     90 	BitVector (size_t size, bool value = false)
     91 		: m_data(divRoundUp<size_t>(size, (size_t)BLOCK_BIT_SIZE), value ? ~0x0u : 0x0u)
     92 	{
     93 	}
     94 
     95 	bool get (size_t ndx) const
     96 	{
     97 		return (m_data[ndx / BLOCK_BIT_SIZE] & (0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE))) != 0;
     98 	}
     99 
    100 	void set (size_t ndx, bool value)
    101 	{
    102 		if (value)
    103 			m_data[ndx / BLOCK_BIT_SIZE] |= 0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE);
    104 		else
    105 			m_data[ndx / BLOCK_BIT_SIZE] &= ~(0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE));
    106 	}
    107 
    108 private:
    109 	vector<deUint32>	m_data;
    110 };
    111 
// CPU-side model of a device memory allocation. For every byte it tracks the
// last known value (m_bytes) and whether that value is known at all
// (m_defined); for every non-coherent atom it tracks whether the atom has
// been flushed since it was last written (m_flushed).
class ReferenceMemory
{
public:
	// size:     allocation size in bytes; must be a multiple of atomSize.
	// atomSize: flush/invalidate granularity (nonCoherentAtomSize or 1).
	ReferenceMemory (size_t size, size_t atomSize)
		: m_atomSize	(atomSize)
		, m_bytes		(size, 0xDEu)
		, m_defined		(size, false)
		, m_flushed		(size / atomSize, false)
	{
		DE_ASSERT(size % m_atomSize == 0);
	}

	// Record a host write of 'value' at 'pos'. The byte becomes defined and
	// its atom becomes unflushed (the write has not been flushed yet).
	void write (size_t pos, deUint8 value)
	{
		m_bytes[pos] = value;
		m_defined.set(pos, true);
		m_flushed.set(pos / m_atomSize, false);
	}

	// Record a host read that observed 'value' at 'pos'. Returns false if the
	// byte was defined and the observed value disagrees with the reference.
	// Either way the observed value is adopted as the new reference value.
	bool read (size_t pos, deUint8 value)
	{
		const bool isOk = !m_defined.get(pos)
						|| m_bytes[pos] == value;

		m_bytes[pos] = value;
		m_defined.set(pos, true);

		return isOk;
	}

	// Record a read-modify-write that XORs 'mask' into the byte at 'pos'.
	// 'value' is the value observed before modification; returns false if it
	// disagrees with a previously defined reference value. The modified byte
	// becomes defined and its atom unflushed.
	bool modifyXor (size_t pos, deUint8 value, deUint8 mask)
	{
		const bool isOk = !m_defined.get(pos)
						|| m_bytes[pos] == value;

		m_bytes[pos] = value ^ mask;
		m_defined.set(pos, true);
		m_flushed.set(pos / m_atomSize, false);

		return isOk;
	}

	// Mark all atoms in [offset, offset+size) as flushed.
	// Offset and size must be atom aligned.
	void flush (size_t offset, size_t size)
	{
		DE_ASSERT((offset % m_atomSize) == 0);
		DE_ASSERT((size % m_atomSize) == 0);

		for (size_t ndx = 0; ndx < size / m_atomSize; ndx++)
			m_flushed.set((offset / m_atomSize) + ndx, true);
	}

	// Invalidate atoms in [offset, offset+size): any atom that has NOT been
	// flushed loses its defined contents, because after invalidation the
	// host may see the device copy instead of unflushed host writes.
	// Offset and size must be atom aligned.
	void invalidate (size_t offset, size_t size)
	{
		DE_ASSERT((offset % m_atomSize) == 0);
		DE_ASSERT((size % m_atomSize) == 0);

		for (size_t ndx = 0; ndx < size / m_atomSize; ndx++)
		{
			if (!m_flushed.get((offset / m_atomSize) + ndx))
			{
				for (size_t i = 0; i < m_atomSize; i++)
					m_defined.set(offset + ndx * m_atomSize + i, false);
			}
		}
	}


private:
	const size_t	m_atomSize;	// flush/invalidate granularity in bytes
	vector<deUint8>	m_bytes;	// reference value per byte
	BitVector		m_defined;	// per byte: is the value known?
	BitVector		m_flushed;	// per atom: flushed since last write?
};
    185 
    186 struct MemoryType
    187 {
    188 	MemoryType		(deUint32 index_, const VkMemoryType& type_)
    189 		: index	(index_)
    190 		, type	(type_)
    191 	{
    192 	}
    193 
    194 	MemoryType		(void)
    195 		: index	(~0u)
    196 	{
    197 	}
    198 
    199 	deUint32		index;
    200 	VkMemoryType	type;
    201 };
    202 
// Estimate how much host (system) memory a single VkDeviceMemory allocation
// costs: perform a minimal 1-byte allocation from memory type 0 while
// recording host allocator callbacks, then sum the live host allocations
// (plus a per-allocation pointer-size overhead estimate).
size_t computeDeviceMemorySystemMemFootprint (const DeviceInterface& vk, VkDevice device)
{
	AllocationCallbackRecorder	callbackRecorder	(getSystemAllocator());

	{
		// 1 B allocation from memory type 0
		const VkMemoryAllocateInfo	allocInfo	=
		{
			VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
			DE_NULL,
			1u,
			0u,
		};
		const Unique<VkDeviceMemory>			memory			(allocateMemory(vk, device, &allocInfo));
		AllocationCallbackValidationResults		validateRes;

		validateAllocationCallbacks(callbackRecorder, &validateRes);

		// Any callback-contract violation invalidates the measurement.
		TCU_CHECK(validateRes.violations.empty());

		return getLiveSystemAllocationTotal(validateRes)
			   + sizeof(void*)*validateRes.liveAllocations.size(); // allocation overhead
	}
}
    227 
    228 Move<VkDeviceMemory> allocMemory (const DeviceInterface& vk, VkDevice device, VkDeviceSize pAllocInfo_allocationSize, deUint32 pAllocInfo_memoryTypeIndex)
    229 {
    230 	const VkMemoryAllocateInfo pAllocInfo =
    231 	{
    232 		VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
    233 		DE_NULL,
    234 		pAllocInfo_allocationSize,
    235 		pAllocInfo_memoryTypeIndex,
    236 	};
    237 	return allocateMemory(vk, device, &pAllocInfo);
    238 }
    239 
// Offset and size (in atoms at config level, in bytes once scaled) of a
// memory subrange. Defaults to ~0 so an unset range is easy to spot.
struct MemoryRange
{
	MemoryRange (VkDeviceSize offset_ = ~(VkDeviceSize)0, VkDeviceSize size_ = ~(VkDeviceSize)0)
		: offset	(offset_)
		, size		(size_)
	{
	}

	VkDeviceSize	offset;
	VkDeviceSize	size;
};
    251 
    252 struct TestConfig
    253 {
    254 	TestConfig (void)
    255 		: allocationSize	(~(VkDeviceSize)0)
    256 	{
    257 	}
    258 
    259 	VkDeviceSize		allocationSize;
    260 	deUint32			seed;
    261 
    262 	MemoryRange			mapping;
    263 	vector<MemoryRange>	flushMappings;
    264 	vector<MemoryRange>	invalidateMappings;
    265 	bool				remap;
    266 };
    267 
    268 bool compareAndLogBuffer (TestLog& log, size_t size, const deUint8* result, const deUint8* reference)
    269 {
    270 	size_t	failedBytes	= 0;
    271 	size_t	firstFailed	= (size_t)-1;
    272 
    273 	for (size_t ndx = 0; ndx < size; ndx++)
    274 	{
    275 		if (result[ndx] != reference[ndx])
    276 		{
    277 			failedBytes++;
    278 
    279 			if (firstFailed == (size_t)-1)
    280 				firstFailed = ndx;
    281 		}
    282 	}
    283 
    284 	if (failedBytes > 0)
    285 	{
    286 		log << TestLog::Message << "Comparison failed. Failed bytes " << failedBytes << ". First failed at offset " << firstFailed << "." << TestLog::EndMessage;
    287 
    288 		std::ostringstream	expectedValues;
    289 		std::ostringstream	resultValues;
    290 
    291 		for (size_t ndx = firstFailed; ndx < firstFailed + 10 && ndx < size; ndx++)
    292 		{
    293 			if (ndx != firstFailed)
    294 			{
    295 				expectedValues << ", ";
    296 				resultValues << ", ";
    297 			}
    298 
    299 			expectedValues << reference[ndx];
    300 			resultValues << result[ndx];
    301 		}
    302 
    303 		if (firstFailed + 10 < size)
    304 		{
    305 			expectedValues << "...";
    306 			resultValues << "...";
    307 		}
    308 
    309 		log << TestLog::Message << "Expected values at offset: " << firstFailed << ", " << expectedValues.str() << TestLog::EndMessage;
    310 		log << TestLog::Message << "Result values at offset: " << firstFailed << ", " << resultValues.str() << TestLog::EndMessage;
    311 
    312 		return false;
    313 	}
    314 	else
    315 		return true;
    316 }
    317 
    318 tcu::TestStatus testMemoryMapping (Context& context, const TestConfig config)
    319 {
    320 	TestLog&								log					= context.getTestContext().getLog();
    321 	tcu::ResultCollector					result				(log);
    322 	const VkPhysicalDevice					physicalDevice		= context.getPhysicalDevice();
    323 	const VkDevice							device				= context.getDevice();
    324 	const InstanceInterface&				vki					= context.getInstanceInterface();
    325 	const DeviceInterface&					vkd					= context.getDeviceInterface();
    326 	const VkPhysicalDeviceMemoryProperties	memoryProperties	= getPhysicalDeviceMemoryProperties(vki, physicalDevice);
    327 	// \todo [2016-05-27 misojarvi] Remove once drivers start reporting correctly nonCoherentAtomSize that is at least 1.
    328 	const VkDeviceSize						nonCoherentAtomSize	= context.getDeviceProperties().limits.nonCoherentAtomSize != 0
    329 																? context.getDeviceProperties().limits.nonCoherentAtomSize
    330 																: 1;
    331 
    332 	{
    333 		const tcu::ScopedLogSection	section	(log, "TestCaseInfo", "TestCaseInfo");
    334 
    335 		log << TestLog::Message << "Seed: " << config.seed << TestLog::EndMessage;
    336 		log << TestLog::Message << "Allocation size: " << config.allocationSize << " * atom" <<  TestLog::EndMessage;
    337 		log << TestLog::Message << "Mapping, offset: " << config.mapping.offset << " * atom, size: " << config.mapping.size << " * atom" << TestLog::EndMessage;
    338 
    339 		if (!config.flushMappings.empty())
    340 		{
    341 			log << TestLog::Message << "Invalidating following ranges:" << TestLog::EndMessage;
    342 
    343 			for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
    344 				log << TestLog::Message << "\tOffset: " << config.flushMappings[ndx].offset << " * atom, Size: " << config.flushMappings[ndx].size << " * atom" << TestLog::EndMessage;
    345 		}
    346 
    347 		if (config.remap)
    348 			log << TestLog::Message << "Remapping memory between flush and invalidation." << TestLog::EndMessage;
    349 
    350 		if (!config.invalidateMappings.empty())
    351 		{
    352 			log << TestLog::Message << "Flushing following ranges:" << TestLog::EndMessage;
    353 
    354 			for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
    355 				log << TestLog::Message << "\tOffset: " << config.invalidateMappings[ndx].offset << " * atom, Size: " << config.invalidateMappings[ndx].size << " * atom" << TestLog::EndMessage;
    356 		}
    357 	}
    358 
    359 	for (deUint32 memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
    360 	{
    361 		try
    362 		{
    363 			const tcu::ScopedLogSection		section		(log, "MemoryType" + de::toString(memoryTypeIndex), "MemoryType" + de::toString(memoryTypeIndex));
    364 			const VkMemoryType&				memoryType	= memoryProperties.memoryTypes[memoryTypeIndex];
    365 			const VkMemoryHeap&				memoryHeap	= memoryProperties.memoryHeaps[memoryType.heapIndex];
    366 			const VkDeviceSize				atomSize	= (memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0
    367 														? 1
    368 														: nonCoherentAtomSize;
    369 
    370 			log << TestLog::Message << "MemoryType: " << memoryType << TestLog::EndMessage;
    371 			log << TestLog::Message << "MemoryHeap: " << memoryHeap << TestLog::EndMessage;
    372 			log << TestLog::Message << "AtomSize: " << atomSize << TestLog::EndMessage;
    373 			log << TestLog::Message << "AllocationSize: " << config.allocationSize * atomSize <<  TestLog::EndMessage;
    374 			log << TestLog::Message << "Mapping, offset: " << config.mapping.offset * atomSize << ", size: " << config.mapping.size * atomSize << TestLog::EndMessage;
    375 
    376 			if (!config.flushMappings.empty())
    377 			{
    378 				log << TestLog::Message << "Invalidating following ranges:" << TestLog::EndMessage;
    379 
    380 				for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
    381 					log << TestLog::Message << "\tOffset: " << config.flushMappings[ndx].offset * atomSize << ", Size: " << config.flushMappings[ndx].size * atomSize << TestLog::EndMessage;
    382 			}
    383 
    384 			if (!config.invalidateMappings.empty())
    385 			{
    386 				log << TestLog::Message << "Flushing following ranges:" << TestLog::EndMessage;
    387 
    388 				for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
    389 					log << TestLog::Message << "\tOffset: " << config.invalidateMappings[ndx].offset * atomSize << ", Size: " << config.invalidateMappings[ndx].size * atomSize << TestLog::EndMessage;
    390 			}
    391 
    392 			if ((memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    393 			{
    394 				log << TestLog::Message << "Memory type doesn't support mapping." << TestLog::EndMessage;
    395 			}
    396 			else if (memoryHeap.size <= 4 * atomSize * config.allocationSize)
    397 			{
    398 				log << TestLog::Message << "Memory types heap is too small." << TestLog::EndMessage;
    399 			}
    400 			else
    401 			{
    402 				const Unique<VkDeviceMemory>	memory				(allocMemory(vkd, device, config.allocationSize * atomSize, memoryTypeIndex));
    403 				de::Random						rng					(config.seed);
    404 				vector<deUint8>					reference			((size_t)(config.allocationSize * atomSize));
    405 				deUint8*						mapping				= DE_NULL;
    406 
    407 				{
    408 					void* ptr;
    409 					VK_CHECK(vkd.mapMemory(device, *memory, config.mapping.offset * atomSize, config.mapping.size * atomSize, 0u, &ptr));
    410 					TCU_CHECK(ptr);
    411 
    412 					mapping = (deUint8*)ptr;
    413 				}
    414 
    415 				for (VkDeviceSize ndx = 0; ndx < config.mapping.size * atomSize; ndx++)
    416 				{
    417 					const deUint8 val = rng.getUint8();
    418 
    419 					mapping[ndx]												= val;
    420 					reference[(size_t)(config.mapping.offset * atomSize + ndx)]	= val;
    421 				}
    422 
    423 				if (!config.flushMappings.empty())
    424 				{
    425 					vector<VkMappedMemoryRange> ranges;
    426 
    427 					for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
    428 					{
    429 						const VkMappedMemoryRange range =
    430 						{
    431 							VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
    432 							DE_NULL,
    433 
    434 							*memory,
    435 							config.flushMappings[ndx].offset * atomSize,
    436 							config.flushMappings[ndx].size * atomSize
    437 						};
    438 
    439 						ranges.push_back(range);
    440 					}
    441 
    442 					VK_CHECK(vkd.flushMappedMemoryRanges(device, (deUint32)ranges.size(), &ranges[0]));
    443 				}
    444 
    445 				if (config.remap)
    446 				{
    447 					void* ptr;
    448 					vkd.unmapMemory(device, *memory);
    449 					VK_CHECK(vkd.mapMemory(device, *memory, config.mapping.offset * atomSize, config.mapping.size * atomSize, 0u, &ptr));
    450 					TCU_CHECK(ptr);
    451 
    452 					mapping = (deUint8*)ptr;
    453 				}
    454 
    455 				if (!config.invalidateMappings.empty())
    456 				{
    457 					vector<VkMappedMemoryRange> ranges;
    458 
    459 					for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
    460 					{
    461 						const VkMappedMemoryRange range =
    462 						{
    463 							VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
    464 							DE_NULL,
    465 
    466 							*memory,
    467 							config.invalidateMappings[ndx].offset * atomSize,
    468 							config.invalidateMappings[ndx].size * atomSize
    469 						};
    470 
    471 						ranges.push_back(range);
    472 					}
    473 
    474 					VK_CHECK(vkd.invalidateMappedMemoryRanges(device, (deUint32)ranges.size(), &ranges[0]));
    475 				}
    476 
    477 				if (!compareAndLogBuffer(log, (size_t)(config.mapping.size * atomSize), mapping, &reference[(size_t)(config.mapping.offset * atomSize)]))
    478 					result.fail("Unexpected values read from mapped memory.");
    479 
    480 				vkd.unmapMemory(device, *memory);
    481 			}
    482 		}
    483 		catch (const tcu::TestError& error)
    484 		{
    485 			result.fail(error.getMessage());
    486 		}
    487 	}
    488 
    489 	return tcu::TestStatus(result.getResult(), result.getMessage());
    490 }
    491 
// A single host mapping of a MemoryObject. Performs random reads, writes and
// XOR modifications through the mapped pointer, mirroring every access into
// the owning object's ReferenceMemory so results can be validated later.
class MemoryMapping
{
public:
						MemoryMapping	(const MemoryRange&	range,
										 void*				ptr,
										 ReferenceMemory&	reference);

	void				randomRead		(de::Random& rng);
	void				randomWrite		(de::Random& rng);
	void				randomModify	(de::Random& rng);

	const MemoryRange&	getRange		(void) const { return m_range; }

private:
	MemoryRange			m_range;		// mapped subrange within the allocation
	void*				m_ptr;			// host pointer returned by vkMapMemory
	ReferenceMemory&	m_reference;	// reference state shared with the owning MemoryObject
};
    510 
// range:     mapped subrange of the allocation; must be non-empty.
// ptr:       host pointer to the start of the mapped range.
// reference: reference memory covering the whole allocation.
MemoryMapping::MemoryMapping (const MemoryRange&	range,
							  void*					ptr,
							  ReferenceMemory&		reference)
	: m_range		(range)
	, m_ptr			(ptr)
	, m_reference	(reference)
{
	DE_ASSERT(range.size > 0);
}
    520 
    521 void MemoryMapping::randomRead (de::Random& rng)
    522 {
    523 	const size_t count = (size_t)rng.getInt(0, 100);
    524 
    525 	for (size_t ndx = 0; ndx < count; ndx++)
    526 	{
    527 		const size_t	pos	= (size_t)(rng.getUint64() % (deUint64)m_range.size);
    528 		const deUint8	val	= ((deUint8*)m_ptr)[pos];
    529 
    530 		TCU_CHECK(m_reference.read((size_t)(m_range.offset + pos), val));
    531 	}
    532 }
    533 
    534 void MemoryMapping::randomWrite (de::Random& rng)
    535 {
    536 	const size_t count = (size_t)rng.getInt(0, 100);
    537 
    538 	for (size_t ndx = 0; ndx < count; ndx++)
    539 	{
    540 		const size_t	pos	= (size_t)(rng.getUint64() % (deUint64)m_range.size);
    541 		const deUint8	val	= rng.getUint8();
    542 
    543 		((deUint8*)m_ptr)[pos]	= val;
    544 		m_reference.write((size_t)(m_range.offset + pos), val);
    545 	}
    546 }
    547 
    548 void MemoryMapping::randomModify (de::Random& rng)
    549 {
    550 	const size_t count = (size_t)rng.getInt(0, 100);
    551 
    552 	for (size_t ndx = 0; ndx < count; ndx++)
    553 	{
    554 		const size_t	pos		= (size_t)(rng.getUint64() % (deUint64)m_range.size);
    555 		const deUint8	val		= ((deUint8*)m_ptr)[pos];
    556 		const deUint8	mask	= rng.getUint8();
    557 
    558 		((deUint8*)m_ptr)[pos]	= val ^ mask;
    559 		TCU_CHECK(m_reference.modifyXor((size_t)(m_range.offset + pos), val, mask));
    560 	}
    561 }
    562 
    563 VkDeviceSize randomSize (de::Random& rng, VkDeviceSize atomSize, VkDeviceSize maxSize)
    564 {
    565 	const VkDeviceSize maxSizeInAtoms = maxSize / atomSize;
    566 
    567 	DE_ASSERT(maxSizeInAtoms > 0);
    568 
    569 	return maxSizeInAtoms > 1
    570 			? atomSize * (1 + (VkDeviceSize)(rng.getUint64() % (deUint64)maxSizeInAtoms))
    571 			: atomSize;
    572 }
    573 
    574 VkDeviceSize randomOffset (de::Random& rng, VkDeviceSize atomSize, VkDeviceSize maxOffset)
    575 {
    576 	const VkDeviceSize maxOffsetInAtoms = maxOffset / atomSize;
    577 
    578 	return maxOffsetInAtoms > 0
    579 			? atomSize * (VkDeviceSize)(rng.getUint64() % (deUint64)(maxOffsetInAtoms + 1))
    580 			: 0;
    581 }
    582 
    583 void randomRanges (de::Random& rng, vector<VkMappedMemoryRange>& ranges, size_t count, VkDeviceMemory memory, VkDeviceSize minOffset, VkDeviceSize maxSize, VkDeviceSize atomSize)
    584 {
    585 	ranges.resize(count);
    586 
    587 	for (size_t rangeNdx = 0; rangeNdx < count; rangeNdx++)
    588 	{
    589 		const VkDeviceSize	size	= randomSize(rng, atomSize, maxSize);
    590 		const VkDeviceSize	offset	= minOffset + randomOffset(rng, atomSize, maxSize - size);
    591 
    592 		const VkMappedMemoryRange range =
    593 		{
    594 			VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
    595 			DE_NULL,
    596 
    597 			memory,
    598 			offset,
    599 			size
    600 		};
    601 		ranges[rangeNdx] = range;
    602 	}
    603 }
    604 
// One device memory allocation under test. Owns the VkDeviceMemory, at most
// one active MemoryMapping, and the ReferenceMemory modeling its contents.
class MemoryObject
{
public:
							MemoryObject		(const DeviceInterface&		vkd,
												 VkDevice					device,
												 VkDeviceSize				size,
												 deUint32					memoryTypeIndex,
												 VkDeviceSize				atomSize);

							~MemoryObject		(void);

	// Map a random atom-aligned subrange; only valid when currently unmapped.
	MemoryMapping*			mapRandom			(const DeviceInterface& vkd, VkDevice device, de::Random& rng);
	void					unmap				(void);

	// Flush/invalidate random subranges of the current mapping; require an
	// active mapping.
	void					randomFlush			(const DeviceInterface& vkd, VkDevice device, de::Random& rng);
	void					randomInvalidate	(const DeviceInterface& vkd, VkDevice device, de::Random& rng);

	VkDeviceSize			getSize				(void) const { return m_size; }
	MemoryMapping*			getMapping			(void) { return m_mapping; }

private:
	const DeviceInterface&	m_vkd;
	const VkDevice			m_device;

	const deUint32			m_memoryTypeIndex;
	const VkDeviceSize		m_size;		// allocation size in bytes
	const VkDeviceSize		m_atomSize;	// flush/invalidate granularity

	Move<VkDeviceMemory>	m_memory;

	MemoryMapping*			m_mapping;	// active mapping or DE_NULL
	ReferenceMemory			m_referenceMemory;
};
    638 
// Allocate 'size' bytes from the given memory type; 'atomSize' sets the
// flush/invalidate granularity used by the reference model.
MemoryObject::MemoryObject (const DeviceInterface&		vkd,
							VkDevice					device,
							VkDeviceSize				size,
							deUint32					memoryTypeIndex,
							VkDeviceSize				atomSize)
	: m_vkd				(vkd)
	, m_device			(device)
	, m_memoryTypeIndex	(memoryTypeIndex)
	, m_size			(size)
	, m_atomSize		(atomSize)
	, m_mapping			(DE_NULL)
	, m_referenceMemory	((size_t)size, (size_t)m_atomSize)
{
	m_memory = allocMemory(m_vkd, m_device, m_size, m_memoryTypeIndex);
}
    654 
MemoryObject::~MemoryObject (void)
{
	// Delete any outstanding mapping object; deleting DE_NULL is a no-op.
	delete m_mapping;
}
    659 
// Map a random atom-aligned subrange of the allocation and return the new
// mapping. The object must not already be mapped.
MemoryMapping* MemoryObject::mapRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
{
	const VkDeviceSize	size	= randomSize(rng, m_atomSize, m_size);
	const VkDeviceSize	offset	= randomOffset(rng, m_atomSize, m_size - size);
	void*				ptr;

	DE_ASSERT(!m_mapping);

	VK_CHECK(vkd.mapMemory(device, *m_memory, offset, size, 0u, &ptr));
	TCU_CHECK(ptr);
	m_mapping = new MemoryMapping(MemoryRange(offset, size), ptr, m_referenceMemory);

	return m_mapping;
}
    674 
// Unmap the memory and destroy the mapping object; the reference memory
// (and its flushed/defined state) persists across map/unmap cycles.
void MemoryObject::unmap (void)
{
	m_vkd.unmapMemory(m_device, *m_memory);

	delete m_mapping;
	m_mapping = DE_NULL;
}
    682 
    683 void MemoryObject::randomFlush (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
    684 {
    685 	const size_t				rangeCount	= (size_t)rng.getInt(1, 10);
    686 	vector<VkMappedMemoryRange>	ranges		(rangeCount);
    687 
    688 	randomRanges(rng, ranges, rangeCount, *m_memory, m_mapping->getRange().offset, m_mapping->getRange().size, m_atomSize);
    689 
    690 	for (size_t rangeNdx = 0; rangeNdx < ranges.size(); rangeNdx++)
    691 		m_referenceMemory.flush((size_t)ranges[rangeNdx].offset, (size_t)ranges[rangeNdx].size);
    692 
    693 	VK_CHECK(vkd.flushMappedMemoryRanges(device, (deUint32)ranges.size(), ranges.empty() ? DE_NULL : &ranges[0]));
    694 }
    695 
    696 void MemoryObject::randomInvalidate (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
    697 {
    698 	const size_t				rangeCount	= (size_t)rng.getInt(1, 10);
    699 	vector<VkMappedMemoryRange>	ranges		(rangeCount);
    700 
    701 	randomRanges(rng, ranges, rangeCount, *m_memory, m_mapping->getRange().offset, m_mapping->getRange().size, m_atomSize);
    702 
    703 	for (size_t rangeNdx = 0; rangeNdx < ranges.size(); rangeNdx++)
    704 		m_referenceMemory.invalidate((size_t)ranges[rangeNdx].offset, (size_t)ranges[rangeNdx].size);
    705 
    706 	VK_CHECK(vkd.invalidateMappedMemoryRanges(device, (deUint32)ranges.size(), ranges.empty() ? DE_NULL : &ranges[0]));
    707 }
    708 
enum
{
	// Use only 1/2 of each memory heap.
	// Keeps the random stress test from exhausting device memory heaps.
	MAX_MEMORY_USAGE_DIV = 2
};
    714 
    715 template<typename T>
    716 void removeFirstEqual (vector<T>& vec, const T& val)
    717 {
    718 	for (size_t ndx = 0; ndx < vec.size(); ndx++)
    719 	{
    720 		if (vec[ndx] == val)
    721 		{
    722 			vec[ndx] = vec.back();
    723 			vec.pop_back();
    724 			return;
    725 		}
    726 	}
    727 }
    728 
// Coarse classification of memory heaps for host-side budget tracking:
// device-local heaps vs. everything else (treated as system memory).
enum MemoryClass
{
	MEMORY_CLASS_SYSTEM = 0,
	MEMORY_CLASS_DEVICE,

	MEMORY_CLASS_LAST
};
    736 
    737 // \todo [2016-04-20 pyry] Consider estimating memory fragmentation
    738 class TotalMemoryTracker
    739 {
    740 public:
    741 					TotalMemoryTracker	(void)
    742 	{
    743 		std::fill(DE_ARRAY_BEGIN(m_usage), DE_ARRAY_END(m_usage), 0);
    744 	}
    745 
    746 	void			allocate			(MemoryClass memClass, VkDeviceSize size)
    747 	{
    748 		m_usage[memClass] += size;
    749 	}
    750 
    751 	void			free				(MemoryClass memClass, VkDeviceSize size)
    752 	{
    753 		DE_ASSERT(size <= m_usage[memClass]);
    754 		m_usage[memClass] -= size;
    755 	}
    756 
    757 	VkDeviceSize	getUsage			(MemoryClass memClass) const
    758 	{
    759 		return m_usage[memClass];
    760 	}
    761 
    762 	VkDeviceSize	getTotalUsage		(void) const
    763 	{
    764 		VkDeviceSize total = 0;
    765 		for (int ndx = 0; ndx < MEMORY_CLASS_LAST; ++ndx)
    766 			total += getUsage((MemoryClass)ndx);
    767 		return total;
    768 	}
    769 
    770 private:
    771 	VkDeviceSize	m_usage[MEMORY_CLASS_LAST];
    772 };
    773 
    774 class MemoryHeap
    775 {
    776 public:
    777 	MemoryHeap (const VkMemoryHeap&			heap,
    778 				const vector<MemoryType>&	memoryTypes,
    779 				const PlatformMemoryLimits&	memoryLimits,
    780 				const VkDeviceSize			nonCoherentAtomSize,
    781 				TotalMemoryTracker&			totalMemTracker)
    782 		: m_heap				(heap)
    783 		, m_memoryTypes			(memoryTypes)
    784 		, m_limits				(memoryLimits)
    785 		, m_nonCoherentAtomSize	(nonCoherentAtomSize)
    786 		, m_totalMemTracker		(totalMemTracker)
    787 		, m_usage				(0)
    788 	{
    789 	}
    790 
    791 	~MemoryHeap (void)
    792 	{
    793 		for (vector<MemoryObject*>::iterator iter = m_objects.begin(); iter != m_objects.end(); ++iter)
    794 			delete *iter;
    795 	}
    796 
    797 	bool								full			(void) const { return getAvailableMem() < m_nonCoherentAtomSize * (1 + REFERENCE_BYTES_PER_BYTE);	}
    798 	bool								empty			(void) const { return m_usage == 0;																	}
    799 
    800 	MemoryObject*						allocateRandom	(const DeviceInterface& vkd, VkDevice device, de::Random& rng)
    801 	{
    802 		const VkDeviceSize		availableMem	= getAvailableMem();
    803 
    804 		DE_ASSERT(availableMem > 0);
    805 
    806 		const MemoryType		type			= rng.choose<MemoryType>(m_memoryTypes.begin(), m_memoryTypes.end());
    807 		const VkDeviceSize		atomSize		= (type.type.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0
    808 												? 1
    809 												: m_nonCoherentAtomSize;
    810 		const VkDeviceSize		size			= randomSize(rng, atomSize, availableMem);
    811 
    812 		DE_ASSERT(size <= availableMem);
    813 
    814 		MemoryObject* const		object	= new MemoryObject(vkd, device, size, type.index, atomSize);
    815 
    816 		m_usage += size;
    817 		m_totalMemTracker.allocate(getMemoryClass(), size);
    818 		m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, size * REFERENCE_BYTES_PER_BYTE);
    819 		m_objects.push_back(object);
    820 
    821 		return object;
    822 	}
    823 
    824 	MemoryObject*						getRandomObject	(de::Random& rng) const
    825 	{
    826 		return rng.choose<MemoryObject*>(m_objects.begin(), m_objects.end());
    827 	}
    828 
    829 	void								free			(MemoryObject* object)
    830 	{
    831 		removeFirstEqual(m_objects, object);
    832 		m_usage -= object->getSize();
    833 		m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, object->getSize() * REFERENCE_BYTES_PER_BYTE);
    834 		m_totalMemTracker.free(getMemoryClass(), object->getSize());
    835 		delete object;
    836 	}
    837 
    838 private:
    839 	MemoryClass							getMemoryClass	(void) const
    840 	{
    841 		if ((m_heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    842 			return MEMORY_CLASS_DEVICE;
    843 		else
    844 			return MEMORY_CLASS_SYSTEM;
    845 	}
    846 
    847 	VkDeviceSize						getAvailableMem	(void) const
    848 	{
    849 		DE_ASSERT(m_usage <= m_heap.size/MAX_MEMORY_USAGE_DIV);
    850 
    851 		const VkDeviceSize	availableInHeap	= m_heap.size/MAX_MEMORY_USAGE_DIV - m_usage;
    852 		const bool			isUMA			= m_limits.totalDeviceLocalMemory == 0;
    853 
    854 		if (isUMA)
    855 		{
    856 			const VkDeviceSize	totalUsage	= m_totalMemTracker.getTotalUsage();
    857 			const VkDeviceSize	totalSysMem	= (VkDeviceSize)m_limits.totalSystemMemory;
    858 
    859 			DE_ASSERT(totalUsage <= totalSysMem);
    860 
    861 			return de::min(availableInHeap, (totalSysMem-totalUsage) / (1 + REFERENCE_BYTES_PER_BYTE));
    862 		}
    863 		else
    864 		{
    865 			const VkDeviceSize	totalUsage		= m_totalMemTracker.getTotalUsage();
    866 			const VkDeviceSize	totalSysMem		= (VkDeviceSize)m_limits.totalSystemMemory;
    867 
    868 			const MemoryClass	memClass		= getMemoryClass();
    869 			const VkDeviceSize	totalMemClass	= memClass == MEMORY_CLASS_SYSTEM
    870 												? (VkDeviceSize)(m_limits.totalSystemMemory / (1 + REFERENCE_BYTES_PER_BYTE))
    871 												: m_limits.totalDeviceLocalMemory;
    872 			const VkDeviceSize	usedMemClass	= m_totalMemTracker.getUsage(memClass);
    873 
    874 			DE_ASSERT(usedMemClass <= totalMemClass);
    875 
    876 			return de::min(de::min(availableInHeap, totalMemClass-usedMemClass), (totalSysMem - totalUsage) / REFERENCE_BYTES_PER_BYTE);
    877 		}
    878 	}
    879 
	const VkMemoryHeap			m_heap;					// Heap properties as reported by the implementation.
	const vector<MemoryType>	m_memoryTypes;			// Host-visible memory types backed by this heap.
	const PlatformMemoryLimits&	m_limits;				// Platform-level memory budgets.
	const VkDeviceSize			m_nonCoherentAtomSize;	// VkPhysicalDeviceLimits::nonCoherentAtomSize (caller clamps 0 to 1).
	TotalMemoryTracker&			m_totalMemTracker;		// Shared system/device usage accounting.

	VkDeviceSize				m_usage;				// Bytes currently allocated from this heap.
	vector<MemoryObject*>		m_objects;				// Live allocations owned by this heap.
};
    889 
    890 size_t getMemoryObjectSystemSize (Context& context)
    891 {
    892 	return computeDeviceMemorySystemMemFootprint(context.getDeviceInterface(), context.getDevice())
    893 		   + sizeof(MemoryObject)
    894 		   + sizeof(de::SharedPtr<MemoryObject>);
    895 }
    896 
    897 size_t getMemoryMappingSystemSize (void)
    898 {
    899 	return sizeof(MemoryMapping) + sizeof(de::SharedPtr<MemoryMapping>);
    900 }
    901 
// Stress test performing a pseudo-random sequence of memory operations:
// allocating/freeing device memory from host-visible heaps, mapping and
// unmapping it, reading/writing/modifying mapped ranges and flushing or
// invalidating them. Host-side memory cost of the tracking objects is
// charged to m_totalMemTracker so the test stays within platform limits.
class RandomMemoryMappingInstance : public TestInstance
{
public:
	RandomMemoryMappingInstance (Context& context, deUint32 seed)
		: TestInstance				(context)
		, m_memoryObjectSysMemSize	(getMemoryObjectSystemSize(context))
		, m_memoryMappingSysMemSize	(getMemoryMappingSystemSize())
		, m_memoryLimits			(getMemoryLimits(context.getTestContext().getPlatform().getVulkanPlatform()))
		, m_rng						(seed)
		, m_opNdx					(0)
	{
		const VkPhysicalDevice					physicalDevice		= context.getPhysicalDevice();
		const InstanceInterface&				vki					= context.getInstanceInterface();
		const VkPhysicalDeviceMemoryProperties	memoryProperties	= getPhysicalDeviceMemoryProperties(vki, physicalDevice);
		// \todo [2016-05-26 misojarvi] Remove zero check once drivers report correctly 1 instead of 0
		const VkDeviceSize						nonCoherentAtomSize	= context.getDeviceProperties().limits.nonCoherentAtomSize != 0
																	? context.getDeviceProperties().limits.nonCoherentAtomSize
																	: 1;

		// Initialize heaps: group the host-visible memory types by heap index
		// and create a MemoryHeap wrapper for each heap that has at least one.
		{
			vector<vector<MemoryType> >	memoryTypes	(memoryProperties.memoryHeapCount);

			for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < memoryProperties.memoryTypeCount; memoryTypeNdx++)
			{
				if (memoryProperties.memoryTypes[memoryTypeNdx].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
					memoryTypes[memoryProperties.memoryTypes[memoryTypeNdx].heapIndex].push_back(MemoryType(memoryTypeNdx, memoryProperties.memoryTypes[memoryTypeNdx]));
			}

			for (deUint32 heapIndex = 0; heapIndex < memoryProperties.memoryHeapCount; heapIndex++)
			{
				const VkMemoryHeap	heapInfo	= memoryProperties.memoryHeaps[heapIndex];

				if (!memoryTypes[heapIndex].empty())
				{
					const de::SharedPtr<MemoryHeap>	heap	(new MemoryHeap(heapInfo, memoryTypes[heapIndex], m_memoryLimits, nonCoherentAtomSize, m_totalMemTracker));

					TCU_CHECK_INTERNAL(!heap->full());

					m_memoryHeaps.push_back(heap);
				}
			}
		}
	}

	~RandomMemoryMappingInstance (void)
	{
	}

	// Performs one random operation per call; reports pass once opCount
	// operations have completed, incomplete otherwise.
	tcu::TestStatus iterate (void)
	{
		const size_t			opCount						= 100;
		// NOTE(review): the trailing numbers look like the intended overall
		// probability of each branch once the preceding branches have been
		// taken into account -- confirm against the if/else chain below.
		const float				memoryOpProbability			= 0.5f;		// 0.50
		const float				flushInvalidateProbability	= 0.4f;		// 0.20
		const float				mapProbability				= 0.50f;	// 0.15
		const float				unmapProbability			= 0.25f;	// 0.075

		const float				allocProbability			= 0.75f; // Versus free

		const VkDevice			device						= m_context.getDevice();
		const DeviceInterface&	vkd							= m_context.getDeviceInterface();

		// System memory charged so far: on UMA systems (no separate device
		// local memory) everything counts, otherwise only MEMORY_CLASS_SYSTEM.
		const VkDeviceSize		sysMemUsage					= (m_memoryLimits.totalDeviceLocalMemory == 0)
															? m_totalMemTracker.getTotalUsage()
															: m_totalMemTracker.getUsage(MEMORY_CLASS_SYSTEM);

		if (!m_memoryMappings.empty() && m_rng.getFloat() < memoryOpProbability)
		{
			// Perform operations on mapped memory
			MemoryMapping* const	mapping	= m_rng.choose<MemoryMapping*>(m_memoryMappings.begin(), m_memoryMappings.end());

			enum Op
			{
				OP_READ = 0,
				OP_WRITE,
				OP_MODIFY,
				OP_LAST
			};

			const Op op = (Op)(m_rng.getUint32() % OP_LAST);

			switch (op)
			{
				case OP_READ:
					mapping->randomRead(m_rng);
					break;

				case OP_WRITE:
					mapping->randomWrite(m_rng);
					break;

				case OP_MODIFY:
					mapping->randomModify(m_rng);
					break;

				default:
					DE_FATAL("Invalid operation");
			}
		}
		else if (!m_mappedMemoryObjects.empty() && m_rng.getFloat() < flushInvalidateProbability)
		{
			// Flush or invalidate a random mapped object
			MemoryObject* const	object	= m_rng.choose<MemoryObject*>(m_mappedMemoryObjects.begin(), m_mappedMemoryObjects.end());

			if (m_rng.getBool())
				object->randomFlush(vkd, device, m_rng);
			else
				object->randomInvalidate(vkd, device, m_rng);
		}
		else if (!m_mappedMemoryObjects.empty() && m_rng.getFloat() < unmapProbability)
		{
			// Unmap memory object
			MemoryObject* const	object	= m_rng.choose<MemoryObject*>(m_mappedMemoryObjects.begin(), m_mappedMemoryObjects.end());

			// Remove mapping
			removeFirstEqual(m_memoryMappings, object->getMapping());

			object->unmap();
			removeFirstEqual(m_mappedMemoryObjects, object);
			m_nonMappedMemoryObjects.push_back(object);

			// Release the host-side cost of tracking the mapping
			m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryMappingSysMemSize);
		}
		else if (!m_nonMappedMemoryObjects.empty() &&
				 (m_rng.getFloat() < mapProbability) &&
				 (sysMemUsage+m_memoryMappingSysMemSize <= (VkDeviceSize)m_memoryLimits.totalSystemMemory))
		{
			// Map memory object
			MemoryObject* const		object	= m_rng.choose<MemoryObject*>(m_nonMappedMemoryObjects.begin(), m_nonMappedMemoryObjects.end());
			MemoryMapping*			mapping	= object->mapRandom(vkd, device, m_rng);

			m_memoryMappings.push_back(mapping);
			m_mappedMemoryObjects.push_back(object);
			removeFirstEqual(m_nonMappedMemoryObjects, object);

			// Charge the host-side cost of tracking the mapping
			m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryMappingSysMemSize);
		}
		else
		{
			// Sort heaps based on capacity (full or not)
			vector<MemoryHeap*>		nonFullHeaps;
			vector<MemoryHeap*>		nonEmptyHeaps;

			if (sysMemUsage+m_memoryObjectSysMemSize <= (VkDeviceSize)m_memoryLimits.totalSystemMemory)
			{
				// For the duration of sorting reserve MemoryObject space from system memory
				m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);

				for (vector<de::SharedPtr<MemoryHeap> >::const_iterator heapIter = m_memoryHeaps.begin();
					 heapIter != m_memoryHeaps.end();
					 ++heapIter)
				{
					if (!(*heapIter)->full())
						nonFullHeaps.push_back(heapIter->get());

					if (!(*heapIter)->empty())
						nonEmptyHeaps.push_back(heapIter->get());
				}

				m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);
			}
			else
			{
				// Not possible to even allocate MemoryObject from system memory, look for non-empty heaps
				for (vector<de::SharedPtr<MemoryHeap> >::const_iterator heapIter = m_memoryHeaps.begin();
					 heapIter != m_memoryHeaps.end();
					 ++heapIter)
				{
					if (!(*heapIter)->empty())
						nonEmptyHeaps.push_back(heapIter->get());
				}
			}

			if (!nonFullHeaps.empty() && (nonEmptyHeaps.empty() || m_rng.getFloat() < allocProbability))
			{
				// Reserve MemoryObject from sys mem first
				m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);

				// Allocate more memory objects
				MemoryHeap* const	heap	= m_rng.choose<MemoryHeap*>(nonFullHeaps.begin(), nonFullHeaps.end());
				MemoryObject* const	object	= heap->allocateRandom(vkd, device, m_rng);

				m_nonMappedMemoryObjects.push_back(object);
			}
			else
			{
				// Free memory objects
				MemoryHeap* const		heap	= m_rng.choose<MemoryHeap*>(nonEmptyHeaps.begin(), nonEmptyHeaps.end());
				MemoryObject* const		object	= heap->getRandomObject(m_rng);

				// Remove mapping
				if (object->getMapping())
				{
					removeFirstEqual(m_memoryMappings, object->getMapping());
					m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, m_memoryMappingSysMemSize);
				}

				removeFirstEqual(m_mappedMemoryObjects, object);
				removeFirstEqual(m_nonMappedMemoryObjects, object);

				heap->free(object);
				m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);
			}
		}

		m_opNdx += 1;
		if (m_opNdx == opCount)
			return tcu::TestStatus::pass("Pass");
		else
			return tcu::TestStatus::incomplete();
	}

private:
	const size_t						m_memoryObjectSysMemSize;	// Host-side footprint of one MemoryObject.
	const size_t						m_memoryMappingSysMemSize;	// Host-side footprint of one MemoryMapping.
	const PlatformMemoryLimits			m_memoryLimits;

	de::Random							m_rng;
	size_t								m_opNdx;					// Operations executed so far.

	TotalMemoryTracker					m_totalMemTracker;
	vector<de::SharedPtr<MemoryHeap> >	m_memoryHeaps;				// Heaps that have at least one host-visible type.

	vector<MemoryObject*>				m_mappedMemoryObjects;		// Objects that currently have a mapping.
	vector<MemoryObject*>				m_nonMappedMemoryObjects;	// Objects allocated but not mapped.
	vector<MemoryMapping*>				m_memoryMappings;			// Active mappings of the mapped objects.
};
   1128 
// Operation applied to the mapped range by a test case; each value selects a
// pattern of flush/invalidate ranges (or a remap) in subMappedConfig().
enum Op
{
	OP_NONE = 0,					// Plain map/access; no flush or invalidate ranges.

	OP_FLUSH,						// Flush the whole mapped range.
	OP_SUB_FLUSH,					// Flush the middle half of the mapped range.
	OP_SUB_FLUSH_SEPARATE,			// Flush the two halves as separate ranges.
	OP_SUB_FLUSH_OVERLAPPING,		// Flush two overlapping sub-ranges.

	OP_INVALIDATE,					// Flush then invalidate the whole range.
	OP_SUB_INVALIDATE,				// Flush then invalidate the middle half.
	OP_SUB_INVALIDATE_SEPARATE,		// Flush then invalidate the two halves separately.
	OP_SUB_INVALIDATE_OVERLAPPING,	// Flush then invalidate two overlapping sub-ranges.

	OP_REMAP,						// Sets TestConfig::remap.

	OP_LAST
};
   1147 
   1148 TestConfig subMappedConfig (VkDeviceSize				allocationSize,
   1149 							const MemoryRange&			mapping,
   1150 							Op							op,
   1151 							deUint32					seed)
   1152 {
   1153 	TestConfig config;
   1154 
   1155 	config.allocationSize	= allocationSize;
   1156 	config.seed				= seed;
   1157 	config.mapping			= mapping;
   1158 	config.remap			= false;
   1159 
   1160 	switch (op)
   1161 	{
   1162 		case OP_NONE:
   1163 			return config;
   1164 
   1165 		case OP_REMAP:
   1166 			config.remap = true;
   1167 			return config;
   1168 
   1169 		case OP_FLUSH:
   1170 			config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
   1171 			return config;
   1172 
   1173 		case OP_SUB_FLUSH:
   1174 			DE_ASSERT(mapping.size / 4 > 0);
   1175 
   1176 			config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
   1177 			return config;
   1178 
   1179 		case OP_SUB_FLUSH_SEPARATE:
   1180 			DE_ASSERT(mapping.size / 2 > 0);
   1181 
   1182 			config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size /  2, mapping.size - (mapping.size / 2)));
   1183 			config.flushMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));
   1184 
   1185 			return config;
   1186 
   1187 		case OP_SUB_FLUSH_OVERLAPPING:
   1188 			DE_ASSERT((mapping.size / 3) > 0);
   1189 
   1190 			config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size /  3, mapping.size - (mapping.size / 2)));
   1191 			config.flushMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));
   1192 
   1193 			return config;
   1194 
   1195 		case OP_INVALIDATE:
   1196 			config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
   1197 			config.invalidateMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
   1198 			return config;
   1199 
   1200 		case OP_SUB_INVALIDATE:
   1201 			DE_ASSERT(mapping.size / 4 > 0);
   1202 
   1203 			config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
   1204 			config.invalidateMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
   1205 			return config;
   1206 
   1207 		case OP_SUB_INVALIDATE_SEPARATE:
   1208 			DE_ASSERT(mapping.size / 2 > 0);
   1209 
   1210 			config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size /  2, mapping.size - (mapping.size / 2)));
   1211 			config.flushMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));
   1212 
   1213 			config.invalidateMappings.push_back(MemoryRange(mapping.offset + mapping.size /  2, mapping.size - (mapping.size / 2)));
   1214 			config.invalidateMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));
   1215 
   1216 			return config;
   1217 
   1218 		case OP_SUB_INVALIDATE_OVERLAPPING:
   1219 			DE_ASSERT((mapping.size / 3) > 0);
   1220 
   1221 			config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size /  3, mapping.size - (mapping.size / 2)));
   1222 			config.flushMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));
   1223 
   1224 			config.invalidateMappings.push_back(MemoryRange(mapping.offset + mapping.size /  3, mapping.size - (mapping.size / 2)));
   1225 			config.invalidateMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));
   1226 
   1227 			return config;
   1228 
   1229 		default:
   1230 			DE_FATAL("Unknown Op");
   1231 			return TestConfig();
   1232 	}
   1233 }
   1234 
   1235 TestConfig fullMappedConfig (VkDeviceSize	allocationSize,
   1236 							 Op				op,
   1237 							 deUint32		seed)
   1238 {
   1239 	return subMappedConfig(allocationSize, MemoryRange(0, allocationSize), op, seed);
   1240 }
   1241 
   1242 } // anonymous
   1243 
   1244 tcu::TestCaseGroup* createMappingTests (tcu::TestContext& testCtx)
   1245 {
   1246 	de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "mapping", "Memory mapping tests."));
   1247 
   1248 	const VkDeviceSize allocationSizes[] =
   1249 	{
   1250 		33, 257, 4087, 8095, 1*1024*1024 + 1
   1251 	};
   1252 
   1253 	const VkDeviceSize offsets[] =
   1254 	{
   1255 		0, 17, 129, 255, 1025, 32*1024+1
   1256 	};
   1257 
   1258 	const VkDeviceSize sizes[] =
   1259 	{
   1260 		31, 255, 1025, 4085, 1*1024*1024 - 1
   1261 	};
   1262 
   1263 	const struct
   1264 	{
   1265 		const Op			op;
   1266 		const char* const	name;
   1267 	} ops[] =
   1268 	{
   1269 		{ OP_NONE,						"simple"					},
   1270 		{ OP_REMAP,						"remap"						},
   1271 		{ OP_FLUSH,						"flush"						},
   1272 		{ OP_SUB_FLUSH,					"subflush"					},
   1273 		{ OP_SUB_FLUSH_SEPARATE,		"subflush_separate"			},
   1274 		{ OP_SUB_FLUSH_SEPARATE,		"subflush_overlapping"		},
   1275 
   1276 		{ OP_INVALIDATE,				"invalidate"				},
   1277 		{ OP_SUB_INVALIDATE,			"subinvalidate"				},
   1278 		{ OP_SUB_INVALIDATE_SEPARATE,	"subinvalidate_separate"	},
   1279 		{ OP_SUB_INVALIDATE_SEPARATE,	"subinvalidate_overlapping"	}
   1280 	};
   1281 
   1282 	// .full
   1283 	{
   1284 		de::MovePtr<tcu::TestCaseGroup> fullGroup (new tcu::TestCaseGroup(testCtx, "full", "Map memory completely."));
   1285 
   1286 		for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
   1287 		{
   1288 			const VkDeviceSize				allocationSize		= allocationSizes[allocationSizeNdx];
   1289 			de::MovePtr<tcu::TestCaseGroup>	allocationSizeGroup	(new tcu::TestCaseGroup(testCtx, de::toString(allocationSize).c_str(), ""));
   1290 
   1291 			for (size_t opNdx = 0; opNdx < DE_LENGTH_OF_ARRAY(ops); opNdx++)
   1292 			{
   1293 				const Op			op		= ops[opNdx].op;
   1294 				const char* const	name	= ops[opNdx].name;
   1295 				const deUint32		seed	= (deUint32)(opNdx * allocationSizeNdx);
   1296 				const TestConfig	config	= fullMappedConfig(allocationSize, op, seed);
   1297 
   1298 				addFunctionCase(allocationSizeGroup.get(), name, name, testMemoryMapping, config);
   1299 			}
   1300 
   1301 			fullGroup->addChild(allocationSizeGroup.release());
   1302 		}
   1303 
   1304 		group->addChild(fullGroup.release());
   1305 	}
   1306 
   1307 	// .sub
   1308 	{
   1309 		de::MovePtr<tcu::TestCaseGroup> subGroup (new tcu::TestCaseGroup(testCtx, "sub", "Map part of the memory."));
   1310 
   1311 		for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
   1312 		{
   1313 			const VkDeviceSize				allocationSize		= allocationSizes[allocationSizeNdx];
   1314 			de::MovePtr<tcu::TestCaseGroup>	allocationSizeGroup	(new tcu::TestCaseGroup(testCtx, de::toString(allocationSize).c_str(), ""));
   1315 
   1316 			for (size_t offsetNdx = 0; offsetNdx < DE_LENGTH_OF_ARRAY(offsets); offsetNdx++)
   1317 			{
   1318 				const VkDeviceSize				offset			= offsets[offsetNdx];
   1319 
   1320 				if (offset >= allocationSize)
   1321 					continue;
   1322 
   1323 				de::MovePtr<tcu::TestCaseGroup>	offsetGroup		(new tcu::TestCaseGroup(testCtx, ("offset_" + de::toString(offset)).c_str(), ""));
   1324 
   1325 				for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
   1326 				{
   1327 					const VkDeviceSize				size		= sizes[sizeNdx];
   1328 
   1329 					if (offset + size > allocationSize)
   1330 						continue;
   1331 
   1332 					if (offset == 0 && size == allocationSize)
   1333 						continue;
   1334 
   1335 					de::MovePtr<tcu::TestCaseGroup>	sizeGroup	(new tcu::TestCaseGroup(testCtx, ("size_" + de::toString(size)).c_str(), ""));
   1336 
   1337 					for (size_t opNdx = 0; opNdx < DE_LENGTH_OF_ARRAY(ops); opNdx++)
   1338 					{
   1339 						const deUint32		seed	= (deUint32)(opNdx * allocationSizeNdx);
   1340 						const Op			op		= ops[opNdx].op;
   1341 						const char* const	name	= ops[opNdx].name;
   1342 						const TestConfig	config	= subMappedConfig(allocationSize, MemoryRange(offset, size), op, seed);
   1343 
   1344 						addFunctionCase(sizeGroup.get(), name, name, testMemoryMapping, config);
   1345 					}
   1346 
   1347 					offsetGroup->addChild(sizeGroup.release());
   1348 				}
   1349 
   1350 				allocationSizeGroup->addChild(offsetGroup.release());
   1351 			}
   1352 
   1353 			subGroup->addChild(allocationSizeGroup.release());
   1354 		}
   1355 
   1356 		group->addChild(subGroup.release());
   1357 	}
   1358 
   1359 	// .random
   1360 	{
   1361 		de::MovePtr<tcu::TestCaseGroup>	randomGroup	(new tcu::TestCaseGroup(testCtx, "random", "Random memory mapping tests."));
   1362 		de::Random						rng			(3927960301u);
   1363 
   1364 		for (size_t ndx = 0; ndx < 100; ndx++)
   1365 		{
   1366 			const deUint32		seed	= rng.getUint32();
   1367 			const std::string	name	= de::toString(ndx);
   1368 
   1369 			randomGroup->addChild(new InstanceFactory1<RandomMemoryMappingInstance, deUint32>(testCtx, tcu::NODETYPE_SELF_VALIDATE, de::toString(ndx), "Random case", seed));
   1370 		}
   1371 
   1372 		group->addChild(randomGroup.release());
   1373 	}
   1374 
   1375 	return group.release();
   1376 }
   1377 
   1378 } // memory
   1379 } // vkt
   1380