/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file  vktSparseResourcesImageMemoryAliasing.cpp
 * \brief Sparse image memory aliasing tests
 *//*--------------------------------------------------------------------*/

#include "vktSparseResourcesImageMemoryAliasing.hpp"
#include "vktSparseResourcesTestsUtil.hpp"
#include "vktSparseResourcesBase.hpp"
#include "vktTestCaseUtil.hpp"

#include "vkDefs.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkPlatform.hpp"
#include "vkPrograms.hpp"
#include "vkMemUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkTypeUtil.hpp"

#include "deStringUtil.hpp"
#include "deUniquePtr.hpp"
#include "deSharedPtr.hpp"
#include "tcuTexture.hpp"

#include <deMath.h>
#include <string>
#include <vector>

using namespace vk;

namespace vkt
{
namespace sparse
{
namespace
{

enum ShaderParameters
{
	MODULO_DIVISOR = 128
};

const std::string getCoordStr  (const ImageType		imageType,
								const std::string&	x,
								const std::string&	y,
								const std::string&	z)
{
	switch (imageType)
	{
		case IMAGE_TYPE_1D:
		case IMAGE_TYPE_BUFFER:
			return x;

		case IMAGE_TYPE_1D_ARRAY:
		case IMAGE_TYPE_2D:
			return "ivec2(" + x + "," + y + ")";

		case IMAGE_TYPE_2D_ARRAY:
		case IMAGE_TYPE_3D:
		case IMAGE_TYPE_CUBE:
		case IMAGE_TYPE_CUBE_ARRAY:
			return "ivec3(" + x + "," + y + "," + z + ")";

		default:
			DE_ASSERT(false);
			return "";
	}
}
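
// Examples of the coordinate expressions getCoordStr() splices into the generated
// shaders (illustrative only):
//
//   getCoordStr(IMAGE_TYPE_1D, "x", "y", "z") -> "x"
//   getCoordStr(IMAGE_TYPE_2D, "x", "y", "z") -> "ivec2(x,y)"
//   getCoordStr(IMAGE_TYPE_3D, "x", "y", "z") -> "ivec3(x,y,z)"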

tcu::UVec3 alignedDivide (const VkExtent3D& extent, const VkExtent3D& divisor)
{
	tcu::UVec3 result;

	result.x() = extent.width  / divisor.width  + ((extent.width  % divisor.width)  ? 1u : 0u);
	result.y() = extent.height / divisor.height + ((extent.height % divisor.height) ? 1u : 0u);
	result.z() = extent.depth  / divisor.depth  + ((extent.depth  % divisor.depth)  ? 1u : 0u);

	return result;
}
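
// alignedDivide() performs a per-component ceiling division, yielding the number of
// sparse blocks needed to cover a mip level:
//
//   result = (ceil(width / divisor.width), ceil(height / divisor.height), ceil(depth / divisor.depth))
//
// e.g. a 503x137x1 level with a 128x128x1 block granularity needs 4 x 2 x 1 blocks.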

class ImageSparseMemoryAliasingCase : public TestCase
{
public:
					ImageSparseMemoryAliasingCase	(tcu::TestContext&			testCtx,
													 const std::string&			name,
													 const std::string&			description,
													 const ImageType			imageType,
													 const tcu::UVec3&			imageSize,
													 const tcu::TextureFormat&	format,
													 const glu::GLSLVersion		glslVersion,
													 const bool					useDeviceGroups);

	void			initPrograms					(SourceCollections&			sourceCollections) const;
	TestInstance*	createInstance					(Context&					context) const;

private:
	const bool					m_useDeviceGroups;
	const ImageType				m_imageType;
	const tcu::UVec3			m_imageSize;
	const tcu::TextureFormat	m_format;
	const glu::GLSLVersion		m_glslVersion;
};

ImageSparseMemoryAliasingCase::ImageSparseMemoryAliasingCase (tcu::TestContext&			testCtx,
															  const std::string&		name,
															  const std::string&		description,
															  const ImageType			imageType,
															  const tcu::UVec3&			imageSize,
															  const tcu::TextureFormat&	format,
															  const glu::GLSLVersion	glslVersion,
															  const bool				useDeviceGroups)
	: TestCase				(testCtx, name, description)
	, m_useDeviceGroups		(useDeviceGroups)
	, m_imageType			(imageType)
	, m_imageSize			(imageSize)
	, m_format				(format)
	, m_glslVersion			(glslVersion)
{
}

class ImageSparseMemoryAliasingInstance : public SparseResourcesBaseInstance
{
public:
					ImageSparseMemoryAliasingInstance	(Context&								context,
														 const ImageType						imageType,
														 const tcu::UVec3&						imageSize,
														 const tcu::TextureFormat&				format,
														 const bool								useDeviceGroups);

	tcu::TestStatus	iterate								(void);

private:
	const bool					m_useDeviceGroups;
	const ImageType				m_imageType;
	const tcu::UVec3			m_imageSize;
	const tcu::TextureFormat	m_format;
};

ImageSparseMemoryAliasingInstance::ImageSparseMemoryAliasingInstance (Context&					context,
																	  const ImageType			imageType,
																	  const tcu::UVec3&			imageSize,
																	  const tcu::TextureFormat&	format,
																	  const bool				useDeviceGroups)
	: SparseResourcesBaseInstance	(context, useDeviceGroups)
	, m_useDeviceGroups				(useDeviceGroups)
	, m_imageType					(imageType)
	, m_imageSize					(imageSize)
	, m_format						(format)
{
}

tcu::TestStatus ImageSparseMemoryAliasingInstance::iterate (void)
{
	const InstanceInterface&			instance				= m_context.getInstanceInterface();

	{
		// Create logical device supporting both sparse and compute queues
		QueueRequirementsVec queueRequirements;
		queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
		queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u));

		createDeviceSupportingQueues(queueRequirements);
	}

	const VkPhysicalDevice				physicalDevice			= getPhysicalDevice();
	const tcu::UVec3					maxWorkGroupSize		= tcu::UVec3(128u, 128u, 64u);
	const tcu::UVec3					maxWorkGroupCount		= tcu::UVec3(65535u, 65535u, 65535u);
	const deUint32						maxWorkGroupInvocations	= 128u;
	VkImageCreateInfo					imageSparseInfo;
	VkSparseImageMemoryRequirements		aspectRequirements;
	std::vector<DeviceMemorySp>			deviceMemUniquePtrVec;

	// TODO: The feature/support checks below should follow creation of the image type,
	// and the queues should be retrieved outside the per-device loop.
	// The same cleanup applies to the other sparse image test files.

	// Check if image size does not exceed device limits
	if (!isImageSizeSupported(instance, physicalDevice, m_imageType, m_imageSize))
		TCU_THROW(NotSupportedError, "Image size not supported for device");

	// Check if sparse memory aliasing is supported
	if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseResidencyAliased)
		TCU_THROW(NotSupportedError, "Sparse memory aliasing not supported");

	// Check if device supports sparse operations for image type
	if (!checkSparseSupportForImageType(instance, physicalDevice, m_imageType))
		TCU_THROW(NotSupportedError, "Sparse residency for image type is not supported");

	const DeviceInterface&	deviceInterface	= getDeviceInterface();
	const Queue&			sparseQueue		= getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
	const Queue&			computeQueue	= getQueue(VK_QUEUE_COMPUTE_BIT, 0);

	// Go through all physical devices
	for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
	{
		const deUint32	firstDeviceID	= physDevID;
		const deUint32	secondDeviceID	= (firstDeviceID + 1) % m_numPhysicalDevices;

		imageSparseInfo.sType					= VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
		imageSparseInfo.pNext					= DE_NULL;
		imageSparseInfo.flags					= VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT |
												  VK_IMAGE_CREATE_SPARSE_ALIASED_BIT   |
												  VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
		imageSparseInfo.imageType				= mapImageType(m_imageType);
		imageSparseInfo.format					= mapTextureFormat(m_format);
		imageSparseInfo.extent					= makeExtent3D(getLayerSize(m_imageType, m_imageSize));
		imageSparseInfo.arrayLayers				= getNumLayers(m_imageType, m_imageSize);
		imageSparseInfo.samples					= VK_SAMPLE_COUNT_1_BIT;
		imageSparseInfo.tiling					= VK_IMAGE_TILING_OPTIMAL;
		imageSparseInfo.initialLayout			= VK_IMAGE_LAYOUT_UNDEFINED;
		imageSparseInfo.usage					= VK_IMAGE_USAGE_TRANSFER_DST_BIT |
												  VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
												  VK_IMAGE_USAGE_STORAGE_BIT;
		imageSparseInfo.sharingMode				= VK_SHARING_MODE_EXCLUSIVE;
		imageSparseInfo.queueFamilyIndexCount	= 0u;
		imageSparseInfo.pQueueFamilyIndices		= DE_NULL;

		if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY)
			imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;

		{
			// Assign maximum allowed mipmap levels to image
			VkImageFormatProperties imageFormatProperties;
			instance.getPhysicalDeviceImageFormatProperties(physicalDevice,
				imageSparseInfo.format,
				imageSparseInfo.imageType,
				imageSparseInfo.tiling,
				imageSparseInfo.usage,
				imageSparseInfo.flags,
				&imageFormatProperties);

			imageSparseInfo.mipLevels = getImageMaxMipLevels(imageFormatProperties, imageSparseInfo.extent);
		}
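
		// Sketch of the mip count computation (assuming getImageMaxMipLevels follows the
		// usual full-chain formula, clamped to the queried imageFormatProperties.maxMipLevels):
		//
		//   mipLevels = min(maxMipLevels, floor(log2(max(width, height, depth))) + 1)
		//
		// e.g. a 512x256x1 image yields floor(log2(512)) + 1 = 10 levels.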

		// Check if device supports sparse operations for image format
		if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageSparseInfo))
			TCU_THROW(NotSupportedError, "The image format does not support sparse operations");

		// Create sparse images
		const Unique<VkImage> imageRead(createImage(deviceInterface, getDevice(), &imageSparseInfo));
		const Unique<VkImage> imageWrite(createImage(deviceInterface, getDevice(), &imageSparseInfo));

		// Create semaphores to synchronize sparse binding operations with other operations on the sparse images
		const Unique<VkSemaphore> memoryBindSemaphoreTransfer(createSemaphore(deviceInterface, getDevice()));
		const Unique<VkSemaphore> memoryBindSemaphoreCompute(createSemaphore(deviceInterface, getDevice()));

		const VkSemaphore imageMemoryBindSemaphores[] = { memoryBindSemaphoreTransfer.get(), memoryBindSemaphoreCompute.get() };

		{
			std::vector<VkSparseImageMemoryBind> imageResidencyMemoryBinds;
			std::vector<VkSparseMemoryBind>		 imageReadMipTailBinds;
			std::vector<VkSparseMemoryBind>		 imageWriteMipTailBinds;

			// Get sparse image general memory requirements
			const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, getDevice(), *imageRead);

			// Check if required image memory size does not exceed device limits
			if (imageMemoryRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize)
				TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits");

			DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0);

			// Get sparse image sparse memory requirements
			const std::vector<VkSparseImageMemoryRequirements> sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, getDevice(), *imageRead);

			DE_ASSERT(sparseMemoryRequirements.size() != 0);

			const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);

			if (colorAspectIndex == NO_MATCH_FOUND)
				TCU_THROW(NotSupportedError, "Image aspect not supported - the test currently supports only VK_IMAGE_ASPECT_COLOR_BIT");

			aspectRequirements = sparseMemoryRequirements[colorAspectIndex];

			const VkImageAspectFlags	aspectMask			= aspectRequirements.formatProperties.aspectMask;
			const VkExtent3D			imageGranularity	= aspectRequirements.formatProperties.imageGranularity;

			DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0);

			const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any);

			if (memoryType == NO_MATCH_FOUND)
				return tcu::TestStatus::fail("No matching memory type found");

			// Bind memory for each layer
			for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx)
			{
				for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
				{
					const VkExtent3D			mipExtent		= mipLevelExtents(imageSparseInfo.extent, mipLevelNdx);
					const tcu::UVec3			sparseBlocks	= alignedDivide(mipExtent, imageGranularity);
					const deUint32				numSparseBlocks = sparseBlocks.x() * sparseBlocks.y() * sparseBlocks.z();
					const VkImageSubresource	subresource		= { aspectMask, mipLevelNdx, layerNdx };

					const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, getDevice(),
						imageMemoryRequirements.alignment * numSparseBlocks, memoryType, subresource, makeOffset3D(0u, 0u, 0u), mipExtent);

					deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

					imageResidencyMemoryBinds.push_back(imageMemoryBind);
				}

				if (!(aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
				{
					const VkSparseMemoryBind imageReadMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
						aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);

					deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageReadMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

					imageReadMipTailBinds.push_back(imageReadMipTailMemoryBind);

					const VkSparseMemoryBind imageWriteMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
						aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride);

					deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageWriteMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

					imageWriteMipTailBinds.push_back(imageWriteMipTailMemoryBind);
				}
			}

			if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
			{
				const VkSparseMemoryBind imageReadMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
					aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);

				deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageReadMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

				imageReadMipTailBinds.push_back(imageReadMipTailMemoryBind);

				const VkSparseMemoryBind imageWriteMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
					aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset);

				deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageWriteMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

				imageWriteMipTailBinds.push_back(imageWriteMipTailMemoryBind);
			}
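
			// Note: with VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT the mip tail is a single
			// opaque region shared by all array layers (bound once, above); without it each
			// layer has its own tail region at imageMipTailOffset + layerNdx * imageMipTailStride
			// (bound per layer, in the loop earlier).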

			const VkDeviceGroupBindSparseInfo devGroupBindSparseInfo =
			{
				VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR,	//VkStructureType							sType;
				DE_NULL,												//const void*								pNext;
				firstDeviceID,											//deUint32									resourceDeviceIndex;
				secondDeviceID,											//deUint32									memoryDeviceIndex;
			};

			VkBindSparseInfo bindSparseInfo =
			{
				VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,						//VkStructureType							sType;
				m_useDeviceGroups ? &devGroupBindSparseInfo : DE_NULL,	//const void*								pNext;
				0u,														//deUint32									waitSemaphoreCount;
				DE_NULL,												//const VkSemaphore*						pWaitSemaphores;
				0u,														//deUint32									bufferBindCount;
				DE_NULL,												//const VkSparseBufferMemoryBindInfo*		pBufferBinds;
				0u,														//deUint32									imageOpaqueBindCount;
				DE_NULL,												//const VkSparseImageOpaqueMemoryBindInfo*	pImageOpaqueBinds;
				0u,														//deUint32									imageBindCount;
				DE_NULL,												//const VkSparseImageMemoryBindInfo*		pImageBinds;
				2u,														//deUint32									signalSemaphoreCount;
				imageMemoryBindSemaphores								//const VkSemaphore*						pSignalSemaphores;
			};
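
			// In device-group mode the bind targets the resource on firstDeviceID while the
			// backing memory lives on secondDeviceID, exercising cross-device binding; with a
			// single physical device both indices resolve to device 0.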

			VkSparseImageMemoryBindInfo		  imageResidencyBindInfo[2];
			VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo[2];

			if (imageResidencyMemoryBinds.size() > 0)
			{
				imageResidencyBindInfo[0].image		= *imageRead;
				imageResidencyBindInfo[0].bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
				imageResidencyBindInfo[0].pBinds	= &imageResidencyMemoryBinds[0];

				imageResidencyBindInfo[1].image		= *imageWrite;
				imageResidencyBindInfo[1].bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
				imageResidencyBindInfo[1].pBinds	= &imageResidencyMemoryBinds[0];

				bindSparseInfo.imageBindCount		= 2u;
				bindSparseInfo.pImageBinds			= imageResidencyBindInfo;
			}

			if (imageReadMipTailBinds.size() > 0)
			{
				imageMipTailBindInfo[0].image		= *imageRead;
				imageMipTailBindInfo[0].bindCount	= static_cast<deUint32>(imageReadMipTailBinds.size());
				imageMipTailBindInfo[0].pBinds		= &imageReadMipTailBinds[0];

				imageMipTailBindInfo[1].image		= *imageWrite;
				imageMipTailBindInfo[1].bindCount	= static_cast<deUint32>(imageWriteMipTailBinds.size());
				imageMipTailBindInfo[1].pBinds		= &imageWriteMipTailBinds[0];

				bindSparseInfo.imageOpaqueBindCount = 2u;
				bindSparseInfo.pImageOpaqueBinds	= imageMipTailBindInfo;
			}
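
			// This is where the aliasing happens: both imageRead and imageWrite reference the
			// very same imageResidencyMemoryBinds (and thus the same VkDeviceMemory allocations)
			// for their resident mip levels, while each image receives its own, non-aliased mip
			// tail memory. Writes to imageWrite's resident levels should therefore be visible
			// when reading through imageRead.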

			// Submit sparse bind commands for execution
			VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
		}

		// Create command buffer for compute and transfer operations
		const Unique<VkCommandPool>	  commandPool  (makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
		const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

		std::vector<VkBufferImageCopy> bufferImageCopy(imageSparseInfo.mipLevels);

		{
			deUint32 bufferOffset = 0u;
			for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
			{
				bufferImageCopy[mipLevelNdx] = makeBufferImageCopy(mipLevelExtents(imageSparseInfo.extent, mipLevelNdx), imageSparseInfo.arrayLayers, mipLevelNdx, bufferOffset);
				bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
			}
		}
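
		// The staging buffer packs all mip levels back to back, each region starting at an
		// offset aligned to BUFFER_IMAGE_COPY_OFFSET_GRANULARITY (sizes illustrative only):
		//
		//   | level 0, all layers | level 1, all layers | ... | level N-1, all layers |
		//   ^ offset 0            ^ alignedSize(level 0)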

		// Start recording commands
		beginCommandBuffer(deviceInterface, *commandBuffer);

		const deUint32					imageSizeInBytes		= getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, imageSparseInfo.mipLevels, BUFFER_IMAGE_COPY_OFFSET_GRANULARITY);
		const VkBufferCreateInfo		inputBufferCreateInfo	= makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
		const Unique<VkBuffer>			inputBuffer				(createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
		const de::UniquePtr<Allocation>	inputBufferAlloc		(bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));

		std::vector<deUint8> referenceData(imageSizeInBytes);

		for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
		{
			const deUint32 mipLevelSizeInBytes	= getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx);
			const deUint32 bufferOffset			= static_cast<deUint32>(bufferImageCopy[mipLevelNdx].bufferOffset);

			deMemset(&referenceData[bufferOffset], mipLevelNdx + 1u, mipLevelSizeInBytes);
		}

		deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], imageSizeInBytes);

		flushMappedMemoryRange(deviceInterface, getDevice(), inputBufferAlloc->getMemory(), inputBufferAlloc->getOffset(), imageSizeInBytes);

		{
			const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier
			(
				VK_ACCESS_HOST_WRITE_BIT,
				VK_ACCESS_TRANSFER_READ_BIT,
				*inputBuffer,
				0u,
				imageSizeInBytes
			);

			deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
		}

		{
			const VkImageMemoryBarrier imageSparseTransferDstBarrier = makeImageMemoryBarrier
			(
				0u,
				VK_ACCESS_TRANSFER_WRITE_BIT,
				VK_IMAGE_LAYOUT_UNDEFINED,
				VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
				sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex  : VK_QUEUE_FAMILY_IGNORED,
				sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex ? computeQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED,
				*imageRead,
				makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
			);

			deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferDstBarrier);
		}
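
		// Note: when the sparse and compute queue families differ, the barrier above also
		// encodes a queue family ownership transfer of imageRead to the compute family;
		// otherwise both family indices are VK_QUEUE_FAMILY_IGNORED and no transfer occurs.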

		deviceInterface.cmdCopyBufferToImage(*commandBuffer, *inputBuffer, *imageRead, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);

		{
			const VkImageMemoryBarrier imageSparseTransferSrcBarrier = makeImageMemoryBarrier
			(
				VK_ACCESS_TRANSFER_WRITE_BIT,
				VK_ACCESS_TRANSFER_READ_BIT,
				VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
				VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
				*imageRead,
				makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
			);

			deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferSrcBarrier);
		}

		{
			const VkImageMemoryBarrier imageSparseShaderStorageBarrier = makeImageMemoryBarrier
			(
				0u,
				VK_ACCESS_SHADER_WRITE_BIT,
				VK_IMAGE_LAYOUT_UNDEFINED,
				VK_IMAGE_LAYOUT_GENERAL,
				*imageWrite,
				makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers)
			);

			deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseShaderStorageBarrier);
		}

		// Create descriptor set layout
		const Unique<VkDescriptorSetLayout> descriptorSetLayout(
			DescriptorSetLayoutBuilder()
			.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
			.build(deviceInterface, getDevice()));

		Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));

		Unique<VkDescriptorPool> descriptorPool(
			DescriptorPoolBuilder()
			.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, imageSparseInfo.mipLevels)
			.build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, imageSparseInfo.mipLevels));

		typedef de::SharedPtr< Unique<VkImageView> >		SharedVkImageView;
		std::vector<SharedVkImageView>						imageViews;
		imageViews.resize(imageSparseInfo.mipLevels);

		typedef de::SharedPtr< Unique<VkDescriptorSet> >	SharedVkDescriptorSet;
		std::vector<SharedVkDescriptorSet>					descriptorSets;
		descriptorSets.resize(imageSparseInfo.mipLevels);

		typedef de::SharedPtr< Unique<VkPipeline> >			SharedVkPipeline;
		std::vector<SharedVkPipeline>						computePipelines;
		computePipelines.resize(imageSparseInfo.mipLevels);

		for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
		{
			std::ostringstream name;
			name << "comp" << mipLevelNdx;

			// Create and bind compute pipeline
			Unique<VkShaderModule> shaderModule(createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get(name.str()), DE_NULL));

			computePipelines[mipLevelNdx]	= makeVkSharedPtr(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));
			VkPipeline computePipeline		= **computePipelines[mipLevelNdx];

			deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, computePipeline);

			// Create and bind descriptor set
			descriptorSets[mipLevelNdx]		= makeVkSharedPtr(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout));
			VkDescriptorSet descriptorSet	= **descriptorSets[mipLevelNdx];

			// Select which mipmap level to bind
			const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, mipLevelNdx, 1u, 0u, imageSparseInfo.arrayLayers);

			imageViews[mipLevelNdx] = makeVkSharedPtr(makeImageView(deviceInterface, getDevice(), *imageWrite, mapImageViewType(m_imageType), imageSparseInfo.format, subresourceRange));
			VkImageView imageView	= **imageViews[mipLevelNdx];

			const VkDescriptorImageInfo sparseImageInfo = makeDescriptorImageInfo(DE_NULL, imageView, VK_IMAGE_LAYOUT_GENERAL);

			DescriptorSetUpdateBuilder()
				.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &sparseImageInfo)
				.update(deviceInterface, getDevice());

			deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);

			const tcu::UVec3	gridSize			= getShaderGridSize(m_imageType, m_imageSize, mipLevelNdx);
			const deUint32		xWorkGroupSize		= std::min(std::min(gridSize.x(), maxWorkGroupSize.x()), maxWorkGroupInvocations);
			const deUint32		yWorkGroupSize		= std::min(std::min(gridSize.y(), maxWorkGroupSize.y()), maxWorkGroupInvocations / xWorkGroupSize);
			const deUint32		zWorkGroupSize		= std::min(std::min(gridSize.z(), maxWorkGroupSize.z()), maxWorkGroupInvocations / (xWorkGroupSize * yWorkGroupSize));

			const deUint32		xWorkGroupCount		= gridSize.x() / xWorkGroupSize + (gridSize.x() % xWorkGroupSize ? 1u : 0u);
			const deUint32		yWorkGroupCount		= gridSize.y() / yWorkGroupSize + (gridSize.y() % yWorkGroupSize ? 1u : 0u);
			const deUint32		zWorkGroupCount		= gridSize.z() / zWorkGroupSize + (gridSize.z() % zWorkGroupSize ? 1u : 0u);

			if (maxWorkGroupCount.x() < xWorkGroupCount ||
				maxWorkGroupCount.y() < yWorkGroupCount ||
				maxWorkGroupCount.z() < zWorkGroupCount)
				TCU_THROW(NotSupportedError, "Image size is not supported");

			deviceInterface.cmdDispatch(*commandBuffer, xWorkGroupCount, yWorkGroupCount, zWorkGroupCount);
		}
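
		// Worked example of the dispatch arithmetic (values assumed for illustration):
		// for gridSize = (503, 137, 1), maxWorkGroupSize = (128, 128, 64) and
		// maxWorkGroupInvocations = 128:
		//
		//   xWorkGroupSize = min(503, 128, 128)       = 128
		//   yWorkGroupSize = min(137, 128, 128 / 128) = 1
		//   zWorkGroupSize = min(1,   64,  128 / 128) = 1
		//   workgroup counts = (ceil(503/128), ceil(137/1), ceil(1/1)) = (4, 137, 1)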

		{
			const VkMemoryBarrier memoryBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT);

			deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 1u, &memoryBarrier, 0u, DE_NULL, 0u, DE_NULL);
		}

		const VkBufferCreateInfo		outputBufferCreateInfo	= makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
		const Unique<VkBuffer>			outputBuffer			(createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
		const de::UniquePtr<Allocation>	outputBufferAlloc		(bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));

		deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *imageRead, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *outputBuffer, static_cast<deUint32>(bufferImageCopy.size()), &bufferImageCopy[0]);

		{
			const VkBufferMemoryBarrier outputBufferBarrier = makeBufferMemoryBarrier
			(
				VK_ACCESS_TRANSFER_WRITE_BIT,
				VK_ACCESS_HOST_READ_BIT,
				*outputBuffer,
				0u,
				imageSizeInBytes
			);

			deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
		}

		// End recording commands
		endCommandBuffer(deviceInterface, *commandBuffer);

		const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT };

		// Submit commands for execution and wait for completion
		submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 2u, imageMemoryBindSemaphores, stageBits,
								0, DE_NULL, m_useDeviceGroups, firstDeviceID);

		// Retrieve data from buffer to host memory
		invalidateMappedMemoryRange(deviceInterface, getDevice(), outputBufferAlloc->getMemory(), outputBufferAlloc->getOffset(), imageSizeInBytes);

		const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());

		// Wait for sparse queue to become idle
		deviceInterface.queueWaitIdle(sparseQueue.queueHandle);

		for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
		{
			const tcu::UVec3				  gridSize		= getShaderGridSize(m_imageType, m_imageSize, mipLevelNdx);
			const deUint32					  bufferOffset	= static_cast<deUint32>(bufferImageCopy[mipLevelNdx].bufferOffset);
			const tcu::ConstPixelBufferAccess pixelBuffer	= tcu::ConstPixelBufferAccess(m_format, gridSize.x(), gridSize.y(), gridSize.z(), outputData + bufferOffset);

			for (deUint32 offsetZ = 0u; offsetZ < gridSize.z(); ++offsetZ)
			for (deUint32 offsetY = 0u; offsetY < gridSize.y(); ++offsetY)
			for (deUint32 offsetX = 0u; offsetX < gridSize.x(); ++offsetX)
			{
				const deUint32 index			= offsetX + (offsetY + offsetZ * gridSize.y()) * gridSize.x();
				const tcu::UVec4 referenceValue = tcu::UVec4(index % MODULO_DIVISOR, index % MODULO_DIVISOR, index % MODULO_DIVISOR, 1u);
				const tcu::UVec4 outputValue	= pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ);

				if (deMemCmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format.order)) != 0)
					return tcu::TestStatus::fail("Failed");
			}
		}
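
		// Levels below imageMipTailFirstLod are resident and aliased between the two images,
		// so reading them back through imageRead must return the pattern the compute shader
		// wrote into imageWrite (only the channels used by the format are compared). The mip
		// tail levels were bound to separate memory for each image, so imageRead's tail must
		// still hold the reference data uploaded from the staging buffer, as checked below.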

		for (deUint32 mipLevelNdx = aspectRequirements.imageMipTailFirstLod; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx)
		{
			const deUint32 mipLevelSizeInBytes	= getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx);
			const deUint32 bufferOffset			= static_cast<deUint32>(bufferImageCopy[mipLevelNdx].bufferOffset);

			if (deMemCmp(outputData + bufferOffset, &referenceData[bufferOffset], mipLevelSizeInBytes) != 0)
				return tcu::TestStatus::fail("Failed");
		}
	}

	return tcu::TestStatus::pass("Passed");
}

void ImageSparseMemoryAliasingCase::initPrograms(SourceCollections&	sourceCollections) const
{
	const char* const	versionDecl				= glu::getGLSLVersionDeclaration(m_glslVersion);
	const std::string	imageTypeStr			= getShaderImageType(m_format, m_imageType);
	const std::string	formatQualifierStr		= getShaderImageFormatQualifier(m_format);
	const std::string	formatDataStr			= getShaderImageDataType(m_format);
	const deUint32		maxWorkGroupInvocations = 128u;
	const tcu::UVec3	maxWorkGroupSize		= tcu::UVec3(128u, 128u, 64u);

	const tcu::UVec3	layerSize				= getLayerSize(m_imageType, m_imageSize);
	const deUint32		widestEdge				= std::max(std::max(layerSize.x(), layerSize.y()), layerSize.z());
	const deUint32		mipLevels				= static_cast<deUint32>(deFloatLog2(static_cast<float>(widestEdge))) + 1u;

	for (deUint32 mipLevelNdx = 0; mipLevelNdx < mipLevels; ++mipLevelNdx)
	{
		// Create compute program
		const tcu::UVec3	gridSize		= getShaderGridSize(m_imageType, m_imageSize, mipLevelNdx);
		const deUint32		xWorkGroupSize  = std::min(std::min(gridSize.x(), maxWorkGroupSize.x()), maxWorkGroupInvocations);
		const deUint32		yWorkGroupSize  = std::min(std::min(gridSize.y(), maxWorkGroupSize.y()), maxWorkGroupInvocations / xWorkGroupSize);
		const deUint32		zWorkGroupSize  = std::min(std::min(gridSize.z(), maxWorkGroupSize.z()), maxWorkGroupInvocations / (xWorkGroupSize * yWorkGroupSize));

		std::ostringstream src;

		src << versionDecl << "\n"
			<< "layout (local_size_x = " << xWorkGroupSize << ", local_size_y = " << yWorkGroupSize << ", local_size_z = " << zWorkGroupSize << ") in; \n"
			<< "layout (binding = 0, " << formatQualifierStr << ") writeonly uniform highp " << imageTypeStr << " u_image;\n"
			<< "void main (void)\n"
			<< "{\n"
			<< "	if( gl_GlobalInvocationID.x < " << gridSize.x() << " ) \n"
			<< "	if( gl_GlobalInvocationID.y < " << gridSize.y() << " ) \n"
			<< "	if( gl_GlobalInvocationID.z < " << gridSize.z() << " ) \n"
			<< "	{\n"
			<< "		int index = int(gl_GlobalInvocationID.x + (gl_GlobalInvocationID.y + gl_GlobalInvocationID.z*" << gridSize.y() << ")*" << gridSize.x() << ");\n"
			<< "		imageStore(u_image, " << getCoordStr(m_imageType, "gl_GlobalInvocationID.x", "gl_GlobalInvocationID.y", "gl_GlobalInvocationID.z") << ","
			<< formatDataStr << "( index % " << MODULO_DIVISOR << ", index % " << MODULO_DIVISOR << ", index % " << MODULO_DIVISOR << ", 1 )); \n"
			<< "	}\n"
			<< "}\n";

		std::ostringstream name;
		name << "comp" << mipLevelNdx;
		sourceCollections.glslSources.add(name.str()) << glu::ComputeSource(src.str());
	}
}
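
// For illustration, the program generated for a 128x128 2D r32i image at mip level 0
// would look roughly like this (exact type and format strings depend on the helpers above):
//
//   #version 440
//   layout (local_size_x = 128, local_size_y = 1, local_size_z = 1) in;
//   layout (binding = 0, r32i) writeonly uniform highp iimage2D u_image;
//   void main (void)
//   {
//       if( gl_GlobalInvocationID.x < 128 )
//       if( gl_GlobalInvocationID.y < 128 )
//       if( gl_GlobalInvocationID.z < 1 )
//       {
//           int index = int(gl_GlobalInvocationID.x + (gl_GlobalInvocationID.y + gl_GlobalInvocationID.z*128)*128);
//           imageStore(u_image, ivec2(gl_GlobalInvocationID.x,gl_GlobalInvocationID.y), ivec4( index % 128, index % 128, index % 128, 1 ));
//       }
//   }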

TestInstance* ImageSparseMemoryAliasingCase::createInstance (Context& context) const
{
	return new ImageSparseMemoryAliasingInstance(context, m_imageType, m_imageSize, m_format, m_useDeviceGroups);
}

} // anonymous namespace

tcu::TestCaseGroup* createImageSparseMemoryAliasingTestsCommon(tcu::TestContext& testCtx, de::MovePtr<tcu::TestCaseGroup> testGroup, const bool useDeviceGroup = false)
{
	static const deUint32 sizeCountPerImageType = 4u;

	struct ImageParameters
	{
		ImageType	imageType;
		tcu::UVec3	imageSizes[sizeCountPerImageType];
	};

	static const ImageParameters imageParametersArray[] =
	{
		{ IMAGE_TYPE_2D,		{ tcu::UVec3(512u, 256u, 1u),	tcu::UVec3(128u, 128u, 1u),	tcu::UVec3(503u, 137u, 1u),	tcu::UVec3(11u, 37u, 1u) } },
		{ IMAGE_TYPE_2D_ARRAY,	{ tcu::UVec3(512u, 256u, 6u),	tcu::UVec3(128u, 128u, 8u),	tcu::UVec3(503u, 137u, 3u),	tcu::UVec3(11u, 37u, 3u) } },
		{ IMAGE_TYPE_CUBE,		{ tcu::UVec3(256u, 256u, 1u),	tcu::UVec3(128u, 128u, 1u),	tcu::UVec3(137u, 137u, 1u),	tcu::UVec3(11u, 11u, 1u) } },
		{ IMAGE_TYPE_CUBE_ARRAY,{ tcu::UVec3(256u, 256u, 6u),	tcu::UVec3(128u, 128u, 8u),	tcu::UVec3(137u, 137u, 3u),	tcu::UVec3(11u, 11u, 3u) } },
		{ IMAGE_TYPE_3D,		{ tcu::UVec3(256u, 256u, 16u),	tcu::UVec3(128u, 128u, 8u),	tcu::UVec3(503u, 137u, 3u),	tcu::UVec3(11u, 37u, 3u) } }
	};

	static const tcu::TextureFormat formats[] =
	{
		tcu::TextureFormat(tcu::TextureFormat::R,	 tcu::TextureFormat::SIGNED_INT32),
		tcu::TextureFormat(tcu::TextureFormat::R,	 tcu::TextureFormat::SIGNED_INT16),
		tcu::TextureFormat(tcu::TextureFormat::R,	 tcu::TextureFormat::SIGNED_INT8),
		tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT32),
		tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT16),
		tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8)
	};

	for (deInt32 imageTypeNdx = 0; imageTypeNdx < DE_LENGTH_OF_ARRAY(imageParametersArray); ++imageTypeNdx)
	{
		const ImageType					imageType = imageParametersArray[imageTypeNdx].imageType;
		de::MovePtr<tcu::TestCaseGroup> imageTypeGroup(new tcu::TestCaseGroup(testCtx, getImageTypeName(imageType).c_str(), ""));

		for (deInt32 formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); ++formatNdx)
		{
			const tcu::TextureFormat&		format = formats[formatNdx];
			de::MovePtr<tcu::TestCaseGroup> formatGroup(new tcu::TestCaseGroup(testCtx, getShaderImageFormatQualifier(format).c_str(), ""));

			for (deInt32 imageSizeNdx = 0; imageSizeNdx < DE_LENGTH_OF_ARRAY(imageParametersArray[imageTypeNdx].imageSizes); ++imageSizeNdx)
			{
				const tcu::UVec3 imageSize = imageParametersArray[imageTypeNdx].imageSizes[imageSizeNdx];

				std::ostringstream stream;
				stream << imageSize.x() << "_" << imageSize.y() << "_" << imageSize.z();

				formatGroup->addChild(new ImageSparseMemoryAliasingCase(testCtx, stream.str(), "", imageType, imageSize, format, glu::GLSL_VERSION_440, useDeviceGroup));
			}
			imageTypeGroup->addChild(formatGroup.release());
		}
		testGroup->addChild(imageTypeGroup.release());
	}

	return testGroup.release();
}
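
// For reference, an example of the resulting test hierarchy (group names are produced by
// the helpers above, shown here as assumed values):
//
//   image_sparse_memory_aliasing/2d/r32i/512_256_1
//
// i.e. <root group>/<image type>/<format qualifier>/<width_height_depth>.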

tcu::TestCaseGroup* createImageSparseMemoryAliasingTests(tcu::TestContext& testCtx)
{
	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "image_sparse_memory_aliasing", "Sparse Image Memory Aliasing"));
	return createImageSparseMemoryAliasingTestsCommon(testCtx, testGroup);
}

tcu::TestCaseGroup* createDeviceGroupImageSparseMemoryAliasingTests(tcu::TestContext& testCtx)
{
	de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "device_group_image_sparse_memory_aliasing", "Sparse Image Memory Aliasing"));
	return createImageSparseMemoryAliasingTestsCommon(testCtx, testGroup, true);
}

} // sparse
} // vkt