/* Listing header from code-browser scrape (navigation text, not source) */
      1 /*------------------------------------------------------------------------
      2  * Vulkan Conformance Tests
      3  * ------------------------
      4  *
      5  * Copyright (c) 2015 The Khronos Group Inc.
      6  * Copyright (c) 2015 Samsung Electronics Co., Ltd.
      7  * Copyright (c) 2016 The Android Open Source Project
      8  *
      9  * Licensed under the Apache License, Version 2.0 (the "License");
     10  * you may not use this file except in compliance with the License.
     11  * You may obtain a copy of the License at
     12  *
     13  *      http://www.apache.org/licenses/LICENSE-2.0
     14  *
     15  * Unless required by applicable law or agreed to in writing, software
     16  * distributed under the License is distributed on an "AS IS" BASIS,
     17  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     18  * See the License for the specific language governing permissions and
     19  * limitations under the License.
     20  *
     21  *//*!
     22  * \file
     23  * \brief Opaque type (sampler, buffer, atomic counter, ...) indexing tests.
     24  *//*--------------------------------------------------------------------*/
     25 
     26 #include "vktOpaqueTypeIndexingTests.hpp"
     27 
     28 #include "vkRefUtil.hpp"
     29 #include "vkImageUtil.hpp"
     30 #include "vkMemUtil.hpp"
     31 #include "vkTypeUtil.hpp"
     32 #include "vkQueryUtil.hpp"
     33 
     34 #include "tcuTexture.hpp"
     35 #include "tcuTestLog.hpp"
     36 #include "tcuVectorUtil.hpp"
     37 #include "tcuTextureUtil.hpp"
     38 
     39 #include "deStringUtil.hpp"
     40 #include "deSharedPtr.hpp"
     41 #include "deRandom.hpp"
     42 
     43 #include "vktShaderExecutor.hpp"
     44 
     45 #include <sstream>
     46 
     47 namespace vkt
     48 {
     49 namespace shaderexecutor
     50 {
     51 
     52 namespace
     53 {
     54 
     55 using de::UniquePtr;
     56 using de::MovePtr;
     57 using de::SharedPtr;
     58 using std::vector;
     59 
     60 using namespace vk;
     61 
     62 typedef SharedPtr<Unique<VkSampler> > VkSamplerSp;
     63 
     64 // Buffer helper
     65 
//! Host-visible buffer helper: owns a VkBuffer and its bound host-visible
//! allocation (RAII via Unique/UniquePtr), with explicit flush/invalidate
//! for host<->device coherency.
class Buffer
{
public:
								Buffer				(Context& context, VkBufferUsageFlags usage, size_t size);

	VkBuffer					getBuffer			(void) const { return *m_buffer;					}
	void*						getHostPtr			(void) const { return m_allocation->getHostPtr();	}
	void						flush				(void);		//!< Flush whole mapped range after host writes.
	void						invalidate			(void);		//!< Invalidate whole mapped range before host reads.

private:
	const DeviceInterface&		m_vkd;
	const VkDevice				m_device;
	const Unique<VkBuffer>		m_buffer;
	const UniquePtr<Allocation>	m_allocation;
};
     82 
     83 typedef de::SharedPtr<Buffer> BufferSp;
     84 
     85 Move<VkBuffer> createBuffer (const DeviceInterface& vkd, VkDevice device, VkDeviceSize size, VkBufferUsageFlags usageFlags)
     86 {
     87 	const VkBufferCreateInfo	createInfo		=
     88 	{
     89 		VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
     90 		DE_NULL,
     91 		(VkBufferCreateFlags)0,
     92 		size,
     93 		usageFlags,
     94 		VK_SHARING_MODE_EXCLUSIVE,
     95 		0u,
     96 		DE_NULL
     97 	};
     98 	return createBuffer(vkd, device, &createInfo);
     99 }
    100 
    101 MovePtr<Allocation> allocateAndBindMemory (const DeviceInterface& vkd, VkDevice device, Allocator& allocator, VkBuffer buffer)
    102 {
    103 	MovePtr<Allocation>		alloc	(allocator.allocate(getBufferMemoryRequirements(vkd, device, buffer), MemoryRequirement::HostVisible));
    104 
    105 	VK_CHECK(vkd.bindBufferMemory(device, buffer, alloc->getMemory(), alloc->getOffset()));
    106 
    107 	return alloc;
    108 }
    109 
// Creates the buffer, then allocates and binds host-visible memory for it.
// All work happens in the member initializer list; destruction order (allocation
// after buffer) is handled by member declaration order in the class.
Buffer::Buffer (Context& context, VkBufferUsageFlags usage, size_t size)
	: m_vkd			(context.getDeviceInterface())
	, m_device		(context.getDevice())
	, m_buffer		(createBuffer			(context.getDeviceInterface(),
											 context.getDevice(),
											 (VkDeviceSize)size,
											 usage))
	, m_allocation	(allocateAndBindMemory	(context.getDeviceInterface(),
											 context.getDevice(),
											 context.getDefaultAllocator(),
											 *m_buffer))
{
}
    123 
// Flush the entire mapped range so host writes become visible to the device.
void Buffer::flush (void)
{
	flushMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
}
    128 
// Invalidate the entire mapped range so device writes become visible to the host.
void Buffer::invalidate (void)
{
	invalidateMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
}
    133 
    134 MovePtr<Buffer> createUniformIndexBuffer (Context& context, int numIndices, const int* indices)
    135 {
    136 	MovePtr<Buffer>		buffer	(new Buffer(context, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, sizeof(int)*numIndices));
    137 	int* const			bufPtr	= (int*)buffer->getHostPtr();
    138 
    139 	for (int ndx = 0; ndx < numIndices; ++ndx)
    140 		bufPtr[ndx] = indices[ndx];
    141 
    142 	buffer->flush();
    143 
    144 	return buffer;
    145 }
    146 
    147 // Tests
    148 
//! How the array index expression is written in the generated shader.
enum IndexExprType
{
	INDEX_EXPR_TYPE_CONST_LITERAL	= 0,	//!< Index is a literal constant.
	INDEX_EXPR_TYPE_CONST_EXPRESSION,		//!< Index is a constant expression.
	INDEX_EXPR_TYPE_UNIFORM,				//!< Index read from a uniform buffer (see createUniformIndexBuffer).
	INDEX_EXPR_TYPE_DYNAMIC_UNIFORM,		//!< Dynamically uniform index expression.

	INDEX_EXPR_TYPE_LAST
};
    158 
//! Texture dimensionalities exercised by the sampler indexing tests.
enum TextureType
{
	TEXTURE_TYPE_1D = 0,
	TEXTURE_TYPE_2D,
	TEXTURE_TYPE_CUBE,
	TEXTURE_TYPE_2D_ARRAY,
	TEXTURE_TYPE_3D,

	TEXTURE_TYPE_LAST
};
    169 
//! Base test case for opaque type indexing. Subclasses fill in m_shaderSpec;
//! initPrograms then builds shader sources from it via generateSources().
class OpaqueTypeIndexingCase : public TestCase
{
public:
										OpaqueTypeIndexingCase		(tcu::TestContext&			testCtx,
																	 const char*				name,
																	 const char*				description,
																	 const glu::ShaderType		shaderType,
																	 const IndexExprType		indexExprType);
	virtual								~OpaqueTypeIndexingCase		(void);

	virtual void						initPrograms				(vk::SourceCollections& programCollection) const
										{
											generateSources(m_shaderType, m_shaderSpec, programCollection);
										}

protected:
	const char*							m_name;				//!< Points at the constructor's name argument; not copied.
	const glu::ShaderType				m_shaderType;
	const IndexExprType					m_indexExprType;
	ShaderSpec							m_shaderSpec;		//!< Populated by subclasses before program build.
};
    191 
// Stores the shader type and index expression kind; the name pointer is kept
// as-is (the caller's string must outlive the case).
OpaqueTypeIndexingCase::OpaqueTypeIndexingCase (tcu::TestContext&			testCtx,
												const char*					name,
												const char*					description,
												const glu::ShaderType		shaderType,
												const IndexExprType			indexExprType)
	: TestCase			(testCtx, name, description)
	, m_name			(name)
	, m_shaderType		(shaderType)
	, m_indexExprType	(indexExprType)
{
}
    203 
// Out-of-line virtual destructor; no resources beyond members.
OpaqueTypeIndexingCase::~OpaqueTypeIndexingCase (void)
{
}
    207 
//! Abstract per-run instance for opaque type indexing tests. Subclasses
//! implement iterate(); checkSupported() throws NotSupportedError when the
//! required dynamic-indexing device feature is missing.
class OpaqueTypeIndexingTestInstance : public TestInstance
{
public:
										OpaqueTypeIndexingTestInstance		(Context&					context,
																			 const glu::ShaderType		shaderType,
																			 const ShaderSpec&			shaderSpec,
																			 const char*				name,
																			 const IndexExprType		indexExprType);
	virtual								~OpaqueTypeIndexingTestInstance		(void);

	virtual tcu::TestStatus				iterate								(void) = 0;

protected:
	void								checkSupported						(const VkDescriptorType descriptorType);

protected:
	tcu::TestContext&					m_testCtx;
	const glu::ShaderType				m_shaderType;
	const ShaderSpec&					m_shaderSpec;	//!< Reference to the case's spec; case must outlive the instance.
	const char*							m_name;			//!< Points at the case's name string; not copied.
	const IndexExprType					m_indexExprType;
};
    230 
// Caches references/values from the owning case; shaderSpec is held by
// reference, so the case object must outlive this instance.
OpaqueTypeIndexingTestInstance::OpaqueTypeIndexingTestInstance (Context&					context,
																const glu::ShaderType		shaderType,
																const ShaderSpec&			shaderSpec,
																const char*					name,
																const IndexExprType			indexExprType)
	: TestInstance		(context)
	, m_testCtx			(context.getTestContext())
	, m_shaderType		(shaderType)
	, m_shaderSpec		(shaderSpec)
	, m_name			(name)
	, m_indexExprType	(indexExprType)
{
}
    244 
// Out-of-line virtual destructor; no resources beyond members.
OpaqueTypeIndexingTestInstance::~OpaqueTypeIndexingTestInstance (void)
{
}
    248 
    249 void OpaqueTypeIndexingTestInstance::checkSupported (const VkDescriptorType descriptorType)
    250 {
    251 	const VkPhysicalDeviceFeatures& deviceFeatures = m_context.getDeviceFeatures();
    252 
    253 	if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
    254 	{
    255 		switch (descriptorType)
    256 		{
    257 			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
    258 				if (!deviceFeatures.shaderSampledImageArrayDynamicIndexing)
    259 					TCU_THROW(NotSupportedError, "Dynamic indexing of sampler arrays is not supported");
    260 				break;
    261 
    262 			case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
    263 				if (!deviceFeatures.shaderUniformBufferArrayDynamicIndexing)
    264 					TCU_THROW(NotSupportedError, "Dynamic indexing of uniform buffer arrays is not supported");
    265 				break;
    266 
    267 			case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    268 				if (!deviceFeatures.shaderStorageBufferArrayDynamicIndexing)
    269 					TCU_THROW(NotSupportedError, "Dynamic indexing of storage buffer arrays is not supported");
    270 				break;
    271 
    272 			default:
    273 				break;
    274 		}
    275 	}
    276 }
    277 
    278 static void declareUniformIndexVars (std::ostream& str, deUint32 bindingLocation, const char* varPrefix, int numVars)
    279 {
    280 	str << "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = " << bindingLocation << ", std140) uniform Indices\n{\n";
    281 
    282 	for (int varNdx = 0; varNdx < numVars; varNdx++)
    283 		str << "\thighp int " << varPrefix << varNdx << ";\n";
    284 
    285 	str << "};\n";
    286 }
    287 
// Maps a GLSL sampler data type to its texture dimensionality.
// Throws tcu::InternalError for non-sampler types.
static TextureType getTextureType (glu::DataType samplerType)
{
	switch (samplerType)
	{
		case glu::TYPE_SAMPLER_1D:
		case glu::TYPE_INT_SAMPLER_1D:
		case glu::TYPE_UINT_SAMPLER_1D:
		case glu::TYPE_SAMPLER_1D_SHADOW:
			return TEXTURE_TYPE_1D;

		case glu::TYPE_SAMPLER_2D:
		case glu::TYPE_INT_SAMPLER_2D:
		case glu::TYPE_UINT_SAMPLER_2D:
		case glu::TYPE_SAMPLER_2D_SHADOW:
			return TEXTURE_TYPE_2D;

		case glu::TYPE_SAMPLER_CUBE:
		case glu::TYPE_INT_SAMPLER_CUBE:
		case glu::TYPE_UINT_SAMPLER_CUBE:
		case glu::TYPE_SAMPLER_CUBE_SHADOW:
			return TEXTURE_TYPE_CUBE;

		case glu::TYPE_SAMPLER_2D_ARRAY:
		case glu::TYPE_INT_SAMPLER_2D_ARRAY:
		case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
		case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
			return TEXTURE_TYPE_2D_ARRAY;

		case glu::TYPE_SAMPLER_3D:
		case glu::TYPE_INT_SAMPLER_3D:
		case glu::TYPE_UINT_SAMPLER_3D:
			return TEXTURE_TYPE_3D;

		default:
			throw tcu::InternalError("Invalid sampler type");
	}
}
    325 
    326 static bool isShadowSampler (glu::DataType samplerType)
    327 {
    328 	return samplerType == glu::TYPE_SAMPLER_1D_SHADOW		||
    329 		   samplerType == glu::TYPE_SAMPLER_2D_SHADOW		||
    330 		   samplerType == glu::TYPE_SAMPLER_2D_ARRAY_SHADOW	||
    331 		   samplerType == glu::TYPE_SAMPLER_CUBE_SHADOW;
    332 }
    333 
// Maps a sampler type to the GLSL type its texture lookup returns:
// vec4 for float samplers, float for shadow samplers, ivec4/uvec4 for
// integer samplers. Throws tcu::InternalError for non-sampler types.
static glu::DataType getSamplerOutputType (glu::DataType samplerType)
{
	switch (samplerType)
	{
		case glu::TYPE_SAMPLER_1D:
		case glu::TYPE_SAMPLER_2D:
		case glu::TYPE_SAMPLER_CUBE:
		case glu::TYPE_SAMPLER_2D_ARRAY:
		case glu::TYPE_SAMPLER_3D:
			return glu::TYPE_FLOAT_VEC4;

		case glu::TYPE_SAMPLER_1D_SHADOW:
		case glu::TYPE_SAMPLER_2D_SHADOW:
		case glu::TYPE_SAMPLER_CUBE_SHADOW:
		case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
			return glu::TYPE_FLOAT;

		case glu::TYPE_INT_SAMPLER_1D:
		case glu::TYPE_INT_SAMPLER_2D:
		case glu::TYPE_INT_SAMPLER_CUBE:
		case glu::TYPE_INT_SAMPLER_2D_ARRAY:
		case glu::TYPE_INT_SAMPLER_3D:
			return glu::TYPE_INT_VEC4;

		case glu::TYPE_UINT_SAMPLER_1D:
		case glu::TYPE_UINT_SAMPLER_2D:
		case glu::TYPE_UINT_SAMPLER_CUBE:
		case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
		case glu::TYPE_UINT_SAMPLER_3D:
			return glu::TYPE_UINT_VEC4;

		default:
			throw tcu::InternalError("Invalid sampler type");
	}
}
    369 
    370 static tcu::TextureFormat getSamplerTextureFormat (glu::DataType samplerType)
    371 {
    372 	const glu::DataType		outType			= getSamplerOutputType(samplerType);
    373 	const glu::DataType		outScalarType	= glu::getDataTypeScalarType(outType);
    374 
    375 	switch (outScalarType)
    376 	{
    377 		case glu::TYPE_FLOAT:
    378 			if (isShadowSampler(samplerType))
    379 				return tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
    380 			else
    381 				return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8);
    382 
    383 		case glu::TYPE_INT:		return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::SIGNED_INT8);
    384 		case glu::TYPE_UINT:	return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8);
    385 
    386 		default:
    387 			throw tcu::InternalError("Invalid sampler type");
    388 	}
    389 }
    390 
    391 static glu::DataType getSamplerCoordType (glu::DataType samplerType)
    392 {
    393 	const TextureType	texType		= getTextureType(samplerType);
    394 	int					numCoords	= 0;
    395 
    396 	switch (texType)
    397 	{
    398 		case TEXTURE_TYPE_1D:		numCoords = 1;	break;
    399 		case TEXTURE_TYPE_2D:		numCoords = 2;	break;
    400 		case TEXTURE_TYPE_2D_ARRAY:	numCoords = 3;	break;
    401 		case TEXTURE_TYPE_CUBE:		numCoords = 3;	break;
    402 		case TEXTURE_TYPE_3D:		numCoords = 3;	break;
    403 		default:
    404 			DE_ASSERT(false);
    405 	}
    406 
    407 	if (isShadowSampler(samplerType))
    408 		numCoords += 1;
    409 
    410 	DE_ASSERT(de::inRange(numCoords, 1, 4));
    411 
    412 	return numCoords == 1 ? glu::TYPE_FLOAT : glu::getDataTypeFloatVec(numCoords);
    413 }
    414 
    415 static void fillTextureData (const tcu::PixelBufferAccess& access, de::Random& rnd)
    416 {
    417 	DE_ASSERT(access.getHeight() == 1 && access.getDepth() == 1);
    418 
    419 	if (access.getFormat().order == tcu::TextureFormat::D)
    420 	{
    421 		// \note Texture uses odd values, lookup even values to avoid precision issues.
    422 		const float values[] = { 0.1f, 0.3f, 0.5f, 0.7f, 0.9f };
    423 
    424 		for (int ndx = 0; ndx < access.getWidth(); ndx++)
    425 			access.setPixDepth(rnd.choose<float>(DE_ARRAY_BEGIN(values), DE_ARRAY_END(values)), ndx, 0);
    426 	}
    427 	else
    428 	{
    429 		TCU_CHECK_INTERNAL(access.getFormat().order == tcu::TextureFormat::RGBA && access.getFormat().getPixelSize() == 4);
    430 
    431 		for (int ndx = 0; ndx < access.getWidth(); ndx++)
    432 			*((deUint32*)access.getDataPtr() + ndx) = rnd.getUint32();
    433 	}
    434 }
    435 
// Maps the test's TextureType to the Vulkan image type. Cube textures use a
// 2D image (with 6 array layers; see createTestImage).
static vk::VkImageType getVkImageType (TextureType texType)
{
	switch (texType)
	{
		case TEXTURE_TYPE_1D:			return vk::VK_IMAGE_TYPE_1D;
		case TEXTURE_TYPE_2D:
		case TEXTURE_TYPE_2D_ARRAY:		return vk::VK_IMAGE_TYPE_2D;
		case TEXTURE_TYPE_CUBE:			return vk::VK_IMAGE_TYPE_2D;
		case TEXTURE_TYPE_3D:			return vk::VK_IMAGE_TYPE_3D;
		default:
			DE_FATAL("Impossible");
			return (vk::VkImageType)0;
	}
}
    450 
// Maps the test's TextureType to the Vulkan image view type.
static vk::VkImageViewType getVkImageViewType (TextureType texType)
{
	switch (texType)
	{
		case TEXTURE_TYPE_1D:			return vk::VK_IMAGE_VIEW_TYPE_1D;
		case TEXTURE_TYPE_2D:			return vk::VK_IMAGE_VIEW_TYPE_2D;
		case TEXTURE_TYPE_2D_ARRAY:		return vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY;
		case TEXTURE_TYPE_CUBE:			return vk::VK_IMAGE_VIEW_TYPE_CUBE;
		case TEXTURE_TYPE_3D:			return vk::VK_IMAGE_VIEW_TYPE_3D;
		default:
			DE_FATAL("Impossible");
			return (vk::VkImageViewType)0;
	}
}
    465 
    466 //! Test image with 1-pixel dimensions and no mipmaps
//! Owns a 1x1(x1) sampled image (6 layers for cube), its device memory and an
//! image view. The constructor uploads a single pixel value to every layer.
class TestImage
{
public:
								TestImage		(Context& context, TextureType texType, tcu::TextureFormat format, const void* colorValue);

	VkImageView					getImageView	(void) const { return *m_imageView; }

private:
	const Unique<VkImage>		m_image;
	const UniquePtr<Allocation>	m_allocation;
	const Unique<VkImageView>	m_imageView;
};
    479 
    480 Move<VkImage> createTestImage (const DeviceInterface& vkd, VkDevice device, TextureType texType, tcu::TextureFormat format)
    481 {
    482 	const VkImageCreateInfo		createInfo		=
    483 	{
    484 		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
    485 		DE_NULL,
    486 		(texType == TEXTURE_TYPE_CUBE ? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : (VkImageCreateFlags)0),
    487 		getVkImageType(texType),
    488 		mapTextureFormat(format),
    489 		makeExtent3D(1, 1, 1),
    490 		1u,
    491 		(texType == TEXTURE_TYPE_CUBE) ? 6u : 1u,
    492 		VK_SAMPLE_COUNT_1_BIT,
    493 		VK_IMAGE_TILING_OPTIMAL,
    494 		VK_IMAGE_USAGE_SAMPLED_BIT|VK_IMAGE_USAGE_TRANSFER_DST_BIT,
    495 		VK_SHARING_MODE_EXCLUSIVE,
    496 		0u,
    497 		DE_NULL,
    498 		VK_IMAGE_LAYOUT_UNDEFINED
    499 	};
    500 
    501 	return createImage(vkd, device, &createInfo);
    502 }
    503 
    504 de::MovePtr<Allocation> allocateAndBindMemory (const DeviceInterface& vkd, VkDevice device, Allocator& allocator, VkImage image)
    505 {
    506 	de::MovePtr<Allocation>		alloc	= allocator.allocate(getImageMemoryRequirements(vkd, device, image), MemoryRequirement::Any);
    507 
    508 	VK_CHECK(vkd.bindImageMemory(device, image, alloc->getMemory(), alloc->getOffset()));
    509 
    510 	return alloc;
    511 }
    512 
    513 Move<VkImageView> createTestImageView (const DeviceInterface& vkd, VkDevice device, VkImage image, TextureType texType, tcu::TextureFormat format)
    514 {
    515 	const bool					isDepthImage	= format.order == tcu::TextureFormat::D;
    516 	const VkImageViewCreateInfo	createInfo		=
    517 	{
    518 		VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
    519 		DE_NULL,
    520 		(VkImageViewCreateFlags)0,
    521 		image,
    522 		getVkImageViewType(texType),
    523 		mapTextureFormat(format),
    524 		{
    525 			VK_COMPONENT_SWIZZLE_IDENTITY,
    526 			VK_COMPONENT_SWIZZLE_IDENTITY,
    527 			VK_COMPONENT_SWIZZLE_IDENTITY,
    528 			VK_COMPONENT_SWIZZLE_IDENTITY,
    529 		},
    530 		{
    531 			(VkImageAspectFlags)(isDepthImage ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT),
    532 			0u,
    533 			1u,
    534 			0u,
    535 			(texType == TEXTURE_TYPE_CUBE ? 6u : 1u)
    536 		}
    537 	};
    538 
    539 	return createImageView(vkd, device, &createInfo);
    540 }
    541 
// Creates the image, memory and view (initializer list), then uploads the
// single pixel value to every layer: fills a host-visible staging buffer with
// one copy of the pixel per layer, records a one-time command buffer that
// transitions the image UNDEFINED -> TRANSFER_DST, copies the staging data in,
// transitions TRANSFER_DST -> SHADER_READ_ONLY, and waits on a fence so the
// staging buffer can be destroyed when this constructor returns.
TestImage::TestImage (Context& context, TextureType texType, tcu::TextureFormat format, const void* colorValue)
	: m_image		(createTestImage		(context.getDeviceInterface(), context.getDevice(), texType, format))
	, m_allocation	(allocateAndBindMemory	(context.getDeviceInterface(), context.getDevice(), context.getDefaultAllocator(), *m_image))
	, m_imageView	(createTestImageView	(context.getDeviceInterface(), context.getDevice(), *m_image, texType, format))
{
	const DeviceInterface&		vkd					= context.getDeviceInterface();
	const VkDevice				device				= context.getDevice();

	const size_t				pixelSize			= (size_t)format.getPixelSize();
	const deUint32				numLayers			= (texType == TEXTURE_TYPE_CUBE) ? 6u : 1u;
	const size_t				numReplicas			= (size_t)numLayers;
	const size_t				stagingBufferSize	= pixelSize*numReplicas;

	// Host-visible staging buffer: one pixel per layer.
	const VkBufferCreateInfo	stagingBufferInfo	=
	{
		VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
		DE_NULL,
		(VkBufferCreateFlags)0u,
		(VkDeviceSize)stagingBufferSize,
		(VkBufferCreateFlags)VK_BUFFER_USAGE_TRANSFER_SRC_BIT,	// NOTE(review): cast should arguably be VkBufferUsageFlags; value is identical
		VK_SHARING_MODE_EXCLUSIVE,
		0u,
		DE_NULL,
	};
	const Unique<VkBuffer>		stagingBuffer		(createBuffer(vkd, device, &stagingBufferInfo));
	const UniquePtr<Allocation>	alloc				(context.getDefaultAllocator().allocate(getBufferMemoryRequirements(vkd, device, *stagingBuffer), MemoryRequirement::HostVisible));

	VK_CHECK(vkd.bindBufferMemory(device, *stagingBuffer, alloc->getMemory(), alloc->getOffset()));

	// Replicate the source pixel once per layer into the staging buffer.
	for (size_t ndx = 0; ndx < numReplicas; ++ndx)
		deMemcpy((deUint8*)alloc->getHostPtr() + ndx*pixelSize, colorValue, pixelSize);

	{
		// One-time command buffer for the buffer-to-image copy.
		const VkCommandPoolCreateInfo		cmdPoolInfo		=
		{
			VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
			DE_NULL,
			(VkCommandPoolCreateFlags)VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
			context.getUniversalQueueFamilyIndex(),
		};
		const Unique<VkCommandPool>			cmdPool			(createCommandPool(vkd, device, &cmdPoolInfo));
		const VkCommandBufferAllocateInfo	allocInfo		=
		{
			VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
			DE_NULL,
			*cmdPool,
			VK_COMMAND_BUFFER_LEVEL_PRIMARY,
			1u,
		};
		const Unique<VkCommandBuffer>		cmdBuf			(allocateCommandBuffer(vkd, device, &allocInfo));
		const VkCommandBufferBeginInfo		beginInfo		=
		{
			VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
			DE_NULL,
			(VkCommandBufferUsageFlags)VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
			(const VkCommandBufferInheritanceInfo*)DE_NULL,
		};
		const VkImageAspectFlags			imageAspect		= (VkImageAspectFlags)(format.order == tcu::TextureFormat::D ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT);
		// Copy one 1x1x1 texel into each of the numLayers layers.
		const VkBufferImageCopy				copyInfo		=
		{
			0u,
			1u,
			1u,
			{
				imageAspect,
				0u,
				0u,
				numLayers
			},
			{ 0u, 0u, 0u },
			{ 1u, 1u, 1u }
		};
		// UNDEFINED -> TRANSFER_DST before the copy; no prior access to wait on.
		const VkImageMemoryBarrier			preCopyBarrier	=
		{
			VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,
			(VkAccessFlags)0u,
			(VkAccessFlags)VK_ACCESS_TRANSFER_WRITE_BIT,
			VK_IMAGE_LAYOUT_UNDEFINED,
			VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,
			*m_image,
			{
				imageAspect,
				0u,
				1u,
				0u,
				numLayers
			}
		};
		// TRANSFER_DST -> SHADER_READ_ONLY after the copy so shaders can sample.
		const VkImageMemoryBarrier			postCopyBarrier	=
		{
			VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,
			(VkAccessFlags)VK_ACCESS_TRANSFER_WRITE_BIT,
			(VkAccessFlags)VK_ACCESS_SHADER_READ_BIT,
			VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,
			*m_image,
			{
				imageAspect,
				0u,
				1u,
				0u,
				numLayers
			}
		};

		VK_CHECK(vkd.beginCommandBuffer(*cmdBuf, &beginInfo));
		vkd.cmdPipelineBarrier(*cmdBuf,
							   (VkPipelineStageFlags)VK_PIPELINE_STAGE_HOST_BIT,
							   (VkPipelineStageFlags)VK_PIPELINE_STAGE_TRANSFER_BIT,
							   (VkDependencyFlags)0u,
							   0u,
							   (const VkMemoryBarrier*)DE_NULL,
							   0u,
							   (const VkBufferMemoryBarrier*)DE_NULL,
							   1u,
							   &preCopyBarrier);
		vkd.cmdCopyBufferToImage(*cmdBuf, *stagingBuffer, *m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyInfo);
		vkd.cmdPipelineBarrier(*cmdBuf,
							   (VkPipelineStageFlags)VK_PIPELINE_STAGE_TRANSFER_BIT,
							   (VkPipelineStageFlags)VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
							   (VkDependencyFlags)0u,
							   0u,
							   (const VkMemoryBarrier*)DE_NULL,
							   0u,
							   (const VkBufferMemoryBarrier*)DE_NULL,
							   1u,
							   &postCopyBarrier);
		VK_CHECK(vkd.endCommandBuffer(*cmdBuf));

		{
			// Submit and block until the copy completes; staging resources are
			// destroyed when this scope (and the constructor) exits.
			const VkFenceCreateInfo		fenceInfo	=
			{
				VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
				DE_NULL,
				(VkFenceCreateFlags)0,
			};
			const Unique<VkFence>		fence		(createFence(vkd, device, &fenceInfo));
			const VkSubmitInfo			submitInfo	=
			{
				VK_STRUCTURE_TYPE_SUBMIT_INFO,
				DE_NULL,
				0u,
				(const VkSemaphore*)DE_NULL,
				(const VkPipelineStageFlags*)DE_NULL,
				1u,
				&cmdBuf.get(),
				0u,
				(const VkSemaphore*)DE_NULL,
			};

			VK_CHECK(vkd.queueSubmit(context.getUniversalQueue(), 1u, &submitInfo, *fence));
			VK_CHECK(vkd.waitForFences(device, 1u, &fence.get(), VK_TRUE, ~0ull));
		}
	}
}
    703 
    704 typedef SharedPtr<TestImage> TestImageSp;
    705 
    706 // SamplerIndexingCaseInstance
    707 
//! Test instance for indexing into arrays of samplers: performs NUM_LOOKUPS
//! lookups per invocation across NUM_SAMPLERS samplers.
class SamplerIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
{
public:
	enum
	{
		NUM_INVOCATIONS		= 64,	//!< Shader invocations per iteration.
		NUM_SAMPLERS		= 8,	//!< Size of the sampler array being indexed.
		NUM_LOOKUPS			= 4		//!< Lookups (indices) per invocation.
	};

								SamplerIndexingCaseInstance		(Context&					context,
																 const glu::ShaderType		shaderType,
																 const ShaderSpec&			shaderSpec,
																 const char*				name,
																 glu::DataType				samplerType,
																 const IndexExprType		indexExprType,
																 const std::vector<int>&	lookupIndices);
	virtual						~SamplerIndexingCaseInstance	(void);

	virtual tcu::TestStatus		iterate							(void);

protected:
	const glu::DataType			m_samplerType;		//!< GLSL sampler type under test.
	const std::vector<int>		m_lookupIndices;	//!< Sampler index used by each lookup.
};
    733 
// Stores the sampler type and a copy of the lookup indices; the rest is
// forwarded to the base instance.
SamplerIndexingCaseInstance::SamplerIndexingCaseInstance (Context&						context,
														  const glu::ShaderType			shaderType,
														  const ShaderSpec&				shaderSpec,
														  const char*					name,
														  glu::DataType					samplerType,
														  const IndexExprType			indexExprType,
														  const std::vector<int>&		lookupIndices)
	: OpaqueTypeIndexingTestInstance	(context, shaderType, shaderSpec, name, indexExprType)
	, m_samplerType						(samplerType)
	, m_lookupIndices					(lookupIndices)
{
}
    746 
// Out-of-line virtual destructor; no resources beyond members.
SamplerIndexingCaseInstance::~SamplerIndexingCaseInstance (void)
{
}
    750 
    751 bool isIntegerFormat (const tcu::TextureFormat& format)
    752 {
    753 	const tcu::TextureChannelClass	chnClass	= tcu::getTextureChannelClass(format.type);
    754 
    755 	return chnClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER ||
    756 		   chnClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER;
    757 }
    758 
    759 tcu::TestStatus SamplerIndexingCaseInstance::iterate (void)
    760 {
    761 	const int						numInvocations		= SamplerIndexingCaseInstance::NUM_INVOCATIONS;
    762 	const int						numSamplers			= SamplerIndexingCaseInstance::NUM_SAMPLERS;
    763 	const int						numLookups			= SamplerIndexingCaseInstance::NUM_LOOKUPS;
    764 	const glu::DataType				coordType			= getSamplerCoordType(m_samplerType);
    765 	const glu::DataType				outputType			= getSamplerOutputType(m_samplerType);
    766 	const tcu::TextureFormat		texFormat			= getSamplerTextureFormat(m_samplerType);
    767 	const int						outLookupStride		= numInvocations*getDataTypeScalarSize(outputType);
    768 	vector<float>					coords;
    769 	vector<deUint32>				outData;
    770 	vector<deUint8>					texData				(numSamplers * texFormat.getPixelSize());
    771 	const tcu::PixelBufferAccess	refTexAccess		(texFormat, numSamplers, 1, 1, &texData[0]);
    772 	de::Random						rnd					(deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
    773 	const TextureType				texType				= getTextureType(m_samplerType);
    774 	const tcu::Sampler::FilterMode	filterMode			= (isShadowSampler(m_samplerType) || isIntegerFormat(texFormat)) ? tcu::Sampler::NEAREST : tcu::Sampler::LINEAR;
    775 
    776 	// The shadow sampler with unnormalized coordinates is only used with the reference texture. Actual samplers in shaders use normalized coords.
    777 	const tcu::Sampler				refSampler			= isShadowSampler(m_samplerType)
    778 																? tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
    779 																				filterMode, filterMode, 0.0f, false /* non-normalized */,
    780 																				tcu::Sampler::COMPAREMODE_LESS)
    781 																: tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
    782 																				filterMode, filterMode);
    783 
    784 	const DeviceInterface&			vkd					= m_context.getDeviceInterface();
    785 	const VkDevice					device				= m_context.getDevice();
    786 	vector<TestImageSp>				images;
    787 	vector<VkSamplerSp>				samplers;
    788 	MovePtr<Buffer>					indexBuffer;
    789 	Move<VkDescriptorSetLayout>		extraResourcesLayout;
    790 	Move<VkDescriptorPool>			extraResourcesSetPool;
    791 	Move<VkDescriptorSet>			extraResourcesSet;
    792 
    793 	checkSupported(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
    794 
    795 	coords.resize(numInvocations * getDataTypeScalarSize(coordType));
    796 
    797 	if (texType == TEXTURE_TYPE_CUBE)
    798 	{
    799 		if (isShadowSampler(m_samplerType))
    800 		{
    801 			for (size_t i = 0; i < coords.size() / 4; i++)
    802 			{
    803 				coords[4 * i] = 1.0f;
    804 				coords[4 * i + 1] = coords[4 * i + 2] = coords[4 * i + 3] = 0.0f;
    805 			}
    806 		}
    807 		else
    808 		{
    809 			for (size_t i = 0; i < coords.size() / 3; i++)
    810 			{
    811 				coords[3 * i] = 1.0f;
    812 				coords[3 * i + 1] = coords[3 * i + 2] = 0.0f;
    813 			}
    814 		}
    815 	}
    816 
    817 	if (isShadowSampler(m_samplerType))
    818 	{
    819 		// Use different comparison value per invocation.
    820 		// \note Texture uses odd values, comparison even values.
    821 		const int	numCoordComps	= getDataTypeScalarSize(coordType);
    822 		const float	cmpValues[]		= { 0.0f, 0.2f, 0.4f, 0.6f, 0.8f, 1.0f };
    823 
    824 		for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
    825 			coords[invocationNdx*numCoordComps + (numCoordComps-1)] = rnd.choose<float>(DE_ARRAY_BEGIN(cmpValues), DE_ARRAY_END(cmpValues));
    826 	}
    827 
    828 	fillTextureData(refTexAccess, rnd);
    829 
    830 	outData.resize(numLookups*outLookupStride);
    831 
    832 	for (int ndx = 0; ndx < numSamplers; ++ndx)
    833 	{
    834 		images.push_back(TestImageSp(new TestImage(m_context, texType, texFormat, &texData[ndx * texFormat.getPixelSize()])));
    835 
    836 		{
    837 			tcu::Sampler	samplerCopy	(refSampler);
    838 			samplerCopy.normalizedCoords = true;
    839 
    840 			{
    841 				const VkSamplerCreateInfo	samplerParams	= mapSampler(samplerCopy, texFormat);
    842 				samplers.push_back(VkSamplerSp(new Unique<VkSampler>(createSampler(vkd, device, &samplerParams))));
    843 			}
    844 		}
    845 	}
    846 
    847 	if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
    848 		indexBuffer = createUniformIndexBuffer(m_context, numLookups, &m_lookupIndices[0]);
    849 
    850 	{
    851 		const VkDescriptorSetLayoutBinding		bindings[]	=
    852 		{
    853 			{ 0u,						VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,	(deUint32)numSamplers,		VK_SHADER_STAGE_ALL,	DE_NULL		},
    854 			{ (deUint32)numSamplers,	VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,			1u,							VK_SHADER_STAGE_ALL,	DE_NULL		}
    855 		};
    856 		const VkDescriptorSetLayoutCreateInfo	layoutInfo	=
    857 		{
    858 			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
    859 			DE_NULL,
    860 			(VkDescriptorSetLayoutCreateFlags)0u,
    861 			DE_LENGTH_OF_ARRAY(bindings),
    862 			bindings,
    863 		};
    864 
    865 		extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
    866 	}
    867 
    868 	{
    869 		const VkDescriptorPoolSize			poolSizes[]	=
    870 		{
    871 			{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,	(deUint32)numSamplers	},
    872 			{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,			1u,						}
    873 		};
    874 		const VkDescriptorPoolCreateInfo	poolInfo	=
    875 		{
    876 			VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
    877 			DE_NULL,
    878 			(VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
    879 			1u,		// maxSets
    880 			DE_LENGTH_OF_ARRAY(poolSizes),
    881 			poolSizes,
    882 		};
    883 
    884 		extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
    885 	}
    886 
    887 	{
    888 		const VkDescriptorSetAllocateInfo	allocInfo	=
    889 		{
    890 			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
    891 			DE_NULL,
    892 			*extraResourcesSetPool,
    893 			1u,
    894 			&extraResourcesLayout.get(),
    895 		};
    896 
    897 		extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
    898 	}
    899 
    900 	{
    901 		vector<VkDescriptorImageInfo>	imageInfos			(numSamplers);
    902 		const VkWriteDescriptorSet		descriptorWrite		=
    903 		{
    904 			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
    905 			DE_NULL,
    906 			*extraResourcesSet,
    907 			0u,		// dstBinding
    908 			0u,		// dstArrayElement
    909 			(deUint32)numSamplers,
    910 			VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
    911 			&imageInfos[0],
    912 			(const VkDescriptorBufferInfo*)DE_NULL,
    913 			(const VkBufferView*)DE_NULL,
    914 		};
    915 
    916 		for (int ndx = 0; ndx < numSamplers; ++ndx)
    917 		{
    918 			imageInfos[ndx].sampler		= **samplers[ndx];
    919 			imageInfos[ndx].imageView	= images[ndx]->getImageView();
    920 			imageInfos[ndx].imageLayout	= VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    921 		}
    922 
    923 		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
    924 	}
    925 
    926 	if (indexBuffer)
    927 	{
    928 		const VkDescriptorBufferInfo	bufferInfo	=
    929 		{
    930 			indexBuffer->getBuffer(),
    931 			0u,
    932 			VK_WHOLE_SIZE
    933 		};
    934 		const VkWriteDescriptorSet		descriptorWrite		=
    935 		{
    936 			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
    937 			DE_NULL,
    938 			*extraResourcesSet,
    939 			(deUint32)numSamplers,	// dstBinding
    940 			0u,						// dstArrayElement
    941 			1u,
    942 			VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
    943 			(const VkDescriptorImageInfo*)DE_NULL,
    944 			&bufferInfo,
    945 			(const VkBufferView*)DE_NULL,
    946 		};
    947 
    948 		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
    949 	}
    950 
    951 	{
    952 		std::vector<void*>			inputs;
    953 		std::vector<void*>			outputs;
    954 		std::vector<int>			expandedIndices;
    955 		UniquePtr<ShaderExecutor>	executor		(createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
    956 
    957 		inputs.push_back(&coords[0]);
    958 
    959 		if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
    960 		{
    961 			expandedIndices.resize(numInvocations * m_lookupIndices.size());
    962 			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
    963 			{
    964 				for (int invNdx = 0; invNdx < numInvocations; invNdx++)
    965 					expandedIndices[lookupNdx*numInvocations + invNdx] = m_lookupIndices[lookupNdx];
    966 			}
    967 
    968 			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
    969 				inputs.push_back(&expandedIndices[lookupNdx*numInvocations]);
    970 		}
    971 
    972 		for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
    973 			outputs.push_back(&outData[outLookupStride*lookupNdx]);
    974 
    975 		executor->execute(numInvocations, &inputs[0], &outputs[0], *extraResourcesSet);
    976 	}
    977 
    978 	{
    979 		tcu::TestLog&		log				= m_context.getTestContext().getLog();
    980 		tcu::TestStatus		testResult		= tcu::TestStatus::pass("Pass");
    981 
    982 		if (isShadowSampler(m_samplerType))
    983 		{
    984 			const int			numCoordComps	= getDataTypeScalarSize(coordType);
    985 
    986 			TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 1);
    987 
    988 			// Each invocation may have different results.
    989 			for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
    990 			{
    991 				const float	coord	= coords[invocationNdx*numCoordComps + (numCoordComps-1)];
    992 
    993 				for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
    994 				{
    995 					const int		texNdx		= m_lookupIndices[lookupNdx];
    996 					const float		result		= *((const float*)(const deUint8*)&outData[lookupNdx*outLookupStride + invocationNdx]);
    997 					const float		reference	= refTexAccess.sample2DCompare(refSampler, tcu::Sampler::NEAREST, coord, (float)texNdx, 0.0f, tcu::IVec3(0));
    998 
    999 					if (de::abs(result-reference) > 0.005f)
   1000 					{
   1001 						log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx << ", lookup " << lookupNdx << ": expected "
   1002 							<< reference << ", got " << result
   1003 							<< tcu::TestLog::EndMessage;
   1004 
   1005 						if (testResult.getCode() == QP_TEST_RESULT_PASS)
   1006 							testResult = tcu::TestStatus::fail("Got invalid lookup result");
   1007 					}
   1008 				}
   1009 			}
   1010 		}
   1011 		else
   1012 		{
   1013 			TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 4);
   1014 
   1015 			// Validate results from first invocation
   1016 			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
   1017 			{
   1018 				const int		texNdx	= m_lookupIndices[lookupNdx];
   1019 				const deUint8*	resPtr	= (const deUint8*)&outData[lookupNdx*outLookupStride];
   1020 				bool			isOk;
   1021 
   1022 				if (outputType == glu::TYPE_FLOAT_VEC4)
   1023 				{
   1024 					const float			threshold		= 1.0f / 256.0f;
   1025 					const tcu::Vec4		reference		= refTexAccess.getPixel(texNdx, 0);
   1026 					const float*		floatPtr		= (const float*)resPtr;
   1027 					const tcu::Vec4		result			(floatPtr[0], floatPtr[1], floatPtr[2], floatPtr[3]);
   1028 
   1029 					isOk = boolAll(lessThanEqual(abs(reference-result), tcu::Vec4(threshold)));
   1030 
   1031 					if (!isOk)
   1032 					{
   1033 						log << tcu::TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
   1034 							<< reference << ", got " << result
   1035 							<< tcu::TestLog::EndMessage;
   1036 					}
   1037 				}
   1038 				else
   1039 				{
   1040 					const tcu::UVec4	reference		= refTexAccess.getPixelUint(texNdx, 0);
   1041 					const deUint32*		uintPtr			= (const deUint32*)resPtr;
   1042 					const tcu::UVec4	result			(uintPtr[0], uintPtr[1], uintPtr[2], uintPtr[3]);
   1043 
   1044 					isOk = boolAll(equal(reference, result));
   1045 
   1046 					if (!isOk)
   1047 					{
   1048 						log << tcu::TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
   1049 							<< reference << ", got " << result
   1050 							<< tcu::TestLog::EndMessage;
   1051 					}
   1052 				}
   1053 
   1054 				if (!isOk && testResult.getCode() == QP_TEST_RESULT_PASS)
   1055 					testResult = tcu::TestStatus::fail("Got invalid lookup result");
   1056 			}
   1057 
   1058 			// Check results of other invocations against first one
   1059 			for (int invocationNdx = 1; invocationNdx < numInvocations; invocationNdx++)
   1060 			{
   1061 				for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
   1062 				{
   1063 					const deUint32*		refPtr		= &outData[lookupNdx*outLookupStride];
   1064 					const deUint32*		resPtr		= refPtr + invocationNdx*4;
   1065 					bool				isOk		= true;
   1066 
   1067 					for (int ndx = 0; ndx < 4; ndx++)
   1068 						isOk = isOk && (refPtr[ndx] == resPtr[ndx]);
   1069 
   1070 					if (!isOk)
   1071 					{
   1072 						log << tcu::TestLog::Message << "ERROR: invocation " << invocationNdx << " result "
   1073 							<< tcu::formatArray(tcu::Format::HexIterator<deUint32>(resPtr), tcu::Format::HexIterator<deUint32>(resPtr+4))
   1074 							<< " for lookup " << lookupNdx << " doesn't match result from first invocation "
   1075 							<< tcu::formatArray(tcu::Format::HexIterator<deUint32>(refPtr), tcu::Format::HexIterator<deUint32>(refPtr+4))
   1076 							<< tcu::TestLog::EndMessage;
   1077 
   1078 						if (testResult.getCode() == QP_TEST_RESULT_PASS)
   1079 							testResult = tcu::TestStatus::fail("Inconsistent lookup results");
   1080 					}
   1081 				}
   1082 			}
   1083 		}
   1084 
   1085 		return testResult;
   1086 	}
   1087 }
   1088 
// Test case for indexing into an array of samplers.
// Shader source is generated in createShaderSpec(); execution and result
// verification are delegated to SamplerIndexingCaseInstance.
class SamplerIndexingCase : public OpaqueTypeIndexingCase
{
public:
								SamplerIndexingCase			(tcu::TestContext&			testCtx,
															 const char*				name,
															 const char*				description,
															 const glu::ShaderType		shaderType,
															 glu::DataType				samplerType,
															 IndexExprType				indexExprType);
	virtual						~SamplerIndexingCase		(void);

	virtual TestInstance*		createInstance				(Context& ctx) const;

private:
	// Not copyable: copy ctor and assignment are declared but never defined.
								SamplerIndexingCase			(const SamplerIndexingCase&);
	SamplerIndexingCase&		operator=					(const SamplerIndexingCase&);

	// Builds m_shaderSpec (inputs, outputs, GLSL source) and fills m_lookupIndices.
	void						createShaderSpec			(void);

	const glu::DataType			m_samplerType;		// Sampler type used in the array (e.g. glu::TYPE_SAMPLER_2D).
	const int					m_numSamplers;		// Size of the sampler array.
	const int					m_numLookups;		// Number of texture() lookups / result outputs.
	std::vector<int>			m_lookupIndices;	// Sampler array index used by each lookup.
};
   1113 
// Constructor: records case parameters, generates the shader spec and then
// runs the base-class init() hook (defined in OpaqueTypeIndexingCase).
SamplerIndexingCase::SamplerIndexingCase (tcu::TestContext&			testCtx,
										  const char*				name,
										  const char*				description,
										  const glu::ShaderType		shaderType,
										  glu::DataType				samplerType,
										  IndexExprType				indexExprType)
	: OpaqueTypeIndexingCase	(testCtx, name, description, shaderType, indexExprType)
	, m_samplerType				(samplerType)
	, m_numSamplers				(SamplerIndexingCaseInstance::NUM_SAMPLERS)
	, m_numLookups				(SamplerIndexingCaseInstance::NUM_LOOKUPS)
	, m_lookupIndices			(m_numLookups)
{
	createShaderSpec();
	init();
}
   1129 
// Destructor: nothing to release; members clean up via their own destructors.
SamplerIndexingCase::~SamplerIndexingCase (void)
{
}
   1133 
   1134 TestInstance* SamplerIndexingCase::createInstance (Context& ctx) const
   1135 {
   1136 	return new SamplerIndexingCaseInstance(ctx,
   1137 										   m_shaderType,
   1138 										   m_shaderSpec,
   1139 										   m_name,
   1140 										   m_samplerType,
   1141 										   m_indexExprType,
   1142 										   m_lookupIndices);
   1143 }
   1144 
   1145 void SamplerIndexingCase::createShaderSpec (void)
   1146 {
   1147 	de::Random			rnd				(deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
   1148 	const char*			samplersName	= "texSampler";
   1149 	const char*			coordsName		= "coords";
   1150 	const char*			indicesPrefix	= "index";
   1151 	const char*			resultPrefix	= "result";
   1152 	const glu::DataType	coordType		= getSamplerCoordType(m_samplerType);
   1153 	const glu::DataType	outType			= getSamplerOutputType(m_samplerType);
   1154 	std::ostringstream	global, code;
   1155 
   1156 	for (int ndx = 0; ndx < m_numLookups; ndx++)
   1157 		m_lookupIndices[ndx] = rnd.getInt(0, m_numSamplers-1);
   1158 
   1159 	m_shaderSpec.inputs.push_back(Symbol(coordsName, glu::VarType(coordType, glu::PRECISION_HIGHP)));
   1160 
   1161 	if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
   1162 		global << "#extension GL_EXT_gpu_shader5 : require\n";
   1163 
   1164 	if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
   1165 		global << "const highp int indexBase = 1;\n";
   1166 
   1167 	global <<
   1168 		"layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0) uniform highp " << getDataTypeName(m_samplerType) << " " << samplersName << "[" << m_numSamplers << "];\n";
   1169 
   1170 	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
   1171 	{
   1172 		for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
   1173 		{
   1174 			const std::string varName = indicesPrefix + de::toString(lookupNdx);
   1175 			m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
   1176 		}
   1177 	}
   1178 	else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
   1179 		declareUniformIndexVars(global, (deUint32)m_numSamplers, indicesPrefix, m_numLookups);
   1180 
   1181 	for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
   1182 	{
   1183 		const std::string varName = resultPrefix + de::toString(lookupNdx);
   1184 		m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(outType, glu::PRECISION_HIGHP)));
   1185 	}
   1186 
   1187 	for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
   1188 	{
   1189 		code << resultPrefix << "" << lookupNdx << " = texture(" << samplersName << "[";
   1190 
   1191 		if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
   1192 			code << m_lookupIndices[lookupNdx];
   1193 		else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
   1194 			code << "indexBase + " << (m_lookupIndices[lookupNdx]-1);
   1195 		else
   1196 			code << indicesPrefix << lookupNdx;
   1197 
   1198 		code << "], " << coordsName << ");\n";
   1199 	}
   1200 
   1201 	m_shaderSpec.globalDeclarations	= global.str();
   1202 	m_shaderSpec.source				= code.str();
   1203 }
   1204 
// Interface block storage class exercised by the block-array indexing cases.
enum BlockType
{
	BLOCKTYPE_UNIFORM = 0,	// "uniform" interface block (uniform buffer)
	BLOCKTYPE_BUFFER,		// "buffer" interface block (storage buffer)

	BLOCKTYPE_LAST
};
   1212 
// Runtime instance for the block-array indexing case: creates one buffer per
// block instance, binds them as an arrayed descriptor, runs the shader and
// verifies each read returned the expected per-instance value.
class BlockArrayIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
{
public:
	enum
	{
		NUM_INVOCATIONS		= 32,	// Shader invocations per execution
		NUM_INSTANCES		= 4,	// Block array size
		NUM_READS			= 4		// Reads (and outputs) per invocation
	};

									BlockArrayIndexingCaseInstance	(Context&						context,
																	 const glu::ShaderType			shaderType,
																	 const ShaderSpec&				shaderSpec,
																	 const char*					name,
																	 BlockType						blockType,
																	 const IndexExprType			indexExprType,
																	 const std::vector<int>&		readIndices,
																	 const std::vector<deUint32>&	inValues);
	virtual							~BlockArrayIndexingCaseInstance	(void);

	virtual tcu::TestStatus			iterate							(void);

private:
	const BlockType					m_blockType;
	// References to data owned by the corresponding BlockArrayIndexingCase;
	// the case object must outlive this instance.
	const std::vector<int>&			m_readIndices;
	const std::vector<deUint32>&	m_inValues;
};
   1240 
// Constructor: stores the block type and keeps references (not copies) to the
// read indices and input values owned by the test case object.
BlockArrayIndexingCaseInstance::BlockArrayIndexingCaseInstance (Context&						context,
																const glu::ShaderType			shaderType,
																const ShaderSpec&				shaderSpec,
																const char*						name,
																BlockType						blockType,
																const IndexExprType				indexExprType,
																const std::vector<int>&			readIndices,
																const std::vector<deUint32>&	inValues)
	: OpaqueTypeIndexingTestInstance	(context, shaderType, shaderSpec, name, indexExprType)
	, m_blockType						(blockType)
	, m_readIndices						(readIndices)
	, m_inValues						(inValues)
{
}
   1255 
// Destructor: no owned resources beyond what members release themselves.
BlockArrayIndexingCaseInstance::~BlockArrayIndexingCaseInstance (void)
{
}
   1259 
// Runs the block-array indexing test: uploads one buffer per block instance,
// builds the extra-resources descriptor set (block array at binding 0,
// optional index uniform buffer after it), executes the shader for all
// invocations and checks every read against the uploaded values.
tcu::TestStatus BlockArrayIndexingCaseInstance::iterate (void)
{
	const int					numInvocations		= NUM_INVOCATIONS;
	const int					numReads			= NUM_READS;
	std::vector<deUint32>		outValues			(numInvocations*numReads);

	tcu::TestLog&				log					= m_context.getTestContext().getLog();
	tcu::TestStatus				testResult			= tcu::TestStatus::pass("Pass");

	std::vector<int>			expandedIndices;
	std::vector<void*>			inputs;
	std::vector<void*>			outputs;
	// Uniform vs. storage block selects both the buffer usage and the descriptor type.
	const VkBufferUsageFlags	bufferUsage			= m_blockType == BLOCKTYPE_UNIFORM ? VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
	const VkDescriptorType		descriptorType		= m_blockType == BLOCKTYPE_UNIFORM ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;

	const DeviceInterface&		vkd					= m_context.getDeviceInterface();
	const VkDevice				device				= m_context.getDevice();

	// \note Using separate buffer per element - might want to test
	// offsets & single buffer in the future.
	vector<BufferSp>			buffers				(m_inValues.size());
	MovePtr<Buffer>				indexBuffer;

	Move<VkDescriptorSetLayout>	extraResourcesLayout;
	Move<VkDescriptorPool>		extraResourcesSetPool;
	Move<VkDescriptorSet>		extraResourcesSet;

	checkSupported(descriptorType);

	// Upload one 32-bit value into its own buffer per block instance.
	for (size_t bufferNdx = 0; bufferNdx < m_inValues.size(); ++bufferNdx)
	{
		buffers[bufferNdx] = BufferSp(new Buffer(m_context, bufferUsage, sizeof(deUint32)));
		*(deUint32*)buffers[bufferNdx]->getHostPtr() = m_inValues[bufferNdx];
		buffers[bufferNdx]->flush();
	}

	// Uniform-style indexing reads the indices from a dedicated uniform buffer.
	if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
		indexBuffer = createUniformIndexBuffer(m_context, numReads, &m_readIndices[0]);

	// Descriptor set layout: arrayed block binding at 0, index UBO right after it.
	{
		const VkDescriptorSetLayoutBinding		bindings[]	=
		{
			{ 0u,							descriptorType,						(deUint32)m_inValues.size(),	VK_SHADER_STAGE_ALL,	DE_NULL		},
			{ (deUint32)m_inValues.size(),	VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,	1u,								VK_SHADER_STAGE_ALL,	DE_NULL		}
		};
		const VkDescriptorSetLayoutCreateInfo	layoutInfo	=
		{
			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
			DE_NULL,
			(VkDescriptorSetLayoutCreateFlags)0u,
			DE_LENGTH_OF_ARRAY(bindings),
			bindings,
		};

		extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
	}

	// Pool sized for exactly one set with the two binding types above.
	{
		const VkDescriptorPoolSize			poolSizes[]	=
		{
			{ descriptorType,						(deUint32)m_inValues.size()	},
			{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,	1u,							}
		};
		const VkDescriptorPoolCreateInfo	poolInfo	=
		{
			VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			(VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
			1u,		// maxSets
			DE_LENGTH_OF_ARRAY(poolSizes),
			poolSizes,
		};

		extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
	}

	{
		const VkDescriptorSetAllocateInfo	allocInfo	=
		{
			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,
			*extraResourcesSetPool,
			1u,
			&extraResourcesLayout.get(),
		};

		extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
	}

	// Write the whole block-buffer array into binding 0 with a single update.
	// \note bufferInfos is filled after descriptorWrite takes its address but
	// before updateDescriptorSets() reads it.
	{
		vector<VkDescriptorBufferInfo>	bufferInfos			(m_inValues.size());
		const VkWriteDescriptorSet		descriptorWrite		=
		{
			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
			DE_NULL,
			*extraResourcesSet,
			0u,		// dstBinding
			0u,		// dstArrayElement
			(deUint32)m_inValues.size(),
			descriptorType,
			(const VkDescriptorImageInfo*)DE_NULL,
			&bufferInfos[0],
			(const VkBufferView*)DE_NULL,
		};

		for (size_t ndx = 0; ndx < m_inValues.size(); ++ndx)
		{
			bufferInfos[ndx].buffer		= buffers[ndx]->getBuffer();
			bufferInfos[ndx].offset		= 0u;
			bufferInfos[ndx].range		= VK_WHOLE_SIZE;
		}

		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
	}

	// Index uniform buffer (only present for INDEX_EXPR_TYPE_UNIFORM).
	if (indexBuffer)
	{
		const VkDescriptorBufferInfo	bufferInfo	=
		{
			indexBuffer->getBuffer(),
			0u,
			VK_WHOLE_SIZE
		};
		const VkWriteDescriptorSet		descriptorWrite		=
		{
			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
			DE_NULL,
			*extraResourcesSet,
			(deUint32)m_inValues.size(),	// dstBinding
			0u,								// dstArrayElement
			1u,
			VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
			(const VkDescriptorImageInfo*)DE_NULL,
			&bufferInfo,
			(const VkBufferView*)DE_NULL,
		};

		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
	}

	// Dynamic-uniform indexing passes each read's index as a per-invocation
	// input; replicate every index across all invocations.
	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
	{
		expandedIndices.resize(numInvocations * m_readIndices.size());

		for (int readNdx = 0; readNdx < numReads; readNdx++)
		{
			int* dst = &expandedIndices[numInvocations*readNdx];
			std::fill(dst, dst+numInvocations, m_readIndices[readNdx]);
		}

		for (int readNdx = 0; readNdx < numReads; readNdx++)
			inputs.push_back(&expandedIndices[readNdx*numInvocations]);
	}

	for (int readNdx = 0; readNdx < numReads; readNdx++)
		outputs.push_back(&outValues[readNdx*numInvocations]);

	{
		UniquePtr<ShaderExecutor>	executor	(createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));

		executor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0], *extraResourcesSet);
	}

	// Every invocation must have read the value stored in the indexed instance.
	// Log each mismatch; record failure only once.
	for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
	{
		for (int readNdx = 0; readNdx < numReads; readNdx++)
		{
			const deUint32	refValue	= m_inValues[m_readIndices[readNdx]];
			const deUint32	resValue	= outValues[readNdx*numInvocations + invocationNdx];

			if (refValue != resValue)
			{
				log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx
					<< ", read " << readNdx << ": expected "
					<< tcu::toHex(refValue) << ", got " << tcu::toHex(resValue)
					<< tcu::TestLog::EndMessage;

				if (testResult.getCode() == QP_TEST_RESULT_PASS)
					testResult = tcu::TestStatus::fail("Invalid result value");
			}
		}
	}

	return testResult;
}
   1445 
// Test case for indexing into an array of uniform/storage interface blocks.
// Shader source is generated in createShaderSpec(); execution is delegated
// to BlockArrayIndexingCaseInstance.
class BlockArrayIndexingCase : public OpaqueTypeIndexingCase
{
public:
								BlockArrayIndexingCase		(tcu::TestContext&			testCtx,
															 const char*				name,
															 const char*				description,
															 BlockType					blockType,
															 IndexExprType				indexExprType,
															 const glu::ShaderType		shaderType);
	virtual						~BlockArrayIndexingCase		(void);

	virtual TestInstance*		createInstance				(Context& ctx) const;

private:
	// Not copyable (declared but not defined).
								BlockArrayIndexingCase		(const BlockArrayIndexingCase&);
	BlockArrayIndexingCase&		operator=					(const BlockArrayIndexingCase&);

	// Builds m_shaderSpec and fills m_readIndices / m_inValues.
	void						createShaderSpec			(void);

	const BlockType				m_blockType;	// Uniform vs. storage block
	std::vector<int>			m_readIndices;	// Block instance index used by each read
	std::vector<deUint32>		m_inValues;		// Value stored in each block instance
};
   1469 
// Constructor: records parameters, generates the shader spec and runs the
// base-class init() hook.
BlockArrayIndexingCase::BlockArrayIndexingCase (tcu::TestContext&			testCtx,
												const char*					name,
												const char*					description,
												BlockType					blockType,
												IndexExprType				indexExprType,
												const glu::ShaderType		shaderType)
	: OpaqueTypeIndexingCase	(testCtx, name, description, shaderType, indexExprType)
	, m_blockType				(blockType)
	, m_readIndices				(BlockArrayIndexingCaseInstance::NUM_READS)
	, m_inValues				(BlockArrayIndexingCaseInstance::NUM_INSTANCES)
{
	createShaderSpec();
	init();
}
   1484 
// Destructor: nothing to release explicitly.
BlockArrayIndexingCase::~BlockArrayIndexingCase (void)
{
}
   1488 
   1489 TestInstance* BlockArrayIndexingCase::createInstance (Context& ctx) const
   1490 {
   1491 	return new BlockArrayIndexingCaseInstance(ctx,
   1492 											  m_shaderType,
   1493 											  m_shaderSpec,
   1494 											  m_name,
   1495 											  m_blockType,
   1496 											  m_indexExprType,
   1497 											  m_readIndices,
   1498 											  m_inValues);
   1499 }
   1500 
   1501 void BlockArrayIndexingCase::createShaderSpec (void)
   1502 {
   1503 	const int			numInstances	= BlockArrayIndexingCaseInstance::NUM_INSTANCES;
   1504 	const int			numReads		= BlockArrayIndexingCaseInstance::NUM_READS;
   1505 	de::Random			rnd				(deInt32Hash(m_shaderType) ^ deInt32Hash(m_blockType) ^ deInt32Hash(m_indexExprType));
   1506 	const char*			blockName		= "Block";
   1507 	const char*			instanceName	= "block";
   1508 	const char*			indicesPrefix	= "index";
   1509 	const char*			resultPrefix	= "result";
   1510 	const char*			interfaceName	= m_blockType == BLOCKTYPE_UNIFORM ? "uniform" : "buffer";
   1511 	std::ostringstream	global, code;
   1512 
   1513 	for (int readNdx = 0; readNdx < numReads; readNdx++)
   1514 		m_readIndices[readNdx] = rnd.getInt(0, numInstances-1);
   1515 
   1516 	for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
   1517 		m_inValues[instanceNdx] = rnd.getUint32();
   1518 
   1519 	if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
   1520 		global << "#extension GL_EXT_gpu_shader5 : require\n";
   1521 
   1522 	if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
   1523 		global << "const highp int indexBase = 1;\n";
   1524 
   1525 	global <<
   1526 		"layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0) " << interfaceName << " " << blockName << "\n"
   1527 		"{\n"
   1528 		"	highp uint value;\n"
   1529 		"} " << instanceName << "[" << numInstances << "];\n";
   1530 
   1531 	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
   1532 	{
   1533 		for (int readNdx = 0; readNdx < numReads; readNdx++)
   1534 		{
   1535 			const std::string varName = indicesPrefix + de::toString(readNdx);
   1536 			m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
   1537 		}
   1538 	}
   1539 	else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
   1540 		declareUniformIndexVars(global, (deUint32)m_inValues.size(), indicesPrefix, numReads);
   1541 
   1542 	for (int readNdx = 0; readNdx < numReads; readNdx++)
   1543 	{
   1544 		const std::string varName = resultPrefix + de::toString(readNdx);
   1545 		m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
   1546 	}
   1547 
   1548 	for (int readNdx = 0; readNdx < numReads; readNdx++)
   1549 	{
   1550 		code << resultPrefix << readNdx << " = " << instanceName << "[";
   1551 
   1552 		if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
   1553 			code << m_readIndices[readNdx];
   1554 		else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
   1555 			code << "indexBase + " << (m_readIndices[readNdx]-1);
   1556 		else
   1557 			code << indicesPrefix << readNdx;
   1558 
   1559 		code << "].value;\n";
   1560 	}
   1561 
   1562 	m_shaderSpec.globalDeclarations	= global.str();
   1563 	m_shaderSpec.source				= code.str();
   1564 }
   1565 
// Runtime instance for the atomic-counter indexing case: performs atomic
// operations through an indexed counter and verifies the results in iterate().
class AtomicCounterIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
{
public:
	enum
	{
		NUM_INVOCATIONS		= 32,	// Shader invocations per execution
		NUM_COUNTERS		= 4,	// Number of atomic counters
		NUM_OPS				= 4		// Atomic operations per invocation
	};

								AtomicCounterIndexingCaseInstance	(Context&					context,
																	 const glu::ShaderType		shaderType,
																	 const ShaderSpec&			shaderSpec,
																	 const char*				name,
																	 const std::vector<int>&	opIndices,
																	 const IndexExprType		indexExprType);
	virtual						~AtomicCounterIndexingCaseInstance	(void);

	virtual	tcu::TestStatus		iterate								(void);

private:
	// Reference to the counter index used by each op; owned by the test case
	// object, which must outlive this instance.
	const std::vector<int>&		m_opIndices;
};
   1589 
// Constructor: keeps a reference (not a copy) to the op indices owned by the
// test case object.
AtomicCounterIndexingCaseInstance::AtomicCounterIndexingCaseInstance (Context&					context,
																	  const glu::ShaderType		shaderType,
																	  const ShaderSpec&			shaderSpec,
																	  const char*				name,
																	  const std::vector<int>&	opIndices,
																	  const IndexExprType		indexExprType)
	: OpaqueTypeIndexingTestInstance	(context, shaderType, shaderSpec, name, indexExprType)
	, m_opIndices						(opIndices)
{
}
   1600 
// Destructor: no owned resources.
AtomicCounterIndexingCaseInstance::~AtomicCounterIndexingCaseInstance (void)
{
}
   1604 
// Runs the test once: sets up a zero-initialized storage buffer of counters
// (plus an optional uniform index buffer), executes the shader via the
// ShaderExecutor, then verifies (1) final counter values match the expected
// hit counts and (2) every atomicAdd returned a unique in-range value.
tcu::TestStatus AtomicCounterIndexingCaseInstance::iterate (void)
{
	// \todo [2015-12-02 elecro] Add vertexPipelineStoresAndAtomics feature check.
	const int					numInvocations		= NUM_INVOCATIONS;
	const int					numCounters			= NUM_COUNTERS;
	const int					numOps				= NUM_OPS;
	std::vector<int>			expandedIndices;
	std::vector<void*>			inputs;
	std::vector<void*>			outputs;
	std::vector<deUint32>		outValues			(numInvocations*numOps);	// One result per (op, invocation) pair.

	const DeviceInterface&		vkd					= m_context.getDeviceInterface();
	const VkDevice				device				= m_context.getDevice();

	// \note Using separate buffer per element - might want to test
	// offsets & single buffer in the future.
	Buffer						atomicOpBuffer		(m_context, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, sizeof(deUint32)*numCounters);
	MovePtr<Buffer>				indexBuffer;

	Move<VkDescriptorSetLayout>	extraResourcesLayout;
	Move<VkDescriptorPool>		extraResourcesSetPool;
	Move<VkDescriptorSet>		extraResourcesSet;

	checkSupported(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);

	// Zero the counters on the host and flush so the device sees the initial state.
	deMemset(atomicOpBuffer.getHostPtr(), 0, sizeof(deUint32)*numCounters);
	atomicOpBuffer.flush();

	// For uniform indexing the op indices are supplied through a uniform buffer.
	if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
		indexBuffer = createUniformIndexBuffer(m_context, numOps, &m_opIndices[0]);

	// Descriptor set layout: binding 0 = counter SSBO, binding 1 = index UBO
	// (binding 1 is only written when a uniform index buffer exists).
	{
		const VkDescriptorSetLayoutBinding		bindings[]	=
		{
			{ 0u,	VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,	1u,	VK_SHADER_STAGE_ALL,	DE_NULL		},
			{ 1u,	VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,	1u,	VK_SHADER_STAGE_ALL,	DE_NULL		}
		};
		const VkDescriptorSetLayoutCreateInfo	layoutInfo	=
		{
			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
			DE_NULL,
			(VkDescriptorSetLayoutCreateFlags)0u,
			DE_LENGTH_OF_ARRAY(bindings),
			bindings,
		};

		extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
	}

	// Pool sized for exactly one set with the two bindings above.
	{
		const VkDescriptorPoolSize			poolSizes[]	=
		{
			{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,	1u,	},
			{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,	1u,	}
		};
		const VkDescriptorPoolCreateInfo	poolInfo	=
		{
			VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			(VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
			1u,		// maxSets
			DE_LENGTH_OF_ARRAY(poolSizes),
			poolSizes,
		};

		extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
	}

	// Allocate the single "extra resources" descriptor set.
	{
		const VkDescriptorSetAllocateInfo	allocInfo	=
		{
			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,
			*extraResourcesSetPool,
			1u,
			&extraResourcesLayout.get(),
		};

		extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
	}

	// Write binding 0: the atomic counter storage buffer.
	{
		const VkDescriptorBufferInfo	bufferInfo			=
		{
			atomicOpBuffer.getBuffer(),
			0u,
			VK_WHOLE_SIZE
		};
		const VkWriteDescriptorSet		descriptorWrite		=
		{
			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
			DE_NULL,
			*extraResourcesSet,
			0u,		// dstBinding
			0u,		// dstArrayElement
			1u,
			VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
			(const VkDescriptorImageInfo*)DE_NULL,
			&bufferInfo,
			(const VkBufferView*)DE_NULL,
		};

		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
	}

	// Write binding 1: the uniform index buffer (uniform indexing mode only).
	if (indexBuffer)
	{
		const VkDescriptorBufferInfo	bufferInfo	=
		{
			indexBuffer->getBuffer(),
			0u,
			VK_WHOLE_SIZE
		};
		const VkWriteDescriptorSet		descriptorWrite		=
		{
			VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
			DE_NULL,
			*extraResourcesSet,
			1u,		// dstBinding
			0u,		// dstArrayElement
			1u,
			VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
			(const VkDescriptorImageInfo*)DE_NULL,
			&bufferInfo,
			(const VkBufferView*)DE_NULL,
		};

		vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
	}

	// Dynamically uniform indexing: replicate each op index across all
	// invocations as a shader input attribute/SSBO value.
	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
	{
		expandedIndices.resize(numInvocations * m_opIndices.size());

		for (int opNdx = 0; opNdx < numOps; opNdx++)
		{
			int* dst = &expandedIndices[numInvocations*opNdx];
			std::fill(dst, dst+numInvocations, m_opIndices[opNdx]);
		}

		for (int opNdx = 0; opNdx < numOps; opNdx++)
			inputs.push_back(&expandedIndices[opNdx*numInvocations]);
	}

	// One output array per op; executor writes numInvocations values into each.
	for (int opNdx = 0; opNdx < numOps; opNdx++)
		outputs.push_back(&outValues[opNdx*numInvocations]);

	{
		UniquePtr<ShaderExecutor>	executor	(createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));

		executor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0], *extraResourcesSet);
	}

	{
		tcu::TestLog&					log				= m_context.getTestContext().getLog();
		tcu::TestStatus					testResult		= tcu::TestStatus::pass("Pass");
		std::vector<int>				numHits			(numCounters, 0);	// Number of hits per counter.
		std::vector<deUint32>			counterValues	(numCounters);
		std::vector<std::vector<bool> >	counterMasks	(numCounters);

		// Count how many ops target each counter; expected total per counter
		// is numHits * numInvocations.
		for (int opNdx = 0; opNdx < numOps; opNdx++)
			numHits[m_opIndices[opNdx]] += 1;

		// Read counter values
		{
			const void* mapPtr = atomicOpBuffer.getHostPtr();
			DE_ASSERT(mapPtr != DE_NULL);
			atomicOpBuffer.invalidate();	// Make device writes visible before reading.
			std::copy((const deUint32*)mapPtr, (const deUint32*)mapPtr + numCounters, &counterValues[0]);
		}

		// Verify counter values
		for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
		{
			const deUint32		refCount	= (deUint32)(numHits[counterNdx]*numInvocations);
			const deUint32		resCount	= counterValues[counterNdx];

			if (refCount != resCount)
			{
				log << tcu::TestLog::Message << "ERROR: atomic counter " << counterNdx << " has value " << resCount
					<< ", expected " << refCount
					<< tcu::TestLog::EndMessage;

				// Keep only the first failure reason; later checks may also fail.
				if (testResult.getCode() == QP_TEST_RESULT_PASS)
					testResult = tcu::TestStatus::fail("Invalid atomic counter value");
			}
		}

		// Allocate bitmasks - one bit per each valid result value
		for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
		{
			const int	counterValue	= numHits[counterNdx]*numInvocations;
			counterMasks[counterNdx].resize(counterValue, false);
		}

		// Verify result values from shaders: each atomicAdd return value must
		// be in [0, expected total) and be seen exactly once per counter.
		for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
		{
			for (int opNdx = 0; opNdx < numOps; opNdx++)
			{
				const int		counterNdx	= m_opIndices[opNdx];
				const deUint32	resValue	= outValues[opNdx*numInvocations + invocationNdx];
				const bool		rangeOk		= de::inBounds(resValue, 0u, (deUint32)counterMasks[counterNdx].size());
				const bool		notSeen		= rangeOk && !counterMasks[counterNdx][resValue];
				const bool		isOk		= rangeOk && notSeen;

				if (!isOk)
				{
					log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx
						<< ", op " << opNdx << ": got invalid result value "
						<< resValue
						<< tcu::TestLog::EndMessage;

					if (testResult.getCode() == QP_TEST_RESULT_PASS)
						testResult = tcu::TestStatus::fail("Invalid result value");
				}
				else
				{
					// Mark as used - no other invocation should see this value from same counter.
					counterMasks[counterNdx][resValue] = true;
				}
			}
		}

		if (testResult.getCode() == QP_TEST_RESULT_PASS)
		{
			// Consistency check - all masks should be 1 now
			for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
			{
				for (std::vector<bool>::const_iterator i = counterMasks[counterNdx].begin(); i != counterMasks[counterNdx].end(); i++)
					TCU_CHECK_INTERNAL(*i);
			}
		}

		return testResult;
	}
}
   1842 
   1843 class AtomicCounterIndexingCase : public OpaqueTypeIndexingCase
   1844 {
   1845 public:
   1846 								AtomicCounterIndexingCase	(tcu::TestContext&			testCtx,
   1847 															 const char*				name,
   1848 															 const char*				description,
   1849 															 IndexExprType				indexExprType,
   1850 															 const glu::ShaderType		shaderType);
   1851 	virtual						~AtomicCounterIndexingCase	(void);
   1852 
   1853 	virtual TestInstance*		createInstance				(Context& ctx) const;
   1854 
   1855 private:
   1856 								AtomicCounterIndexingCase	(const BlockArrayIndexingCase&);
   1857 	AtomicCounterIndexingCase&	operator=					(const BlockArrayIndexingCase&);
   1858 
   1859 	void						createShaderSpec			(void);
   1860 
   1861 	std::vector<int>			m_opIndices;
   1862 };
   1863 
// Constructor: allocates the per-op index vector, fills it and the shader
// spec via createShaderSpec(), then runs the base-class init().
AtomicCounterIndexingCase::AtomicCounterIndexingCase (tcu::TestContext&			testCtx,
													  const char*				name,
													  const char*				description,
													  IndexExprType				indexExprType,
													  const glu::ShaderType		shaderType)
	: OpaqueTypeIndexingCase	(testCtx, name, description, shaderType, indexExprType)
	, m_opIndices				(AtomicCounterIndexingCaseInstance::NUM_OPS)
{
	createShaderSpec();
	init();
}
   1875 
// Destructor: no resources beyond members with their own destructors.
AtomicCounterIndexingCase::~AtomicCounterIndexingCase (void)
{
}
   1879 
   1880 TestInstance* AtomicCounterIndexingCase::createInstance (Context& ctx) const
   1881 {
   1882 	return new AtomicCounterIndexingCaseInstance(ctx,
   1883 												 m_shaderType,
   1884 												 m_shaderSpec,
   1885 												 m_name,
   1886 												 m_opIndices,
   1887 												 m_indexExprType);
   1888 }
   1889 
   1890 void AtomicCounterIndexingCase::createShaderSpec (void)
   1891 {
   1892 	const int				numCounters		= AtomicCounterIndexingCaseInstance::NUM_COUNTERS;
   1893 	const int				numOps			= AtomicCounterIndexingCaseInstance::NUM_OPS;
   1894 	de::Random				rnd				(deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
   1895 
   1896 	for (int opNdx = 0; opNdx < numOps; opNdx++)
   1897 		m_opIndices[opNdx] = rnd.getInt(0, numOps-1);
   1898 
   1899 	{
   1900 		const char*			indicesPrefix	= "index";
   1901 		const char*			resultPrefix	= "result";
   1902 		std::ostringstream	global, code;
   1903 
   1904 		if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
   1905 			global << "#extension GL_EXT_gpu_shader5 : require\n";
   1906 
   1907 		if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
   1908 			global << "const highp int indexBase = 1;\n";
   1909 
   1910 		global <<
   1911 			"layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0, std430) buffer AtomicBuffer { highp uint counter[" << numCounters << "]; };\n";
   1912 
   1913 		if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
   1914 		{
   1915 			for (int opNdx = 0; opNdx < numOps; opNdx++)
   1916 			{
   1917 				const std::string varName = indicesPrefix + de::toString(opNdx);
   1918 				m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
   1919 			}
   1920 		}
   1921 		else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
   1922 			declareUniformIndexVars(global, 1, indicesPrefix, numOps);
   1923 
   1924 		for (int opNdx = 0; opNdx < numOps; opNdx++)
   1925 		{
   1926 			const std::string varName = resultPrefix + de::toString(opNdx);
   1927 			m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
   1928 		}
   1929 
   1930 		for (int opNdx = 0; opNdx < numOps; opNdx++)
   1931 		{
   1932 			code << resultPrefix << opNdx << " = atomicAdd(counter[";
   1933 
   1934 			if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
   1935 				code << m_opIndices[opNdx];
   1936 			else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
   1937 				code << "indexBase + " << (m_opIndices[opNdx]-1);
   1938 			else
   1939 				code << indicesPrefix << opNdx;
   1940 
   1941 			code << "], uint(1));\n";
   1942 		}
   1943 
   1944 		m_shaderSpec.globalDeclarations	= global.str();
   1945 		m_shaderSpec.source				= code.str();
   1946 	}
   1947 }
   1948 
// Root group for all opaque type indexing tests; init() populates the
// sampler, ubo, ssbo and atomic_counter sub-groups.
class OpaqueTypeIndexingTests : public tcu::TestCaseGroup
{
public:
								OpaqueTypeIndexingTests		(tcu::TestContext& testCtx);
	virtual						~OpaqueTypeIndexingTests	(void);

	virtual void				init						(void);

private:
	// Non-copyable: declared but never defined.
								OpaqueTypeIndexingTests		(const OpaqueTypeIndexingTests&);
	OpaqueTypeIndexingTests&	operator=					(const OpaqueTypeIndexingTests&);
};
   1961 
// Constructor: registers the group under the fixed name "opaque_type_indexing".
OpaqueTypeIndexingTests::OpaqueTypeIndexingTests (tcu::TestContext& testCtx)
	: tcu::TestCaseGroup(testCtx, "opaque_type_indexing", "Opaque Type Indexing Tests")
{
}
   1966 
// Destructor: children are owned and destroyed by the base TestCaseGroup.
OpaqueTypeIndexingTests::~OpaqueTypeIndexingTests (void)
{
}
   1970 
   1971 void OpaqueTypeIndexingTests::init (void)
   1972 {
   1973 	static const struct
   1974 	{
   1975 		IndexExprType	type;
   1976 		const char*		name;
   1977 		const char*		description;
   1978 	} indexingTypes[] =
   1979 	{
   1980 		{ INDEX_EXPR_TYPE_CONST_LITERAL,	"const_literal",		"Indexing by constant literal"					},
   1981 		{ INDEX_EXPR_TYPE_CONST_EXPRESSION,	"const_expression",		"Indexing by constant expression"				},
   1982 		{ INDEX_EXPR_TYPE_UNIFORM,			"uniform",				"Indexing by uniform value"						},
   1983 		{ INDEX_EXPR_TYPE_DYNAMIC_UNIFORM,	"dynamically_uniform",	"Indexing by dynamically uniform expression"	}
   1984 	};
   1985 
   1986 	static const struct
   1987 	{
   1988 		glu::ShaderType	type;
   1989 		const char*		name;
   1990 	} shaderTypes[] =
   1991 	{
   1992 		{ glu::SHADERTYPE_VERTEX,		"vertex"	},
   1993 		{ glu::SHADERTYPE_FRAGMENT,		"fragment"	},
   1994 		{ glu::SHADERTYPE_COMPUTE,		"compute"	}
   1995 	};
   1996 
   1997 	// .sampler
   1998 	{
   1999 		static const glu::DataType samplerTypes[] =
   2000 		{
   2001 			// \note 1D images will be added by a later extension.
   2002 //			glu::TYPE_SAMPLER_1D,
   2003 			glu::TYPE_SAMPLER_2D,
   2004 			glu::TYPE_SAMPLER_CUBE,
   2005 			glu::TYPE_SAMPLER_2D_ARRAY,
   2006 			glu::TYPE_SAMPLER_3D,
   2007 //			glu::TYPE_SAMPLER_1D_SHADOW,
   2008 			glu::TYPE_SAMPLER_2D_SHADOW,
   2009 			glu::TYPE_SAMPLER_CUBE_SHADOW,
   2010 			glu::TYPE_SAMPLER_2D_ARRAY_SHADOW,
   2011 //			glu::TYPE_INT_SAMPLER_1D,
   2012 			glu::TYPE_INT_SAMPLER_2D,
   2013 			glu::TYPE_INT_SAMPLER_CUBE,
   2014 			glu::TYPE_INT_SAMPLER_2D_ARRAY,
   2015 			glu::TYPE_INT_SAMPLER_3D,
   2016 //			glu::TYPE_UINT_SAMPLER_1D,
   2017 			glu::TYPE_UINT_SAMPLER_2D,
   2018 			glu::TYPE_UINT_SAMPLER_CUBE,
   2019 			glu::TYPE_UINT_SAMPLER_2D_ARRAY,
   2020 			glu::TYPE_UINT_SAMPLER_3D,
   2021 		};
   2022 
   2023 		tcu::TestCaseGroup* const samplerGroup = new tcu::TestCaseGroup(m_testCtx, "sampler", "Sampler Array Indexing Tests");
   2024 		addChild(samplerGroup);
   2025 
   2026 		for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
   2027 		{
   2028 			const IndexExprType			indexExprType	= indexingTypes[indexTypeNdx].type;
   2029 			tcu::TestCaseGroup* const	indexGroup		= new tcu::TestCaseGroup(m_testCtx, indexingTypes[indexTypeNdx].name, indexingTypes[indexTypeNdx].description);
   2030 			samplerGroup->addChild(indexGroup);
   2031 
   2032 			for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
   2033 			{
   2034 				const glu::ShaderType		shaderType		= shaderTypes[shaderTypeNdx].type;
   2035 				tcu::TestCaseGroup* const	shaderGroup		= new tcu::TestCaseGroup(m_testCtx, shaderTypes[shaderTypeNdx].name, "");
   2036 				indexGroup->addChild(shaderGroup);
   2037 
   2038 				for (int samplerTypeNdx = 0; samplerTypeNdx < DE_LENGTH_OF_ARRAY(samplerTypes); samplerTypeNdx++)
   2039 				{
   2040 					const glu::DataType	samplerType	= samplerTypes[samplerTypeNdx];
   2041 					const char*			samplerName	= getDataTypeName(samplerType);
   2042 					const std::string	caseName	= de::toLower(samplerName);
   2043 
   2044 					shaderGroup->addChild(new SamplerIndexingCase(m_testCtx, caseName.c_str(), "", shaderType, samplerType, indexExprType));
   2045 				}
   2046 			}
   2047 		}
   2048 	}
   2049 
   2050 	// .ubo / .ssbo / .atomic_counter
   2051 	{
   2052 		tcu::TestCaseGroup* const	uboGroup	= new tcu::TestCaseGroup(m_testCtx, "ubo",				"Uniform Block Instance Array Indexing Tests");
   2053 		tcu::TestCaseGroup* const	ssboGroup	= new tcu::TestCaseGroup(m_testCtx, "ssbo",				"Buffer Block Instance Array Indexing Tests");
   2054 		tcu::TestCaseGroup* const	acGroup		= new tcu::TestCaseGroup(m_testCtx, "atomic_counter",	"Atomic Counter Array Indexing Tests");
   2055 		addChild(uboGroup);
   2056 		addChild(ssboGroup);
   2057 		addChild(acGroup);
   2058 
   2059 		for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
   2060 		{
   2061 			const IndexExprType		indexExprType		= indexingTypes[indexTypeNdx].type;
   2062 			const char*				indexExprName		= indexingTypes[indexTypeNdx].name;
   2063 			const char*				indexExprDesc		= indexingTypes[indexTypeNdx].description;
   2064 
   2065 			for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
   2066 			{
   2067 				const glu::ShaderType	shaderType		= shaderTypes[shaderTypeNdx].type;
   2068 				const std::string		name			= std::string(indexExprName) + "_" + shaderTypes[shaderTypeNdx].name;
   2069 
   2070 				uboGroup->addChild	(new BlockArrayIndexingCase		(m_testCtx, name.c_str(), indexExprDesc, BLOCKTYPE_UNIFORM,	indexExprType, shaderType));
   2071 				acGroup->addChild	(new AtomicCounterIndexingCase	(m_testCtx, name.c_str(), indexExprDesc, indexExprType, shaderType));
   2072 
   2073 				if (indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL || indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
   2074 					ssboGroup->addChild	(new BlockArrayIndexingCase	(m_testCtx, name.c_str(), indexExprDesc, BLOCKTYPE_BUFFER, indexExprType, shaderType));
   2075 			}
   2076 		}
   2077 	}
   2078 }
   2079 
   2080 } // anonymous
   2081 
   2082 tcu::TestCaseGroup* createOpaqueTypeIndexingTests (tcu::TestContext& testCtx)
   2083 {
   2084 	return new OpaqueTypeIndexingTests(testCtx);
   2085 }
   2086 
   2087 } // shaderexecutor
   2088 } // vkt
   2089