1 /*------------------------------------------------------------------------ 2 * Vulkan Conformance Tests 3 * ------------------------ 4 * 5 * Copyright (c) 2015 The Khronos Group Inc. 6 * Copyright (c) 2015 Imagination Technologies Ltd. 7 * 8 * Licensed under the Apache License, Version 2.0 (the "License"); 9 * you may not use this file except in compliance with the License. 10 * You may obtain a copy of the License at 11 * 12 * http://www.apache.org/licenses/LICENSE-2.0 13 * 14 * Unless required by applicable law or agreed to in writing, software 15 * distributed under the License is distributed on an "AS IS" BASIS, 16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 * See the License for the specific language governing permissions and 18 * limitations under the License. 19 * 20 *//*! 21 * \file 22 * \brief Image sampling case 23 *//*--------------------------------------------------------------------*/ 24 25 #include "vktPipelineImageSamplingInstance.hpp" 26 #include "vktPipelineClearUtil.hpp" 27 #include "vktPipelineReferenceRenderer.hpp" 28 #include "vkBuilderUtil.hpp" 29 #include "vkImageUtil.hpp" 30 #include "vkPrograms.hpp" 31 #include "vkQueryUtil.hpp" 32 #include "vkRefUtil.hpp" 33 #include "tcuTexLookupVerifier.hpp" 34 #include "tcuTextureUtil.hpp" 35 #include "tcuTestLog.hpp" 36 #include "deSTLUtil.hpp" 37 38 namespace vkt 39 { 40 namespace pipeline 41 { 42 43 using namespace vk; 44 using de::MovePtr; 45 using de::UniquePtr; 46 47 namespace 48 { 49 de::MovePtr<Allocation> allocateBuffer (const InstanceInterface& vki, 50 const DeviceInterface& vkd, 51 const VkPhysicalDevice& physDevice, 52 const VkDevice device, 53 const VkBuffer& buffer, 54 const MemoryRequirement requirement, 55 Allocator& allocator, 56 AllocationKind allocationKind) 57 { 58 switch (allocationKind) 59 { 60 case ALLOCATION_KIND_SUBALLOCATED: 61 { 62 const VkMemoryRequirements memoryRequirements = getBufferMemoryRequirements(vkd, device, buffer); 63 64 return 
allocator.allocate(memoryRequirements, requirement); 65 } 66 67 case ALLOCATION_KIND_DEDICATED: 68 { 69 return allocateDedicated(vki, vkd, physDevice, device, buffer, requirement); 70 } 71 72 default: 73 { 74 TCU_THROW(InternalError, "Invalid allocation kind"); 75 } 76 } 77 } 78 79 de::MovePtr<Allocation> allocateImage (const InstanceInterface& vki, 80 const DeviceInterface& vkd, 81 const VkPhysicalDevice& physDevice, 82 const VkDevice device, 83 const VkImage& image, 84 const MemoryRequirement requirement, 85 Allocator& allocator, 86 AllocationKind allocationKind) 87 { 88 switch (allocationKind) 89 { 90 case ALLOCATION_KIND_SUBALLOCATED: 91 { 92 const VkMemoryRequirements memoryRequirements = getImageMemoryRequirements(vkd, device, image); 93 94 return allocator.allocate(memoryRequirements, requirement); 95 } 96 97 case ALLOCATION_KIND_DEDICATED: 98 { 99 return allocateDedicated(vki, vkd, physDevice, device, image, requirement); 100 } 101 102 default: 103 { 104 TCU_THROW(InternalError, "Invalid allocation kind"); 105 } 106 } 107 } 108 109 static VkImageType getCompatibleImageType (VkImageViewType viewType) 110 { 111 switch (viewType) 112 { 113 case VK_IMAGE_VIEW_TYPE_1D: return VK_IMAGE_TYPE_1D; 114 case VK_IMAGE_VIEW_TYPE_1D_ARRAY: return VK_IMAGE_TYPE_1D; 115 case VK_IMAGE_VIEW_TYPE_2D: return VK_IMAGE_TYPE_2D; 116 case VK_IMAGE_VIEW_TYPE_2D_ARRAY: return VK_IMAGE_TYPE_2D; 117 case VK_IMAGE_VIEW_TYPE_3D: return VK_IMAGE_TYPE_3D; 118 case VK_IMAGE_VIEW_TYPE_CUBE: return VK_IMAGE_TYPE_2D; 119 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: return VK_IMAGE_TYPE_2D; 120 default: 121 break; 122 } 123 124 DE_ASSERT(false); 125 return VK_IMAGE_TYPE_1D; 126 } 127 128 template<typename TcuFormatType> 129 static MovePtr<TestTexture> createTestTexture (const TcuFormatType format, VkImageViewType viewType, const tcu::IVec3& size, int layerCount) 130 { 131 MovePtr<TestTexture> texture; 132 const VkImageType imageType = getCompatibleImageType(viewType); 133 134 switch (imageType) 135 { 
136 case VK_IMAGE_TYPE_1D: 137 if (layerCount == 1) 138 texture = MovePtr<TestTexture>(new TestTexture1D(format, size.x())); 139 else 140 texture = MovePtr<TestTexture>(new TestTexture1DArray(format, size.x(), layerCount)); 141 142 break; 143 144 case VK_IMAGE_TYPE_2D: 145 if (layerCount == 1) 146 { 147 texture = MovePtr<TestTexture>(new TestTexture2D(format, size.x(), size.y())); 148 } 149 else 150 { 151 if (viewType == VK_IMAGE_VIEW_TYPE_CUBE || viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) 152 { 153 if (layerCount == tcu::CUBEFACE_LAST && viewType == VK_IMAGE_VIEW_TYPE_CUBE) 154 { 155 texture = MovePtr<TestTexture>(new TestTextureCube(format, size.x())); 156 } 157 else 158 { 159 DE_ASSERT(layerCount % tcu::CUBEFACE_LAST == 0); 160 161 texture = MovePtr<TestTexture>(new TestTextureCubeArray(format, size.x(), layerCount)); 162 } 163 } 164 else 165 { 166 texture = MovePtr<TestTexture>(new TestTexture2DArray(format, size.x(), size.y(), layerCount)); 167 } 168 } 169 170 break; 171 172 case VK_IMAGE_TYPE_3D: 173 texture = MovePtr<TestTexture>(new TestTexture3D(format, size.x(), size.y(), size.z())); 174 break; 175 176 default: 177 DE_ASSERT(false); 178 } 179 180 return texture; 181 } 182 183 } // anonymous 184 185 ImageSamplingInstance::ImageSamplingInstance (Context& context, 186 const tcu::UVec2& renderSize, 187 VkImageViewType imageViewType, 188 VkFormat imageFormat, 189 const tcu::IVec3& imageSize, 190 int layerCount, 191 const VkComponentMapping& componentMapping, 192 const VkImageSubresourceRange& subresourceRange, 193 const VkSamplerCreateInfo& samplerParams, 194 float samplerLod, 195 const std::vector<Vertex4Tex4>& vertices, 196 VkDescriptorType samplingType, 197 int imageCount, 198 AllocationKind allocationKind) 199 : vkt::TestInstance (context) 200 , m_allocationKind (allocationKind) 201 , m_samplingType (samplingType) 202 , m_imageViewType (imageViewType) 203 , m_imageFormat (imageFormat) 204 , m_imageSize (imageSize) 205 , m_layerCount (layerCount) 206 , 
m_imageCount (imageCount) 207 , m_componentMapping (componentMapping) 208 , m_subresourceRange (subresourceRange) 209 , m_samplerParams (samplerParams) 210 , m_samplerLod (samplerLod) 211 , m_renderSize (renderSize) 212 , m_colorFormat (VK_FORMAT_R8G8B8A8_UNORM) 213 , m_vertices (vertices) 214 { 215 const InstanceInterface& vki = context.getInstanceInterface(); 216 const DeviceInterface& vk = context.getDeviceInterface(); 217 const VkPhysicalDevice physDevice = context.getPhysicalDevice(); 218 const VkDevice vkDevice = context.getDevice(); 219 const VkQueue queue = context.getUniversalQueue(); 220 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex(); 221 SimpleAllocator memAlloc (vk, vkDevice, getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice())); 222 const VkComponentMapping componentMappingRGBA = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A }; 223 224 if (!isSupportedSamplableFormat(context.getInstanceInterface(), context.getPhysicalDevice(), imageFormat)) 225 throw tcu::NotSupportedError(std::string("Unsupported format for sampling: ") + getFormatName(imageFormat)); 226 227 if ((deUint32)imageCount > context.getDeviceProperties().limits.maxColorAttachments) 228 throw tcu::NotSupportedError(std::string("Unsupported render target count: ") + de::toString(imageCount)); 229 230 if ((samplerParams.minFilter == VK_FILTER_LINEAR || 231 samplerParams.magFilter == VK_FILTER_LINEAR || 232 samplerParams.mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR) && 233 !isLinearFilteringSupported(context.getInstanceInterface(), context.getPhysicalDevice(), imageFormat, VK_IMAGE_TILING_OPTIMAL)) 234 throw tcu::NotSupportedError(std::string("Unsupported format for linear filtering: ") + getFormatName(imageFormat)); 235 236 if ((samplerParams.addressModeU == VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE || 237 samplerParams.addressModeV == 
VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE || 238 samplerParams.addressModeW == VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE) && 239 !de::contains(context.getDeviceExtensions().begin(), context.getDeviceExtensions().end(), "VK_KHR_sampler_mirror_clamp_to_edge")) 240 TCU_THROW(NotSupportedError, "VK_KHR_sampler_mirror_clamp_to_edge not supported"); 241 242 if (isCompressedFormat(imageFormat) && imageViewType == VK_IMAGE_VIEW_TYPE_3D) 243 { 244 // \todo [2016-01-22 pyry] Mandate VK_ERROR_FORMAT_NOT_SUPPORTED 245 try 246 { 247 const VkImageFormatProperties formatProperties = getPhysicalDeviceImageFormatProperties(context.getInstanceInterface(), 248 context.getPhysicalDevice(), 249 imageFormat, 250 VK_IMAGE_TYPE_3D, 251 VK_IMAGE_TILING_OPTIMAL, 252 VK_IMAGE_USAGE_SAMPLED_BIT, 253 (VkImageCreateFlags)0); 254 255 if (formatProperties.maxExtent.width == 0 && 256 formatProperties.maxExtent.height == 0 && 257 formatProperties.maxExtent.depth == 0) 258 TCU_THROW(NotSupportedError, "3D compressed format not supported"); 259 } 260 catch (const Error&) 261 { 262 TCU_THROW(NotSupportedError, "3D compressed format not supported"); 263 } 264 } 265 266 if (imageViewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY && !context.getDeviceFeatures().imageCubeArray) 267 TCU_THROW(NotSupportedError, "imageCubeArray feature is not supported"); 268 269 if (m_allocationKind == ALLOCATION_KIND_DEDICATED) 270 { 271 if (!isDeviceExtensionSupported(context.getUsedApiVersion(), context.getDeviceExtensions(), "VK_KHR_dedicated_allocation")) 272 TCU_THROW(NotSupportedError, std::string("VK_KHR_dedicated_allocation is not supported").c_str()); 273 } 274 275 // Create texture images, views and samplers 276 { 277 VkImageCreateFlags imageFlags = 0u; 278 279 if (m_imageViewType == VK_IMAGE_VIEW_TYPE_CUBE || m_imageViewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) 280 imageFlags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; 281 282 // Initialize texture data 283 if (isCompressedFormat(imageFormat)) 284 m_texture = 
createTestTexture(mapVkCompressedFormat(imageFormat), imageViewType, imageSize, layerCount); 285 else 286 m_texture = createTestTexture(mapVkFormat(imageFormat), imageViewType, imageSize, layerCount); 287 288 const VkImageCreateInfo imageParams = 289 { 290 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType; 291 DE_NULL, // const void* pNext; 292 imageFlags, // VkImageCreateFlags flags; 293 getCompatibleImageType(m_imageViewType), // VkImageType imageType; 294 imageFormat, // VkFormat format; 295 { // VkExtent3D extent; 296 (deUint32)m_imageSize.x(), 297 (deUint32)m_imageSize.y(), 298 (deUint32)m_imageSize.z() 299 }, 300 (deUint32)m_texture->getNumLevels(), // deUint32 mipLevels; 301 (deUint32)m_layerCount, // deUint32 arrayLayers; 302 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples; 303 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling; 304 VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage; 305 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode; 306 1u, // deUint32 queueFamilyIndexCount; 307 &queueFamilyIndex, // const deUint32* pQueueFamilyIndices; 308 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout; 309 }; 310 311 m_images.resize(m_imageCount); 312 m_imageAllocs.resize(m_imageCount); 313 m_imageViews.resize(m_imageCount); 314 315 for (int imgNdx = 0; imgNdx < m_imageCount; ++imgNdx) 316 { 317 m_images[imgNdx] = SharedImagePtr(new UniqueImage(createImage(vk, vkDevice, &imageParams))); 318 m_imageAllocs[imgNdx] = SharedAllocPtr(new UniqueAlloc(allocateImage(vki, vk, physDevice, vkDevice, **m_images[imgNdx], MemoryRequirement::Any, memAlloc, m_allocationKind))); 319 VK_CHECK(vk.bindImageMemory(vkDevice, **m_images[imgNdx], (*m_imageAllocs[imgNdx])->getMemory(), (*m_imageAllocs[imgNdx])->getOffset())); 320 321 // Upload texture data 322 uploadTestTexture(vk, vkDevice, queue, queueFamilyIndex, memAlloc, *m_texture, **m_images[imgNdx]); 323 324 // Create image view and sampler 325 const 
VkImageViewCreateInfo imageViewParams = 326 { 327 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType; 328 DE_NULL, // const void* pNext; 329 0u, // VkImageViewCreateFlags flags; 330 **m_images[imgNdx], // VkImage image; 331 m_imageViewType, // VkImageViewType viewType; 332 imageFormat, // VkFormat format; 333 m_componentMapping, // VkComponentMapping components; 334 m_subresourceRange, // VkImageSubresourceRange subresourceRange; 335 }; 336 337 m_imageViews[imgNdx] = SharedImageViewPtr(new UniqueImageView(createImageView(vk, vkDevice, &imageViewParams))); 338 } 339 340 m_sampler = createSampler(vk, vkDevice, &m_samplerParams); 341 } 342 343 // Create descriptor set for image and sampler 344 { 345 DescriptorPoolBuilder descriptorPoolBuilder; 346 if (m_samplingType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) 347 descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_SAMPLER, 1u); 348 descriptorPoolBuilder.addType(m_samplingType, m_imageCount); 349 m_descriptorPool = descriptorPoolBuilder.build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 350 m_samplingType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ? 
m_imageCount + 1u : m_imageCount); 351 352 DescriptorSetLayoutBuilder setLayoutBuilder; 353 if (m_samplingType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) 354 setLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_SAMPLER, VK_SHADER_STAGE_FRAGMENT_BIT); 355 setLayoutBuilder.addArrayBinding(m_samplingType, m_imageCount, VK_SHADER_STAGE_FRAGMENT_BIT); 356 m_descriptorSetLayout = setLayoutBuilder.build(vk, vkDevice); 357 358 const VkDescriptorSetAllocateInfo descriptorSetAllocateInfo = 359 { 360 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, // VkStructureType sType; 361 DE_NULL, // const void* pNext; 362 *m_descriptorPool, // VkDescriptorPool descriptorPool; 363 1u, // deUint32 setLayoutCount; 364 &m_descriptorSetLayout.get() // const VkDescriptorSetLayout* pSetLayouts; 365 }; 366 367 m_descriptorSet = allocateDescriptorSet(vk, vkDevice, &descriptorSetAllocateInfo); 368 369 const VkSampler sampler = m_samplingType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ? DE_NULL : *m_sampler; 370 std::vector<VkDescriptorImageInfo> descriptorImageInfo(m_imageCount); 371 for (int imgNdx = 0; imgNdx < m_imageCount; ++imgNdx) 372 { 373 descriptorImageInfo[imgNdx].sampler = sampler; // VkSampler sampler; 374 descriptorImageInfo[imgNdx].imageView = **m_imageViews[imgNdx]; // VkImageView imageView; 375 descriptorImageInfo[imgNdx].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; // VkImageLayout imageLayout; 376 } 377 378 DescriptorSetUpdateBuilder setUpdateBuilder; 379 if (m_samplingType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) 380 { 381 const VkDescriptorImageInfo descriptorSamplerInfo = 382 { 383 *m_sampler, // VkSampler sampler; 384 DE_NULL, // VkImageView imageView; 385 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL // VkImageLayout imageLayout; 386 }; 387 setUpdateBuilder.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0), VK_DESCRIPTOR_TYPE_SAMPLER, &descriptorSamplerInfo); 388 } 389 390 const deUint32 binding = m_samplingType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ? 
1u : 0u; 391 setUpdateBuilder.writeArray(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(binding), m_samplingType, m_imageCount, descriptorImageInfo.data()); 392 setUpdateBuilder.update(vk, vkDevice); 393 } 394 395 // Create color images and views 396 { 397 const VkImageCreateInfo colorImageParams = 398 { 399 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType; 400 DE_NULL, // const void* pNext; 401 0u, // VkImageCreateFlags flags; 402 VK_IMAGE_TYPE_2D, // VkImageType imageType; 403 m_colorFormat, // VkFormat format; 404 { (deUint32)m_renderSize.x(), (deUint32)m_renderSize.y(), 1u }, // VkExtent3D extent; 405 1u, // deUint32 mipLevels; 406 1u, // deUint32 arrayLayers; 407 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples; 408 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling; 409 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, // VkImageUsageFlags usage; 410 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode; 411 1u, // deUint32 queueFamilyIndexCount; 412 &queueFamilyIndex, // const deUint32* pQueueFamilyIndices; 413 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout; 414 }; 415 416 m_colorImages.resize(m_imageCount); 417 m_colorImageAllocs.resize(m_imageCount); 418 m_colorAttachmentViews.resize(m_imageCount); 419 420 for (int imgNdx = 0; imgNdx < m_imageCount; ++imgNdx) 421 { 422 m_colorImages[imgNdx] = SharedImagePtr(new UniqueImage(createImage(vk, vkDevice, &colorImageParams))); 423 m_colorImageAllocs[imgNdx] = SharedAllocPtr(new UniqueAlloc(allocateImage(vki, vk, physDevice, vkDevice, **m_colorImages[imgNdx], MemoryRequirement::Any, memAlloc, m_allocationKind))); 424 VK_CHECK(vk.bindImageMemory(vkDevice, **m_colorImages[imgNdx], (*m_colorImageAllocs[imgNdx])->getMemory(), (*m_colorImageAllocs[imgNdx])->getOffset())); 425 426 const VkImageViewCreateInfo colorAttachmentViewParams = 427 { 428 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType; 429 DE_NULL, // const void* pNext; 430 
0u, // VkImageViewCreateFlags flags; 431 **m_colorImages[imgNdx], // VkImage image; 432 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType; 433 m_colorFormat, // VkFormat format; 434 componentMappingRGBA, // VkComponentMapping components; 435 { VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u } // VkImageSubresourceRange subresourceRange; 436 }; 437 438 m_colorAttachmentViews[imgNdx] = SharedImageViewPtr(new UniqueImageView(createImageView(vk, vkDevice, &colorAttachmentViewParams))); 439 } 440 } 441 442 // Create render pass 443 { 444 std::vector<VkAttachmentDescription> colorAttachmentDescriptions(m_imageCount); 445 std::vector<VkAttachmentReference> colorAttachmentReferences(m_imageCount); 446 447 for (int imgNdx = 0; imgNdx < m_imageCount; ++imgNdx) 448 { 449 colorAttachmentDescriptions[imgNdx].flags = 0u; // VkAttachmentDescriptionFlags flags; 450 colorAttachmentDescriptions[imgNdx].format = m_colorFormat; // VkFormat format; 451 colorAttachmentDescriptions[imgNdx].samples = VK_SAMPLE_COUNT_1_BIT; // VkSampleCountFlagBits samples; 452 colorAttachmentDescriptions[imgNdx].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; // VkAttachmentLoadOp loadOp; 453 colorAttachmentDescriptions[imgNdx].storeOp = VK_ATTACHMENT_STORE_OP_STORE; // VkAttachmentStoreOp storeOp; 454 colorAttachmentDescriptions[imgNdx].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; // VkAttachmentLoadOp stencilLoadOp; 455 colorAttachmentDescriptions[imgNdx].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; // VkAttachmentStoreOp stencilStoreOp; 456 colorAttachmentDescriptions[imgNdx].initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // VkImageLayout initialLayout; 457 colorAttachmentDescriptions[imgNdx].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // VkImageLayout finalLayout; 458 459 colorAttachmentReferences[imgNdx].attachment = (deUint32)imgNdx; // deUint32 attachment; 460 colorAttachmentReferences[imgNdx].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // VkImageLayout layout; 461 } 462 
463 const VkSubpassDescription subpassDescription = 464 { 465 0u, // VkSubpassDescriptionFlags flags; 466 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint; 467 0u, // deUint32 inputAttachmentCount; 468 DE_NULL, // const VkAttachmentReference* pInputAttachments; 469 (deUint32)m_imageCount, // deUint32 colorAttachmentCount; 470 &colorAttachmentReferences[0], // const VkAttachmentReference* pColorAttachments; 471 DE_NULL, // const VkAttachmentReference* pResolveAttachments; 472 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment; 473 0u, // deUint32 preserveAttachmentCount; 474 DE_NULL // const VkAttachmentReference* pPreserveAttachments; 475 }; 476 477 const VkRenderPassCreateInfo renderPassParams = 478 { 479 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType; 480 DE_NULL, // const void* pNext; 481 0u, // VkRenderPassCreateFlags flags; 482 (deUint32)m_imageCount, // deUint32 attachmentCount; 483 &colorAttachmentDescriptions[0], // const VkAttachmentDescription* pAttachments; 484 1u, // deUint32 subpassCount; 485 &subpassDescription, // const VkSubpassDescription* pSubpasses; 486 0u, // deUint32 dependencyCount; 487 DE_NULL // const VkSubpassDependency* pDependencies; 488 }; 489 490 m_renderPass = createRenderPass(vk, vkDevice, &renderPassParams); 491 } 492 493 // Create framebuffer 494 { 495 std::vector<VkImageView> pAttachments(m_imageCount); 496 for (int imgNdx = 0; imgNdx < m_imageCount; ++imgNdx) 497 pAttachments[imgNdx] = m_colorAttachmentViews[imgNdx]->get(); 498 499 const VkFramebufferCreateInfo framebufferParams = 500 { 501 VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType; 502 DE_NULL, // const void* pNext; 503 0u, // VkFramebufferCreateFlags flags; 504 *m_renderPass, // VkRenderPass renderPass; 505 (deUint32)m_imageCount, // deUint32 attachmentCount; 506 &pAttachments[0], // const VkImageView* pAttachments; 507 (deUint32)m_renderSize.x(), // deUint32 width; 508 
(deUint32)m_renderSize.y(), // deUint32 height; 509 1u // deUint32 layers; 510 }; 511 512 m_framebuffer = createFramebuffer(vk, vkDevice, &framebufferParams); 513 } 514 515 // Create pipeline layout 516 { 517 const VkPipelineLayoutCreateInfo pipelineLayoutParams = 518 { 519 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType; 520 DE_NULL, // const void* pNext; 521 0u, // VkPipelineLayoutCreateFlags flags; 522 1u, // deUint32 setLayoutCount; 523 &m_descriptorSetLayout.get(), // const VkDescriptorSetLayout* pSetLayouts; 524 0u, // deUint32 pushConstantRangeCount; 525 DE_NULL // const VkPushConstantRange* pPushConstantRanges; 526 }; 527 528 m_pipelineLayout = createPipelineLayout(vk, vkDevice, &pipelineLayoutParams); 529 } 530 531 m_vertexShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get("tex_vert"), 0); 532 m_fragmentShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get("tex_frag"), 0); 533 534 // Create pipeline 535 { 536 const VkPipelineShaderStageCreateInfo shaderStages[2] = 537 { 538 { 539 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType; 540 DE_NULL, // const void* pNext; 541 0u, // VkPipelineShaderStageCreateFlags flags; 542 VK_SHADER_STAGE_VERTEX_BIT, // VkShaderStageFlagBits stage; 543 *m_vertexShaderModule, // VkShaderModule module; 544 "main", // const char* pName; 545 DE_NULL // const VkSpecializationInfo* pSpecializationInfo; 546 }, 547 { 548 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType; 549 DE_NULL, // const void* pNext; 550 0u, // VkPipelineShaderStageCreateFlags flags; 551 VK_SHADER_STAGE_FRAGMENT_BIT, // VkShaderStageFlagBits stage; 552 *m_fragmentShaderModule, // VkShaderModule module; 553 "main", // const char* pName; 554 DE_NULL // const VkSpecializationInfo* pSpecializationInfo; 555 } 556 }; 557 558 const VkVertexInputBindingDescription vertexInputBindingDescription = 559 { 560 0u, // deUint32 binding; 
561 sizeof(Vertex4Tex4), // deUint32 strideInBytes; 562 VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputStepRate inputRate; 563 }; 564 565 const VkVertexInputAttributeDescription vertexInputAttributeDescriptions[2] = 566 { 567 { 568 0u, // deUint32 location; 569 0u, // deUint32 binding; 570 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format; 571 0u // deUint32 offset; 572 }, 573 { 574 1u, // deUint32 location; 575 0u, // deUint32 binding; 576 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format; 577 DE_OFFSET_OF(Vertex4Tex4, texCoord), // deUint32 offset; 578 } 579 }; 580 581 const VkPipelineVertexInputStateCreateInfo vertexInputStateParams = 582 { 583 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType; 584 DE_NULL, // const void* pNext; 585 0u, // VkPipelineVertexInputStateCreateFlags flags; 586 1u, // deUint32 vertexBindingDescriptionCount; 587 &vertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions; 588 2u, // deUint32 vertexAttributeDescriptionCount; 589 vertexInputAttributeDescriptions // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions; 590 }; 591 592 const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateParams = 593 { 594 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType; 595 DE_NULL, // const void* pNext; 596 0u, // VkPipelineInputAssemblyStateCreateFlags flags; 597 VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, // VkPrimitiveTopology topology; 598 false // VkBool32 primitiveRestartEnable; 599 }; 600 601 const VkViewport viewport = 602 { 603 0.0f, // float x; 604 0.0f, // float y; 605 (float)m_renderSize.x(), // float width; 606 (float)m_renderSize.y(), // float height; 607 0.0f, // float minDepth; 608 1.0f // float maxDepth; 609 }; 610 611 const VkRect2D scissor = { { 0, 0 }, { (deUint32)m_renderSize.x(), (deUint32)m_renderSize.y() } }; 612 613 const VkPipelineViewportStateCreateInfo viewportStateParams = 614 { 615 
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType; 616 DE_NULL, // const void* pNext; 617 0u, // VkPipelineViewportStateCreateFlags flags; 618 1u, // deUint32 viewportCount; 619 &viewport, // const VkViewport* pViewports; 620 1u, // deUint32 scissorCount; 621 &scissor // const VkRect2D* pScissors; 622 }; 623 624 const VkPipelineRasterizationStateCreateInfo rasterStateParams = 625 { 626 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType; 627 DE_NULL, // const void* pNext; 628 0u, // VkPipelineRasterizationStateCreateFlags flags; 629 false, // VkBool32 depthClampEnable; 630 false, // VkBool32 rasterizerDiscardEnable; 631 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode; 632 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode; 633 VK_FRONT_FACE_COUNTER_CLOCKWISE, // VkFrontFace frontFace; 634 false, // VkBool32 depthBiasEnable; 635 0.0f, // float depthBiasConstantFactor; 636 0.0f, // float depthBiasClamp; 637 0.0f, // float depthBiasSlopeFactor; 638 1.0f // float lineWidth; 639 }; 640 641 std::vector<VkPipelineColorBlendAttachmentState> colorBlendAttachmentStates(m_imageCount); 642 643 for (int imgNdx = 0; imgNdx < m_imageCount; ++imgNdx) 644 { 645 colorBlendAttachmentStates[imgNdx].blendEnable = false; // VkBool32 blendEnable; 646 colorBlendAttachmentStates[imgNdx].srcColorBlendFactor = VK_BLEND_FACTOR_ONE; // VkBlendFactor srcColorBlendFactor; 647 colorBlendAttachmentStates[imgNdx].dstColorBlendFactor = VK_BLEND_FACTOR_ZERO; // VkBlendFactor dstColorBlendFactor; 648 colorBlendAttachmentStates[imgNdx].colorBlendOp = VK_BLEND_OP_ADD; // VkBlendOp colorBlendOp; 649 colorBlendAttachmentStates[imgNdx].srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE; // VkBlendFactor srcAlphaBlendFactor; 650 colorBlendAttachmentStates[imgNdx].dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO; // VkBlendFactor dstAlphaBlendFactor; 651 colorBlendAttachmentStates[imgNdx].alphaBlendOp = VK_BLEND_OP_ADD; // VkBlendOp alphaBlendOp; 652 
colorBlendAttachmentStates[imgNdx].colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | // VkColorComponentFlags colorWriteMask; 653 VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; 654 } 655 656 const VkPipelineColorBlendStateCreateInfo colorBlendStateParams = 657 { 658 VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType; 659 DE_NULL, // const void* pNext; 660 0u, // VkPipelineColorBlendStateCreateFlags flags; 661 false, // VkBool32 logicOpEnable; 662 VK_LOGIC_OP_COPY, // VkLogicOp logicOp; 663 (deUint32)m_imageCount, // deUint32 attachmentCount; 664 &colorBlendAttachmentStates[0], // const VkPipelineColorBlendAttachmentState* pAttachments; 665 { 0.0f, 0.0f, 0.0f, 0.0f } // float blendConstants[4]; 666 }; 667 668 const VkPipelineMultisampleStateCreateInfo multisampleStateParams = 669 { 670 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType; 671 DE_NULL, // const void* pNext; 672 0u, // VkPipelineMultisampleStateCreateFlags flags; 673 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples; 674 false, // VkBool32 sampleShadingEnable; 675 0.0f, // float minSampleShading; 676 DE_NULL, // const VkSampleMask* pSampleMask; 677 false, // VkBool32 alphaToCoverageEnable; 678 false // VkBool32 alphaToOneEnable; 679 }; 680 681 VkPipelineDepthStencilStateCreateInfo depthStencilStateParams = 682 { 683 VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType; 684 DE_NULL, // const void* pNext; 685 0u, // VkPipelineDepthStencilStateCreateFlags flags; 686 false, // VkBool32 depthTestEnable; 687 false, // VkBool32 depthWriteEnable; 688 VK_COMPARE_OP_LESS, // VkCompareOp depthCompareOp; 689 false, // VkBool32 depthBoundsTestEnable; 690 false, // VkBool32 stencilTestEnable; 691 { // VkStencilOpState front; 692 VK_STENCIL_OP_ZERO, // VkStencilOp failOp; 693 VK_STENCIL_OP_ZERO, // VkStencilOp passOp; 694 VK_STENCIL_OP_ZERO, // VkStencilOp depthFailOp; 695 
VK_COMPARE_OP_NEVER, // VkCompareOp compareOp; 696 0u, // deUint32 compareMask; 697 0u, // deUint32 writeMask; 698 0u // deUint32 reference; 699 }, 700 { // VkStencilOpState back; 701 VK_STENCIL_OP_ZERO, // VkStencilOp failOp; 702 VK_STENCIL_OP_ZERO, // VkStencilOp passOp; 703 VK_STENCIL_OP_ZERO, // VkStencilOp depthFailOp; 704 VK_COMPARE_OP_NEVER, // VkCompareOp compareOp; 705 0u, // deUint32 compareMask; 706 0u, // deUint32 writeMask; 707 0u // deUint32 reference; 708 }, 709 0.0f, // float minDepthBounds; 710 1.0f // float maxDepthBounds; 711 }; 712 713 const VkGraphicsPipelineCreateInfo graphicsPipelineParams = 714 { 715 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType; 716 DE_NULL, // const void* pNext; 717 0u, // VkPipelineCreateFlags flags; 718 2u, // deUint32 stageCount; 719 shaderStages, // const VkPipelineShaderStageCreateInfo* pStages; 720 &vertexInputStateParams, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState; 721 &inputAssemblyStateParams, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState; 722 DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState; 723 &viewportStateParams, // const VkPipelineViewportStateCreateInfo* pViewportState; 724 &rasterStateParams, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState; 725 &multisampleStateParams, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState; 726 &depthStencilStateParams, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState; 727 &colorBlendStateParams, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState; 728 (const VkPipelineDynamicStateCreateInfo*)DE_NULL, // const VkPipelineDynamicStateCreateInfo* pDynamicState; 729 *m_pipelineLayout, // VkPipelineLayout layout; 730 *m_renderPass, // VkRenderPass renderPass; 731 0u, // deUint32 subpass; 732 0u, // VkPipeline basePipelineHandle; 733 0u // deInt32 basePipelineIndex; 734 }; 735 736 m_graphicsPipeline = 
createGraphicsPipeline(vk, vkDevice, DE_NULL, &graphicsPipelineParams); 737 } 738 739 // Create vertex buffer 740 { 741 const VkDeviceSize vertexBufferSize = (VkDeviceSize)(m_vertices.size() * sizeof(Vertex4Tex4)); 742 const VkBufferCreateInfo vertexBufferParams = 743 { 744 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType; 745 DE_NULL, // const void* pNext; 746 0u, // VkBufferCreateFlags flags; 747 vertexBufferSize, // VkDeviceSize size; 748 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, // VkBufferUsageFlags usage; 749 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode; 750 1u, // deUint32 queueFamilyIndexCount; 751 &queueFamilyIndex // const deUint32* pQueueFamilyIndices; 752 }; 753 754 DE_ASSERT(vertexBufferSize > 0); 755 756 m_vertexBuffer = createBuffer(vk, vkDevice, &vertexBufferParams); 757 m_vertexBufferAlloc = allocateBuffer(vki, vk, physDevice, vkDevice, *m_vertexBuffer, MemoryRequirement::HostVisible, memAlloc, m_allocationKind); 758 VK_CHECK(vk.bindBufferMemory(vkDevice, *m_vertexBuffer, m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset())); 759 760 // Load vertices into vertex buffer 761 deMemcpy(m_vertexBufferAlloc->getHostPtr(), &m_vertices[0], (size_t)vertexBufferSize); 762 flushMappedMemoryRange(vk, vkDevice, m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset(), vertexBufferParams.size); 763 } 764 765 // Create command pool 766 m_cmdPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex); 767 768 // Create command buffer 769 { 770 const VkCommandBufferBeginInfo cmdBufferBeginInfo = 771 { 772 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType; 773 DE_NULL, // const void* pNext; 774 0u, // VkCommandBufferUsageFlags flags; 775 (const VkCommandBufferInheritanceInfo*)DE_NULL, 776 }; 777 778 const std::vector<VkClearValue> attachmentClearValues (m_imageCount, defaultClearValue(m_colorFormat)); 779 780 const VkRenderPassBeginInfo renderPassBeginInfo = 781 { 782 
VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, // VkStructureType sType; 783 DE_NULL, // const void* pNext; 784 *m_renderPass, // VkRenderPass renderPass; 785 *m_framebuffer, // VkFramebuffer framebuffer; 786 { 787 { 0, 0 }, 788 { (deUint32)m_renderSize.x(), (deUint32)m_renderSize.y() } 789 }, // VkRect2D renderArea; 790 static_cast<deUint32>(attachmentClearValues.size()), // deUint32 clearValueCount; 791 &attachmentClearValues[0] // const VkClearValue* pClearValues; 792 }; 793 794 std::vector<VkImageMemoryBarrier> preAttachmentBarriers(m_imageCount); 795 796 for (int imgNdx = 0; imgNdx < m_imageCount; ++imgNdx) 797 { 798 preAttachmentBarriers[imgNdx].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; // VkStructureType sType; 799 preAttachmentBarriers[imgNdx].pNext = DE_NULL; // const void* pNext; 800 preAttachmentBarriers[imgNdx].srcAccessMask = 0u; // VkAccessFlags srcAccessMask; 801 preAttachmentBarriers[imgNdx].dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; // VkAccessFlags dstAccessMask; 802 preAttachmentBarriers[imgNdx].oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; // VkImageLayout oldLayout; 803 preAttachmentBarriers[imgNdx].newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // VkImageLayout newLayout; 804 preAttachmentBarriers[imgNdx].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; // deUint32 srcQueueFamilyIndex; 805 preAttachmentBarriers[imgNdx].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; // deUint32 dstQueueFamilyIndex; 806 preAttachmentBarriers[imgNdx].image = **m_colorImages[imgNdx]; // VkImage image; 807 preAttachmentBarriers[imgNdx].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // VkImageSubresourceRange subresourceRange; 808 preAttachmentBarriers[imgNdx].subresourceRange.baseMipLevel = 0u; 809 preAttachmentBarriers[imgNdx].subresourceRange.levelCount = 1u; 810 preAttachmentBarriers[imgNdx].subresourceRange.baseArrayLayer = 0u; 811 preAttachmentBarriers[imgNdx].subresourceRange.layerCount = 1u; 812 } 813 814 m_cmdBuffer = 
allocateCommandBuffer(vk, vkDevice, *m_cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);

        VK_CHECK(vk.beginCommandBuffer(*m_cmdBuffer, &cmdBufferBeginInfo));

        // Transition every color attachment UNDEFINED -> COLOR_ATTACHMENT_OPTIMAL
        // before the render pass writes to it.
        // NOTE(review): dstStageMask is FRAGMENT_SHADER_BIT while dstAccessMask is
        // COLOR_ATTACHMENT_WRITE_BIT - confirm the intended destination stage.
        vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, (VkDependencyFlags)0,
            0u, DE_NULL, 0u, DE_NULL, (deUint32)m_imageCount, &preAttachmentBarriers[0]);

        vk.cmdBeginRenderPass(*m_cmdBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);

        vk.cmdBindPipeline(*m_cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_graphicsPipeline);

        vk.cmdBindDescriptorSets(*m_cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipelineLayout, 0, 1, &m_descriptorSet.get(), 0, DE_NULL);

        const VkDeviceSize vertexBufferOffset = 0;
        vk.cmdBindVertexBuffers(*m_cmdBuffer, 0, 1, &m_vertexBuffer.get(), &vertexBufferOffset);
        vk.cmdDraw(*m_cmdBuffer, (deUint32)m_vertices.size(), 1, 0, 0);

        vk.cmdEndRenderPass(*m_cmdBuffer);
        VK_CHECK(vk.endCommandBuffer(*m_cmdBuffer));
    }

    // Create fence (waited on by iterate() after submission)
    m_fence = createFence(vk, vkDevice);
}

ImageSamplingInstance::~ImageSamplingInstance (void)
{
}

// Submits the command buffer recorded in the constructor, blocks on m_fence
// until rendering completes, then hands the result image(s) to verifyImage().
tcu::TestStatus ImageSamplingInstance::iterate (void)
{
    const DeviceInterface&  vk          = m_context.getDeviceInterface();
    const VkDevice          vkDevice    = m_context.getDevice();
    const VkQueue           queue       = m_context.getUniversalQueue();
    const VkSubmitInfo      submitInfo  =
    {
        VK_STRUCTURE_TYPE_SUBMIT_INFO,  // VkStructureType              sType;
        DE_NULL,                        // const void*                  pNext;
        0u,                             // deUint32                     waitSemaphoreCount;
        DE_NULL,                        // const VkSemaphore*           pWaitSemaphores;
        DE_NULL,                        // const VkPipelineStageFlags*  pWaitDstStageMask;
        1u,                             // deUint32                     commandBufferCount;
        &m_cmdBuffer.get(),             // const VkCommandBuffer*       pCommandBuffers;
        0u,                             // deUint32                     signalSemaphoreCount;
        DE_NULL                         // const VkSemaphore*           pSignalSemaphores;
    };

    VK_CHECK(vk.resetFences(vkDevice, 1, &m_fence.get()));
    VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *m_fence));
    VK_CHECK(vk.waitForFences(vkDevice, 1, &m_fence.get(), true, ~(0ull) /* infinity */));

    return verifyImage();
}

namespace
{

// The isLookupResultValid() overloads below adapt the generic 4-component
// coordinate captured by the reference renderer to the coordinate arity
// expected by each tcu::isLookupResultValid() texture-view overload.

bool isLookupResultValid (const tcu::Texture1DView&     texture,
                          const tcu::Sampler&           sampler,
                          const tcu::LookupPrecision&   precision,
                          const tcu::Vec4&              coords,
                          const tcu::Vec2&              lodBounds,
                          const tcu::Vec4&              result)
{
    return tcu::isLookupResultValid(texture, sampler, precision, coords.x(), lodBounds, result);
}

bool isLookupResultValid (const tcu::Texture1DArrayView&    texture,
                          const tcu::Sampler&               sampler,
                          const tcu::LookupPrecision&       precision,
                          const tcu::Vec4&                  coords,
                          const tcu::Vec2&                  lodBounds,
                          const tcu::Vec4&                  result)
{
    return tcu::isLookupResultValid(texture, sampler, precision, coords.swizzle(0,1), lodBounds, result);
}

bool isLookupResultValid (const tcu::Texture2DView&     texture,
                          const tcu::Sampler&           sampler,
                          const tcu::LookupPrecision&   precision,
                          const tcu::Vec4&              coords,
                          const tcu::Vec2&              lodBounds,
                          const tcu::Vec4&              result)
{
    return tcu::isLookupResultValid(texture, sampler, precision, coords.swizzle(0,1), lodBounds, result);
}

bool isLookupResultValid (const tcu::Texture2DArrayView&    texture,
                          const tcu::Sampler&               sampler,
                          const tcu::LookupPrecision&       precision,
                          const tcu::Vec4&                  coords,
                          const tcu::Vec2&                  lodBounds,
                          const tcu::Vec4&                  result)
{
    return tcu::isLookupResultValid(texture, sampler, precision, coords.swizzle(0,1,2), lodBounds, result);
}

bool isLookupResultValid (const tcu::TextureCubeView&   texture,
                          const tcu::Sampler&           sampler,
                          const tcu::LookupPrecision&   precision,
                          const tcu::Vec4&              coords,
                          const tcu::Vec2&              lodBounds,
                          const tcu::Vec4&              result)
{
    return tcu::isLookupResultValid(texture, sampler, precision, coords.swizzle(0,1,2), lodBounds, result);
}

bool isLookupResultValid (const tcu::TextureCubeArrayView& texture,
const tcu::Sampler&               sampler,
                          const tcu::LookupPrecision&       precision,
                          const tcu::Vec4&                  coords,
                          const tcu::Vec2&                  lodBounds,
                          const tcu::Vec4&                  result)
{
    // The cube array verifier additionally takes per-component coordinate
    // precision; replicate the x-component bit count into an IVec4.
    return tcu::isLookupResultValid(texture, sampler, precision, tcu::IVec4(precision.coordBits.x()), coords, lodBounds, result);
}

bool isLookupResultValid(const tcu::Texture3DView&      texture,
                         const tcu::Sampler&            sampler,
                         const tcu::LookupPrecision&    precision,
                         const tcu::Vec4&               coords,
                         const tcu::Vec2&               lodBounds,
                         const tcu::Vec4&               result)
{
    return tcu::isLookupResultValid(texture, sampler, precision, coords.swizzle(0,1,2), lodBounds, result);
}

// Checks every pixel of 'result' against the range of acceptable lookup
// values for the texture coordinate stored in the matching pixel of
// 'texCoords'. Writes a green (pass) / red (fail) mask into 'errorMask'
// and returns true iff all pixels pass.
template<typename TextureViewType>
bool validateResultImage (const TextureViewType&                texture,
                          const tcu::Sampler&                   sampler,
                          const tcu::ConstPixelBufferAccess&    texCoords,
                          const tcu::Vec2&                      lodBounds,
                          const tcu::LookupPrecision&           lookupPrecision,
                          const tcu::Vec4&                      lookupScale,
                          const tcu::Vec4&                      lookupBias,
                          const tcu::ConstPixelBufferAccess&    result,
                          const tcu::PixelBufferAccess&         errorMask)
{
    const int   w       = result.getWidth();
    const int   h       = result.getHeight();
    bool        allOk   = true;

    for (int y = 0; y < h; ++y)
    {
        for (int x = 0; x < w; ++x)
        {
            const tcu::Vec4 resultPixel = result.getPixel(x, y);
            // Undo the scale/bias applied at render time before comparing.
            const tcu::Vec4 resultColor = (resultPixel - lookupBias) / lookupScale;
            const tcu::Vec4 texCoord    = texCoords.getPixel(x, y);
            const bool      pixelOk     = isLookupResultValid(texture, sampler, lookupPrecision, texCoord, lodBounds, resultColor);

            errorMask.setPixel(tcu::Vec4(pixelOk?0.0f:1.0f, pixelOk?1.0f:0.0f, 0.0f, 1.0f), x, y);

            if (!pixelOk)
                allOk = false;
        }
    }

    return allOk;
}

// Resolves one component of a VkComponentMapping against 'vec'.
// 'identityNdx' is the component index that VK_COMPONENT_SWIZZLE_IDENTITY
// maps to.
template<typename ScalarType>
ScalarType getSwizzledComp (const tcu::Vector<ScalarType, 4>& vec, vk::VkComponentSwizzle comp, int identityNdx)
{
    if (comp == vk::VK_COMPONENT_SWIZZLE_IDENTITY)
        return vec[identityNdx];
else if (comp == vk::VK_COMPONENT_SWIZZLE_ZERO) 981 return ScalarType(0); 982 else if (comp == vk::VK_COMPONENT_SWIZZLE_ONE) 983 return ScalarType(1); 984 else 985 return vec[comp - vk::VK_COMPONENT_SWIZZLE_R]; 986 } 987 988 template<typename ScalarType> 989 tcu::Vector<ScalarType, 4> swizzle (const tcu::Vector<ScalarType, 4>& vec, const vk::VkComponentMapping& swz) 990 { 991 return tcu::Vector<ScalarType, 4>(getSwizzledComp(vec, swz.r, 0), 992 getSwizzledComp(vec, swz.g, 1), 993 getSwizzledComp(vec, swz.b, 2), 994 getSwizzledComp(vec, swz.a, 3)); 995 } 996 997 tcu::Vec4 swizzleScaleBias (const tcu::Vec4& vec, const vk::VkComponentMapping& swz) 998 { 999 const float channelValues[] = 1000 { 1001 1.0f, // -1 1002 1.0f, // 0 1003 1.0f, 1004 vec.x(), 1005 vec.y(), 1006 vec.z(), 1007 vec.w() 1008 }; 1009 1010 return tcu::Vec4(channelValues[swz.r], channelValues[swz.g], channelValues[swz.b], channelValues[swz.a]); 1011 } 1012 1013 template<typename ScalarType> 1014 void swizzleT (const tcu::ConstPixelBufferAccess& src, const tcu::PixelBufferAccess& dst, const vk::VkComponentMapping& swz) 1015 { 1016 for (int z = 0; z < dst.getDepth(); ++z) 1017 for (int y = 0; y < dst.getHeight(); ++y) 1018 for (int x = 0; x < dst.getWidth(); ++x) 1019 dst.setPixel(swizzle(src.getPixelT<ScalarType>(x, y, z), swz), x, y, z); 1020 } 1021 1022 void swizzleFromSRGB (const tcu::ConstPixelBufferAccess& src, const tcu::PixelBufferAccess& dst, const vk::VkComponentMapping& swz) 1023 { 1024 for (int z = 0; z < dst.getDepth(); ++z) 1025 for (int y = 0; y < dst.getHeight(); ++y) 1026 for (int x = 0; x < dst.getWidth(); ++x) 1027 dst.setPixel(swizzle(tcu::sRGBToLinear(src.getPixelT<float>(x, y, z)), swz), x, y, z); 1028 } 1029 1030 void swizzle (const tcu::ConstPixelBufferAccess& src, const tcu::PixelBufferAccess& dst, const vk::VkComponentMapping& swz) 1031 { 1032 const tcu::TextureChannelClass chnClass = tcu::getTextureChannelClass(dst.getFormat().type); 1033 1034 DE_ASSERT(src.getWidth() == 
dst.getWidth()  &&
              src.getHeight() == dst.getHeight() &&
              src.getDepth()  == dst.getDepth());

    if (chnClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER)
        swizzleT<deInt32>(src, dst, swz);
    else if (chnClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER)
        swizzleT<deUint32>(src, dst, swz);
    else if (tcu::isSRGB(src.getFormat()) && !tcu::isSRGB(dst.getFormat()))
        swizzleFromSRGB(src, dst, swz);
    else
        swizzleT<float>(src, dst, swz);
}

// True if the mapping leaves all four channels unchanged (each component is
// either IDENTITY or its own channel); such mappings need no pre-swizzled
// texture copy in validateResultImage().
bool isIdentitySwizzle (const vk::VkComponentMapping& swz)
{
    return (swz.r == vk::VK_COMPONENT_SWIZZLE_IDENTITY || swz.r == vk::VK_COMPONENT_SWIZZLE_R) &&
           (swz.g == vk::VK_COMPONENT_SWIZZLE_IDENTITY || swz.g == vk::VK_COMPONENT_SWIZZLE_G) &&
           (swz.b == vk::VK_COMPONENT_SWIZZLE_IDENTITY || swz.b == vk::VK_COMPONENT_SWIZZLE_B) &&
           (swz.a == vk::VK_COMPONENT_SWIZZLE_IDENTITY || swz.a == vk::VK_COMPONENT_SWIZZLE_A);
}

// Maps each tcu texture view type to the texture type that owns its storage.
template<typename TextureViewType> struct TexViewTraits;

template<> struct TexViewTraits<tcu::Texture1DView>         { typedef tcu::Texture1D        TextureType; };
template<> struct TexViewTraits<tcu::Texture1DArrayView>    { typedef tcu::Texture1DArray   TextureType; };
template<> struct TexViewTraits<tcu::Texture2DView>         { typedef tcu::Texture2D        TextureType; };
template<> struct TexViewTraits<tcu::Texture2DArrayView>    { typedef tcu::Texture2DArray   TextureType; };
template<> struct TexViewTraits<tcu::TextureCubeView>       { typedef tcu::TextureCube      TextureType; };
template<> struct TexViewTraits<tcu::TextureCubeArrayView>  { typedef tcu::TextureCubeArray TextureType; };
template<> struct TexViewTraits<tcu::Texture3DView>         { typedef tcu::Texture3D        TextureType; };

// Allocates an empty texture with the same dimensions as 'level0'
// (specializations per view type below); levels are allocated by the caller.
template<typename TextureViewType>
typename TexViewTraits<TextureViewType>::TextureType* createSkeletonClone (tcu::TextureFormat format, const tcu::ConstPixelBufferAccess& level0);

// Chooses a format able to hold any swizzled variant of 'format':
// always four channels, widest type of the source channel class.
tcu::TextureFormat getSwizzleTargetFormat
(tcu::TextureFormat format)
{
    // Swizzled texture needs to hold all four channels
    // \todo [2016-09-21 pyry] We could save some memory by using smaller formats
    //       when possible (for example U8).

    const tcu::TextureChannelClass  chnClass    = tcu::getTextureChannelClass(format.type);

    if (chnClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER)
        return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::SIGNED_INT32);
    else if (chnClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER)
        return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT32);
    else
        return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::FLOAT);
}

template<>
tcu::Texture1D* createSkeletonClone<tcu::Texture1DView> (tcu::TextureFormat format, const tcu::ConstPixelBufferAccess& level0)
{
    return new tcu::Texture1D(format, level0.getWidth());
}

template<>
tcu::Texture1DArray* createSkeletonClone<tcu::Texture1DArrayView> (tcu::TextureFormat format, const tcu::ConstPixelBufferAccess& level0)
{
    // For 1D arrays the level access's height holds the layer count.
    return new tcu::Texture1DArray(format, level0.getWidth(), level0.getHeight());
}

template<>
tcu::Texture2D* createSkeletonClone<tcu::Texture2DView> (tcu::TextureFormat format, const tcu::ConstPixelBufferAccess& level0)
{
    return new tcu::Texture2D(format, level0.getWidth(), level0.getHeight());
}

template<>
tcu::Texture2DArray* createSkeletonClone<tcu::Texture2DArrayView> (tcu::TextureFormat format, const tcu::ConstPixelBufferAccess& level0)
{
    // For 2D arrays the level access's depth holds the layer count.
    return new tcu::Texture2DArray(format, level0.getWidth(), level0.getHeight(), level0.getDepth());
}

template<>
tcu::Texture3D* createSkeletonClone<tcu::Texture3DView> (tcu::TextureFormat format, const tcu::ConstPixelBufferAccess& level0)
{
    return new tcu::Texture3D(format, level0.getWidth(), level0.getHeight(),
level0.getDepth());
}

template<>
tcu::TextureCubeArray* createSkeletonClone<tcu::TextureCubeArrayView> (tcu::TextureFormat format, const tcu::ConstPixelBufferAccess& level0)
{
    // For cube arrays the level access's depth holds the layer count.
    return new tcu::TextureCubeArray(format, level0.getWidth(), level0.getDepth());
}

// Creates a deep copy of 'texture' with the component mapping 'swz' baked
// into the texel data, so validation can run with an identity mapping.
template<typename TextureViewType>
MovePtr<typename TexViewTraits<TextureViewType>::TextureType> createSwizzledCopy (const TextureViewType& texture, const vk::VkComponentMapping& swz)
{
    MovePtr<typename TexViewTraits<TextureViewType>::TextureType>   copy    (createSkeletonClone<TextureViewType>(getSwizzleTargetFormat(texture.getLevel(0).getFormat()), texture.getLevel(0)));

    for (int levelNdx = 0; levelNdx < texture.getNumLevels(); ++levelNdx)
    {
        copy->allocLevel(levelNdx);
        swizzle(texture.getLevel(levelNdx), copy->getLevel(levelNdx), swz);
    }

    return copy;
}

// Cube maps store their data per face, so they need a dedicated specialization.
template<>
MovePtr<tcu::TextureCube> createSwizzledCopy (const tcu::TextureCubeView& texture, const vk::VkComponentMapping& swz)
{
    MovePtr<tcu::TextureCube>   copy    (new tcu::TextureCube(getSwizzleTargetFormat(texture.getLevelFace(0, tcu::CUBEFACE_NEGATIVE_X).getFormat()), texture.getSize()));

    for (int faceNdx = 0; faceNdx < tcu::CUBEFACE_LAST; ++faceNdx)
    {
        for (int levelNdx = 0; levelNdx < texture.getNumLevels(); ++levelNdx)
        {
            copy->allocLevel((tcu::CubeFace)faceNdx, levelNdx);
            swizzle(texture.getLevelFace(levelNdx, (tcu::CubeFace)faceNdx), copy->getLevelFace(levelNdx, (tcu::CubeFace)faceNdx), swz);
        }
    }

    return copy;
}

// As the pixel-loop validateResultImage() above, but first applies the
// component mapping, pre-swizzling the reference texture (and the scale/bias
// vectors) when the mapping is not an identity.
template<typename TextureViewType>
bool validateResultImage (const TextureViewType&                texture,
                          const tcu::Sampler&                   sampler,
                          const vk::VkComponentMapping&         swz,
                          const tcu::ConstPixelBufferAccess&    texCoords,
                          const tcu::Vec2&                      lodBounds,
                          const tcu::LookupPrecision&           lookupPrecision,
                          const tcu::Vec4&                      lookupScale,
                          const
tcu::Vec4&                            lookupBias,
                          const tcu::ConstPixelBufferAccess&    result,
                          const tcu::PixelBufferAccess&         errorMask)
{
    if (isIdentitySwizzle(swz))
        return validateResultImage(texture, sampler, texCoords, lodBounds, lookupPrecision, lookupScale, lookupBias, result, errorMask);
    else
    {
        // There is (currently) no way to handle swizzling inside validation loop
        // and thus we need to pre-swizzle the texture.
        UniquePtr<typename TexViewTraits<TextureViewType>::TextureType> swizzledTex (createSwizzledCopy(texture, swz));

        return validateResultImage(*swizzledTex, sampler, texCoords, lodBounds, lookupPrecision, swizzleScaleBias(lookupScale, swz), swizzleScaleBias(lookupBias, swz), result, errorMask);
    }
}

// Replaces VK_REMAINING_MIP_LEVELS / VK_REMAINING_ARRAY_LAYERS with concrete
// counts derived from 'testTexture'.
vk::VkImageSubresourceRange resolveSubresourceRange (const TestTexture& testTexture, const vk::VkImageSubresourceRange& subresource)
{
    vk::VkImageSubresourceRange resolved = subresource;

    if (subresource.levelCount == VK_REMAINING_MIP_LEVELS)
        resolved.levelCount = testTexture.getNumLevels()-subresource.baseMipLevel;

    if (subresource.layerCount == VK_REMAINING_ARRAY_LAYERS)
        resolved.layerCount = testTexture.getArraySize()-subresource.baseArrayLayer;

    return resolved;
}

// Builds a 1D view of the selected mip range. 'levels' provides backing
// storage for the level accesses and must outlive the returned view.
MovePtr<tcu::Texture1DView> getTexture1DView (const TestTexture& testTexture, const vk::VkImageSubresourceRange& subresource, std::vector<tcu::ConstPixelBufferAccess>& levels)
{
    DE_ASSERT(subresource.layerCount == 1);

    levels.resize(subresource.levelCount);

    for (int levelNdx = 0; levelNdx < (int)levels.size(); ++levelNdx)
    {
        const tcu::ConstPixelBufferAccess& srcLevel = testTexture.getLevel((int)subresource.baseMipLevel+levelNdx, subresource.baseArrayLayer);

        levels[levelNdx] = tcu::getSubregion(srcLevel, 0, 0, 0, srcLevel.getWidth(), 1, 1);
    }

    return MovePtr<tcu::Texture1DView>(new tcu::Texture1DView((int)levels.size(),
&levels[0]));
}

// Builds a 1D array view of the selected mip/layer range. Accepts either a
// TestTexture1D or a TestTexture1DArray source. 'levels' must outlive the view.
MovePtr<tcu::Texture1DArrayView> getTexture1DArrayView (const TestTexture& testTexture, const vk::VkImageSubresourceRange& subresource, std::vector<tcu::ConstPixelBufferAccess>& levels)
{
    const TestTexture1D*        tex1D       = dynamic_cast<const TestTexture1D*>(&testTexture);
    const TestTexture1DArray*   tex1DArray  = dynamic_cast<const TestTexture1DArray*>(&testTexture);

    // Exactly one of the two dynamic casts must succeed.
    DE_ASSERT(!!tex1D != !!tex1DArray);
    DE_ASSERT(tex1DArray || subresource.baseArrayLayer == 0);

    levels.resize(subresource.levelCount);

    for (int levelNdx = 0; levelNdx < (int)levels.size(); ++levelNdx)
    {
        const tcu::ConstPixelBufferAccess& srcLevel = tex1D ? tex1D->getTexture().getLevel((int)subresource.baseMipLevel+levelNdx)
                                                            : tex1DArray->getTexture().getLevel((int)subresource.baseMipLevel+levelNdx);

        // 1D array layers live in the y dimension of the level access.
        levels[levelNdx] = tcu::getSubregion(srcLevel, 0, (int)subresource.baseArrayLayer, 0, srcLevel.getWidth(), (int)subresource.layerCount, 1);
    }

    return MovePtr<tcu::Texture1DArrayView>(new tcu::Texture1DArrayView((int)levels.size(), &levels[0]));
}

// Builds a 2D view of the selected mip range and single layer. Accepts either
// a TestTexture2D or a TestTexture2DArray source. 'levels' must outlive the view.
MovePtr<tcu::Texture2DView> getTexture2DView (const TestTexture& testTexture, const vk::VkImageSubresourceRange& subresource, std::vector<tcu::ConstPixelBufferAccess>& levels)
{
    const TestTexture2D*        tex2D       = dynamic_cast<const TestTexture2D*>(&testTexture);
    const TestTexture2DArray*   tex2DArray  = dynamic_cast<const TestTexture2DArray*>(&testTexture);

    DE_ASSERT(subresource.layerCount == 1);
    // Exactly one of the two dynamic casts must succeed.
    DE_ASSERT(!!tex2D != !!tex2DArray);
    DE_ASSERT(tex2DArray || subresource.baseArrayLayer == 0);

    levels.resize(subresource.levelCount);

    for (int levelNdx = 0; levelNdx < (int)levels.size(); ++levelNdx)
    {
        const tcu::ConstPixelBufferAccess& srcLevel = tex2D ?
tex2D->getTexture().getLevel((int)subresource.baseMipLevel+levelNdx)
                                                            : tex2DArray->getTexture().getLevel((int)subresource.baseMipLevel+levelNdx);

        // 2D array layers live in the z dimension of the level access.
        levels[levelNdx] = tcu::getSubregion(srcLevel, 0, 0, (int)subresource.baseArrayLayer, srcLevel.getWidth(), srcLevel.getHeight(), 1);
    }

    return MovePtr<tcu::Texture2DView>(new tcu::Texture2DView((int)levels.size(), &levels[0]));
}

// Builds a 2D array view of the selected mip/layer range. Accepts either a
// TestTexture2D or a TestTexture2DArray source. 'levels' must outlive the view.
MovePtr<tcu::Texture2DArrayView> getTexture2DArrayView (const TestTexture& testTexture, const vk::VkImageSubresourceRange& subresource, std::vector<tcu::ConstPixelBufferAccess>& levels)
{
    const TestTexture2D*        tex2D       = dynamic_cast<const TestTexture2D*>(&testTexture);
    const TestTexture2DArray*   tex2DArray  = dynamic_cast<const TestTexture2DArray*>(&testTexture);

    // Exactly one of the two dynamic casts must succeed.
    DE_ASSERT(!!tex2D != !!tex2DArray);
    DE_ASSERT(tex2DArray || subresource.baseArrayLayer == 0);

    levels.resize(subresource.levelCount);

    for (int levelNdx = 0; levelNdx < (int)levels.size(); ++levelNdx)
    {
        const tcu::ConstPixelBufferAccess& srcLevel = tex2D ?
tex2D->getTexture().getLevel((int)subresource.baseMipLevel+levelNdx)
                                                            : tex2DArray->getTexture().getLevel((int)subresource.baseMipLevel+levelNdx);

        levels[levelNdx] = tcu::getSubregion(srcLevel, 0, 0, (int)subresource.baseArrayLayer, srcLevel.getWidth(), srcLevel.getHeight(), (int)subresource.layerCount);
    }

    return MovePtr<tcu::Texture2DArrayView>(new tcu::Texture2DArrayView((int)levels.size(), &levels[0]));
}

// Builds a cube view (6 faces) of the selected mip range. Handles both a plain
// cube texture (per-face levels) and a cube array (face selected via layer).
// 'levels' must outlive the returned view.
MovePtr<tcu::TextureCubeView> getTextureCubeView (const TestTexture& testTexture, const vk::VkImageSubresourceRange& subresource, std::vector<tcu::ConstPixelBufferAccess>& levels)
{
    // Layer order of faces within a cube image.
    const static tcu::CubeFace s_faceMap[tcu::CUBEFACE_LAST] =
    {
        tcu::CUBEFACE_POSITIVE_X,
        tcu::CUBEFACE_NEGATIVE_X,
        tcu::CUBEFACE_POSITIVE_Y,
        tcu::CUBEFACE_NEGATIVE_Y,
        tcu::CUBEFACE_POSITIVE_Z,
        tcu::CUBEFACE_NEGATIVE_Z
    };

    const TestTextureCube*      texCube         = dynamic_cast<const TestTextureCube*>(&testTexture);
    const TestTextureCubeArray* texCubeArray    = dynamic_cast<const TestTextureCubeArray*>(&testTexture);

    // Exactly one of the two dynamic casts must succeed.
    DE_ASSERT(!!texCube != !!texCubeArray);
    DE_ASSERT(subresource.layerCount == 6);
    DE_ASSERT(texCubeArray || subresource.baseArrayLayer == 0);

    // levels[] is laid out face-major: all levels of face 0, then face 1, ...
    levels.resize(subresource.levelCount*tcu::CUBEFACE_LAST);

    for (int faceNdx = 0; faceNdx < tcu::CUBEFACE_LAST; ++faceNdx)
    {
        for (int levelNdx = 0; levelNdx < (int)subresource.levelCount; ++levelNdx)
        {
            // For cube arrays the face is selected via the layer (z) offset
            // below; for plain cubes getLevelFace() selects the face directly.
            // NOTE(review): the non-array branch ignores subresource.baseMipLevel
            // - confirm plain-cube cases always use baseMipLevel 0.
            const tcu::ConstPixelBufferAccess& srcLevel = texCubeArray ? texCubeArray->getTexture().getLevel((int)subresource.baseMipLevel+levelNdx)
                                                                       : texCube->getTexture().getLevelFace(levelNdx, s_faceMap[faceNdx]);

            levels[faceNdx*subresource.levelCount + levelNdx] = tcu::getSubregion(srcLevel, 0, 0, (int)subresource.baseArrayLayer + (texCubeArray ?
faceNdx : 0), srcLevel.getWidth(), srcLevel.getHeight(), 1);
        }
    }

    {
        // tcu::TextureCubeView expects the face pointers in tcu::CubeFace
        // order; reorder from the Vulkan layer order used above.
        const tcu::ConstPixelBufferAccess* reordered[tcu::CUBEFACE_LAST];

        for (int faceNdx = 0; faceNdx < tcu::CUBEFACE_LAST; ++faceNdx)
            reordered[s_faceMap[faceNdx]] = &levels[faceNdx*subresource.levelCount];

        return MovePtr<tcu::TextureCubeView>(new tcu::TextureCubeView((int)subresource.levelCount, reordered));
    }
}

// Builds a cube array view of the selected mip/layer range.
// 'levels' must outlive the returned view.
MovePtr<tcu::TextureCubeArrayView> getTextureCubeArrayView (const TestTexture& testTexture, const vk::VkImageSubresourceRange& subresource, std::vector<tcu::ConstPixelBufferAccess>& levels)
{
    const TestTextureCubeArray* texCubeArray = dynamic_cast<const TestTextureCubeArray*>(&testTexture);

    DE_ASSERT(texCubeArray);
    // Cube arrays always contain whole cubes (multiples of 6 layers).
    DE_ASSERT(subresource.layerCount%6 == 0);

    levels.resize(subresource.levelCount);

    for (int levelNdx = 0; levelNdx < (int)subresource.levelCount; ++levelNdx)
    {
        const tcu::ConstPixelBufferAccess& srcLevel = texCubeArray->getTexture().getLevel((int)subresource.baseMipLevel+levelNdx);

        levels[levelNdx] = tcu::getSubregion(srcLevel, 0, 0, (int)subresource.baseArrayLayer, srcLevel.getWidth(), srcLevel.getHeight(), (int)subresource.layerCount);
    }

    return MovePtr<tcu::TextureCubeArrayView>(new tcu::TextureCubeArrayView((int)levels.size(), &levels[0]));
}

// Builds a 3D view of the selected mip range. 'levels' must outlive the view.
MovePtr<tcu::Texture3DView> getTexture3DView (const TestTexture& testTexture, const vk::VkImageSubresourceRange& subresource, std::vector<tcu::ConstPixelBufferAccess>& levels)
{
    DE_ASSERT(subresource.baseArrayLayer == 0 && subresource.layerCount == 1);

    levels.resize(subresource.levelCount);

    for (int levelNdx = 0; levelNdx < (int)levels.size(); ++levelNdx)
        levels[levelNdx] = testTexture.getLevel((int)subresource.baseMipLevel+levelNdx, subresource.baseArrayLayer);

    return MovePtr<tcu::Texture3DView>(new tcu::Texture3DView((int)levels.size(),
&levels[0])); 1340 } 1341 1342 bool validateResultImage (const TestTexture& texture, 1343 const VkImageViewType imageViewType, 1344 const VkImageSubresourceRange& subresource, 1345 const tcu::Sampler& sampler, 1346 const vk::VkComponentMapping& componentMapping, 1347 const tcu::ConstPixelBufferAccess& coordAccess, 1348 const tcu::Vec2& lodBounds, 1349 const tcu::LookupPrecision& lookupPrecision, 1350 const tcu::Vec4& lookupScale, 1351 const tcu::Vec4& lookupBias, 1352 const tcu::ConstPixelBufferAccess& resultAccess, 1353 const tcu::PixelBufferAccess& errorAccess) 1354 { 1355 std::vector<tcu::ConstPixelBufferAccess> levels; 1356 1357 switch (imageViewType) 1358 { 1359 case VK_IMAGE_VIEW_TYPE_1D: 1360 { 1361 UniquePtr<tcu::Texture1DView> texView(getTexture1DView(texture, subresource, levels)); 1362 1363 return validateResultImage(*texView, sampler, componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess); 1364 } 1365 1366 case VK_IMAGE_VIEW_TYPE_1D_ARRAY: 1367 { 1368 UniquePtr<tcu::Texture1DArrayView> texView(getTexture1DArrayView(texture, subresource, levels)); 1369 1370 return validateResultImage(*texView, sampler, componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess); 1371 } 1372 1373 case VK_IMAGE_VIEW_TYPE_2D: 1374 { 1375 UniquePtr<tcu::Texture2DView> texView(getTexture2DView(texture, subresource, levels)); 1376 1377 return validateResultImage(*texView, sampler, componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess); 1378 } 1379 1380 case VK_IMAGE_VIEW_TYPE_2D_ARRAY: 1381 { 1382 UniquePtr<tcu::Texture2DArrayView> texView(getTexture2DArrayView(texture, subresource, levels)); 1383 1384 return validateResultImage(*texView, sampler, componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess); 1385 } 1386 1387 case VK_IMAGE_VIEW_TYPE_CUBE: 1388 { 1389 
UniquePtr<tcu::TextureCubeView> texView(getTextureCubeView(texture, subresource, levels)); 1390 1391 return validateResultImage(*texView, sampler, componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess); 1392 } 1393 1394 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: 1395 { 1396 UniquePtr<tcu::TextureCubeArrayView> texView(getTextureCubeArrayView(texture, subresource, levels)); 1397 1398 return validateResultImage(*texView, sampler, componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess); 1399 break; 1400 } 1401 1402 case VK_IMAGE_VIEW_TYPE_3D: 1403 { 1404 UniquePtr<tcu::Texture3DView> texView(getTexture3DView(texture, subresource, levels)); 1405 1406 return validateResultImage(*texView, sampler, componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess); 1407 } 1408 1409 default: 1410 DE_ASSERT(false); 1411 return false; 1412 } 1413 } 1414 1415 } // anonymous 1416 1417 tcu::TestStatus ImageSamplingInstance::verifyImage (void) 1418 { 1419 const VkPhysicalDeviceLimits& limits = m_context.getDeviceProperties().limits; 1420 // \note Color buffer is used to capture coordinates - not sampled texture values 1421 const tcu::TextureFormat colorFormat (tcu::TextureFormat::RGBA, tcu::TextureFormat::FLOAT); 1422 const tcu::TextureFormat depthStencilFormat; // Undefined depth/stencil format. 
    const CoordinateCaptureProgram  coordCaptureProgram;
    const rr::Program               rrProgram           = coordCaptureProgram.getReferenceProgram();
    ReferenceRenderer               refRenderer         (m_renderSize.x(), m_renderSize.y(), 1, colorFormat, depthStencilFormat, &rrProgram);

    bool                            compareOkAll        = true;
    bool                            anyWarnings         = false;

    tcu::Vec4                       lookupScale         (1.0f);
    tcu::Vec4                       lookupBias          (0.0f);

    getLookupScaleBias(m_imageFormat, lookupScale, lookupBias);

    // Render out coordinates
    {
        const rr::RenderState renderState(refRenderer.getViewportState());
        refRenderer.draw(renderState, rr::PRIMITIVETYPE_TRIANGLES, m_vertices);
    }

    // Verify results
    {
        const tcu::Sampler                sampler       = mapVkSampler(m_samplerParams);
        const float                       referenceLod  = de::clamp(m_samplerParams.mipLodBias + m_samplerLod, m_samplerParams.minLod, m_samplerParams.maxLod);
        // Allowed LOD error derived from the implementation's mipmap precision.
        const float                       lodError      = 1.0f / static_cast<float>((1u << limits.mipmapPrecisionBits) - 1u);
        const tcu::Vec2                   lodBounds     (referenceLod - lodError, referenceLod + lodError);
        const vk::VkImageSubresourceRange subresource   = resolveSubresourceRange(*m_texture, m_subresourceRange);

        const tcu::ConstPixelBufferAccess coordAccess   = refRenderer.getAccess();
        tcu::TextureLevel                 errorMask     (tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8), (int)m_renderSize.x(), (int)m_renderSize.y());
        const tcu::PixelBufferAccess      errorAccess   = errorMask.getAccess();

        // Linear-filtered SNORM8 may trigger a known HW rounding issue; see waiver below.
        const bool                        allowSnorm8Bug = m_texture->getTextureFormat().type == tcu::TextureFormat::SNORM_INT8 &&
                                                           (m_samplerParams.minFilter == VK_FILTER_LINEAR || m_samplerParams.magFilter == VK_FILTER_LINEAR);
        const bool                        isNearestOnly  = (m_samplerParams.minFilter == VK_FILTER_NEAREST && m_samplerParams.magFilter == VK_FILTER_NEAREST);

        tcu::LookupPrecision              lookupPrecision;

        // Set precision requirements - very low for these tests as
        // the point of the test is not to validate accuracy.
        lookupPrecision.coordBits       = tcu::IVec3(17, 17, 17);
        lookupPrecision.uvwBits         = tcu::IVec3(5, 5, 5);
        lookupPrecision.colorMask       = tcu::BVec4(true);
        lookupPrecision.colorThreshold  = tcu::computeFixedPointThreshold(max((tcu::IVec4(8, 8, 8, 8) - (isNearestOnly ? 1 : 2)), tcu::IVec4(0))) / swizzleScaleBias(lookupScale, m_componentMapping);

        if (tcu::isSRGB(m_texture->getTextureFormat()))
            lookupPrecision.colorThreshold += tcu::Vec4(4.f / 255.f);

        for (int imgNdx = 0; imgNdx < m_imageCount; ++imgNdx)
        {
            // Read back result image
            UniquePtr<tcu::TextureLevel>        result  (readColorAttachment(m_context.getDeviceInterface(),
                                                                             m_context.getDevice(),
                                                                             m_context.getUniversalQueue(),
                                                                             m_context.getUniversalQueueFamilyIndex(),
                                                                             m_context.getDefaultAllocator(),
                                                                             **m_colorImages[imgNdx],
                                                                             m_colorFormat,
                                                                             m_renderSize));
            const tcu::ConstPixelBufferAccess   resultAccess = result->getAccess();
            bool                                compareOk    = validateResultImage(*m_texture,
                                                                                   m_imageViewType,
                                                                                   subresource,
                                                                                   sampler,
                                                                                   m_componentMapping,
                                                                                   coordAccess,
                                                                                   lodBounds,
                                                                                   lookupPrecision,
                                                                                   lookupScale,
                                                                                   lookupBias,
                                                                                   resultAccess,
                                                                                   errorAccess);

            if (!compareOk && allowSnorm8Bug)
            {
                // HW waiver (VK-GL-CTS issue: 229)
                //
                // Due to an error in bit replication of the fixed point SNORM values, linear filtered
                // negative SNORM values will differ slightly from ideal precision in the last bit, moving
                // the values towards 0.
                //
                // This occurs on all members of the PowerVR Rogue family of GPUs
                tcu::LookupPrecision relaxedPrecision;

                // NOTE(review): relaxedPrecision starts from a default-constructed
                // LookupPrecision (not a copy of lookupPrecision) before widening
                // the threshold - confirm this is intentional.
                relaxedPrecision.colorThreshold += tcu::Vec4(4.f / 255.f);

                m_context.getTestContext().getLog()
                    << tcu::TestLog::Message
                    << "Warning: Strict validation failed, re-trying with lower precision for SNORM8 format"
                    << tcu::TestLog::EndMessage;
                anyWarnings = true;

                compareOk = validateResultImage(*m_texture,
                                                m_imageViewType,
                                                subresource,
                                                sampler,
                                                m_componentMapping,
                                                coordAccess,
                                                lodBounds,
                                                relaxedPrecision,
                                                lookupScale,
                                                lookupBias,
                                                resultAccess,
                                                errorAccess);
            }

            if (!compareOk)
                m_context.getTestContext().getLog()
                    << tcu::TestLog::Image("Result", "Result Image", resultAccess)
                    << tcu::TestLog::Image("ErrorMask", "Error Mask", errorAccess);

            // A single failing image fails the whole test.
            compareOkAll = compareOkAll && compareOk;
        }
    }

    if (compareOkAll)
    {
        if (anyWarnings)
            return tcu::TestStatus(QP_TEST_RESULT_QUALITY_WARNING, "Inaccurate filtering results");
        else
            return tcu::TestStatus::pass("Result image matches reference");
    }
    else
        return tcu::TestStatus::fail("Image mismatch");
}

} // pipeline
} // vkt